diff --git "a/1185.jsonl" "b/1185.jsonl" new file mode 100644--- /dev/null +++ "b/1185.jsonl" @@ -0,0 +1,1325 @@ +{"seq_id": "16146516364", "text": "bl_info = {\r\n \"name\": \"BMAX Connector\",\r\n \"author\": \"Titus Lavrov / Email: Titus.mailbox@gmail.com\",\r\n \"version\": (0, 1, 6),\r\n \"blender\": (2, 80, 0),\r\n \"location\": \"View3D > Toolbar and View3D\",\r\n \"warning\": \"\",\r\n \"description\": \"Bridge between 3dmax and Blender\",\r\n \"wiki_url\": \"\"\r\n \"\",\r\n \"category\": \"Import-Export\",\r\n}\r\n\r\nimport bpy\r\n\r\nfrom .operators.operators import (BMAX_OT_Export, \r\n BMAX_OT_Import,\r\n BMAX_OT_Export_USD,\r\n BMAX_OT_Import_USD)\r\n\r\nfrom .ui.panel import VIEW3D_PT_BMAX\r\nfrom .preferences.preferences import BMAX_AddonPreferences\r\n\r\n#Classes for register and unregister\r\nclasses = (\r\n BMAX_OT_Export,\r\n BMAX_OT_Import,\r\n BMAX_OT_Export_USD,\r\n BMAX_OT_Import_USD,\r\n VIEW3D_PT_BMAX,\r\n BMAX_AddonPreferences\r\n )\r\n \r\ndef register():\r\n for cls in classes:\r\n bpy.utils.register_class(cls)\r\n print (\"BMAX Connector - Registered!\")\r\n\r\ndef unregister(): \r\n for cls in reversed(classes):\r\n bpy.utils.unregister_class(cls)\r\n print (\"BMAX Connector - Unregistered!\")\r\n\r\nif __name__ == \"__main__\":\r\n register()", "repo_name": "TitusLVR/BMAX_Connector", "sub_path": "__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1263, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 111, "dataset": "github-code", "pt": "50", "api": [{"api_name": "operators.operators.BMAX_OT_Export", "line_number": 26, "usage_type": "name"}, {"api_name": "operators.operators.BMAX_OT_Import", "line_number": 27, "usage_type": "name"}, {"api_name": "operators.operators.BMAX_OT_Export_USD", "line_number": 28, "usage_type": "name"}, {"api_name": "operators.operators.BMAX_OT_Import_USD", "line_number": 29, "usage_type": "name"}, {"api_name": "ui.panel.VIEW3D_PT_BMAX", "line_number": 30, "usage_type": "name"}, {"api_name": "preferences.preferences.BMAX_AddonPreferences", "line_number": 31, "usage_type": "name"}, {"api_name": "bpy.utils.register_class", "line_number": 36, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_class", "line_number": 41, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "12793296864", "text": "from flake8 import engine\nimport pep8\n\nimport hacking.tests\n\n\ndef check(physical_line):\n \"\"\"Test check to make sure local-checks are working.\"\"\"\n if physical_line.strip() == \"#this-is-the-test-phrase\":\n return (0, \"L100: Found local-check test case\")\n\n\nclass HackingTestCase(hacking.tests.TestCase):\n def test_local_check(self):\n flake8_style = engine.get_style_guide(parse_argv=False, ignore='F')\n report = pep8.BaseReport(flake8_style.options)\n line = [\"#this-is-the-test-phrase\"]\n checker = pep8.Checker(lines=line, options=flake8_style.options,\n report=report)\n checker.check_all()\n self.assertIn(\"L100\", report.counters)\n", "repo_name": "michaelhenkel/contrail-tripleo-docu", "sub_path": ".tox/docs/lib/python2.7/site-packages/hacking/tests/test_local.py", "file_name": "test_local.py", "file_ext": "py", "file_size_in_byte": 714, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "hacking.tests.tests", "line_number": 13, "usage_type": "attribute"}, 
{"api_name": "hacking.tests", "line_number": 13, "usage_type": "name"}, {"api_name": "flake8.engine.get_style_guide", "line_number": 15, "usage_type": "call"}, {"api_name": "flake8.engine", "line_number": 15, "usage_type": "name"}, {"api_name": "pep8.BaseReport", "line_number": 16, "usage_type": "call"}, {"api_name": "pep8.Checker", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "5647586322", "text": "from django.shortcuts import render\nfrom .forms import StudentForm\nfrom .models import Student\n# Create your views here.\ndef student_form_view(request):\n form=StudentForm()\n if request.method=='POST':\n form=StudentForm(request.POST)\n if form.is_valid():\n form.save(commit=True)\n\n return render(request,'testapp/registration.html',{'form':form})\n", "repo_name": "sandipdeshmukh77/django-practice-projects", "sub_path": "modelformprojectdemo_1/testapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "forms.StudentForm", "line_number": 6, "usage_type": "call"}, {"api_name": "forms.StudentForm", "line_number": 8, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "6330418533", "text": "import demistomock as demisto # noqa: F401\nfrom CommonServerPython import * # noqa: F401\nimport io\n\n\ndef main():\n args = demisto.args()\n testName = args.get(\"testName\", \"\")\n listName = args.get(\"listName\", \"\")\n try:\n if listName != \"\":\n listlines = demisto.executeCommand(\"getList\", {'listName': listName})[0]['Contents']\n buf = io.StringIO(listlines)\n else:\n raise DemistoException(\"No test case list provided\")\n\n line = buf.readline()\n while line != \"\":\n words = line.split(\"|\", 1)\n testType = words[0].strip()\n datalist = words[1].strip().replace(\"\\n\", \"\")\n # Process commands to load fields or context\n if testType == \"LoadFields\":\n demisto.executeCommand(\"UnitTestLoadFieldsList\", {'list': datalist})\n elif testType == \"LoadContext\":\n demisto.executeCommand(\"UnitTestLoadContextList\", {'list': datalist})\n line = buf.readline()\n\n except Exception as ex:\n demisto.error(traceback.format_exc())\n return_error(f\"UnitTestCasePrep: {testName} Exception failed to execute. 
Error: {str(ex)}\")\n\n\nif __name__ in ('__main__', '__builtin__', 'builtins'):\n    main()\n", "repo_name": "demisto/content", "sub_path": "Packs/ContentTesting/Scripts/UnitTestCasePrep/UnitTestCasePrep.py", "file_name": "UnitTestCasePrep.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1023, "dataset": "github-code", "pt": "50", "api": [{"api_name": "demistomock.args", "line_number": 7, "usage_type": "call"}, {"api_name": "demistomock.executeCommand", "line_number": 12, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 13, "usage_type": "call"}, {"api_name": "demistomock.executeCommand", "line_number": 24, "usage_type": "call"}, {"api_name": "demistomock.executeCommand", "line_number": 26, "usage_type": "call"}, {"api_name": "demistomock.error", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "40668618346", "text": "\"\"\" ************************************************\n* fileName: frames_folder_demo.py\n* desc: The demo of inference on a 
video split into consecutive frames\n* author: mingdeng_cao\n* date: 2021/07/09 11:31\n* last revised: None\n************************************************ \"\"\"\n\nimport os\nimport sys\nimport argparse\n\nimport torch\nimport torch.nn as nn \nimport torch.nn.functional as F\nfrom easydict import EasyDict as edict\n\nfrom simdeblur.config import build_config\nfrom simdeblur.model import build_backbone\nfrom simdeblur.dataset.base import DatasetBase\nfrom simdeblur.dataset.frames_folder import FramesFolder\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\", type=str, help=\"The config .yaml file of deblurring model. \")\n parser.add_argument(\"ckpt\", type=str, help=\"The trained checkpoint of the selected deblurring model. \")\n parser.add_argument(\"--frames_folder_path\", type=str, help=\"The video frames folder path. \")\n parser.add_argument(\"--save_path\", type=str, help=\"The output deblurred path\")\n\n args = parser.parse_args()\n\n return args\n\n\ndef frames_folder_demo():\n args = parse_args()\n config = build_config(args.config)\n config.args = args\n model = build_backbone(config.model).cuda()\n\n ckpt = torch.load(args.ckpt, map_location=\"cuda:0\")\n\n model_ckpt = ckpt[\"model\"]\n model_ckpt = {k[7:]: v for k, v in model_ckpt.items()}\n model.load_state_dict(model_ckpt)\n\n data_config = edict({\n \"root_input\": \"/home/cmd/projects/simdeblur/datasets/DVD/qualitative_datasets/alley/input\",\n \"num_frames\": 5,\n \"overlapping\": True,\n \"sampling\": \"n_c\"\n })\n frames_data = FramesFolder(data_config)\n frames_dataloader = torch.utils.data.DataLoader(frames_data, 1)\n\n model.eval()\n with torch.no_grad():\n for i, batch_data in enumerate(frames_dataloader):\n out = model(batch_data[\"input_frames\"].cuda())\n print(batch_data[\"gt_names\"], out.shape)\n\n\nif __name__ == \"__main__\":\n frames_folder_demo()\n", "repo_name": "ljzycmd/SimDeblur", "sub_path": "demo/frames_folder_demo.py", "file_name": "frames_folder_demo.py", "file_ext": "py", "file_size_in_byte": 2055, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 262, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 24, "usage_type": "call"}, {"api_name": "simdeblur.config.build_config", "line_number": 37, "usage_type": "call"}, {"api_name": "simdeblur.model.build_backbone", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 41, "usage_type": "call"}, {"api_name": "easydict.EasyDict", "line_number": 47, "usage_type": "call"}, {"api_name": "simdeblur.dataset.frames_folder.FramesFolder", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "148357369", "text": "import json\nfrom django.core.management.base import BaseCommand\nfrom classification.models import Node\n\nclass Command(BaseCommand):\n help = 'Populates RDS with hierarchy'\n\n def handle(self, *args, **kwargs):\n \n with open(\"fixtures/hierachy.json\", \"r\") as hierachy:\n f = json.load(hierachy)\n for node in f:\n self.append_children(node)\n\n\n def append_children(self, node, parent=None):\n\n print(f\"Adding {node['title']}\")\n parent = Node.objects.create(title=node[\"title\"], parent=parent)\n if node[\"children\"]:\n for child in node[\"children\"]:\n 
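# Recurse with the newly created node as the parent of each child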
self.append_children(child, parent)\n\n return 0\n \n", "repo_name": "IamMiracleAlex/admintools", "sub_path": "classification/management/commands/json_to_sql.py", "file_name": "json_to_sql.py", "file_ext": "py", "file_size_in_byte": 716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 5, "usage_type": "name"}, {"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "classification.models.Node.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "classification.models.Node.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "classification.models.Node", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "17952345720", "text": "import glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\nimport pickle\n\nfrom scipy import misc\n\n\nnp.random.seed(0)\n\n\nSAMPLE_QTY = 30000\nTRAINING_STEP = 0.001\nTRAINING_EPOCHS = 1000\nCLASS_QTY = None\nTEST_SET_PROPORTION = 0.25\nPICKLE_FILENAME = 'notMNIST.pickle'\n\n\ndef load_samples(filenames):\n matrix = None\n for filename in filenames:\n try:\n arr = misc.imread(filename)\n except:\n continue\n length = arr.shape[0] * arr.shape[1]\n sample = arr.reshape(1, length)\n if matrix is not None:\n matrix = np.vstack((matrix, sample))\n else:\n matrix = np.matrix(sample)\n return matrix\n\n\ndef load_dataset():\n global CLASS_QTY\n classes = glob.glob('../datasets/notMNIST_large/*')\n sample_files = {}\n for class_filepath in classes:\n name = class_filepath[-1]\n sample_files[name] = np.asarray(glob.glob('%s/*.png' % class_filepath))\n\n keys = tuple(sample_files.keys())\n CLASS_QTY = len(keys)\n class_sample_qty = SAMPLE_QTY // CLASS_QTY\n\n X = None\n y = None\n for key, val in sample_files.items():\n cls_value_qty = len(val)\n limit = min(cls_value_qty, class_sample_qty)\n perm = np.random.permutation(cls_value_qty)[:limit]\n samples = val[perm]\n mat = load_samples(samples)\n cls_id = ord(key) - ord('A')\n targets = np.zeros((mat.shape[0], CLASS_QTY))\n targets[:, cls_id] = 1\n if X is not None:\n X = np.vstack((X, mat))\n y = np.vstack((y, targets))\n else:\n X = mat\n y = targets\n perm = np.random.permutation(X.shape[0])\n X = X[perm, :]\n y = y[perm]\n return X, y\n\n\ndef divide_dataset(samples, targets):\n assert samples.shape[0] == targets.shape[0]\n sample_qty = samples.shape[0]\n train_size = int(sample_qty * (1 - TEST_SET_PROPORTION))\n return samples[:train_size], targets[:train_size],\\\n samples[train_size:], targets[train_size:]\n\n\ndef manual_testing(samples, targets, predictions):\n sample_qty = samples.shape[0]\n for i in range(20):\n index = np.random.randint(0, sample_qty)\n arr = samples[index, :].reshape(28, 28)\n tar = targets[index]\n pred = predictions[index]\n print(\"%d) Sample %d: is %s, but %s was predicted\" % (i, index, chr(tar + ord('A')), chr(pred + ord('A'))))\n plt.imshow(arr, cmap='Greys_r')\n plt.show()\n\n\nif __name__ == '__main__':\n if os.path.exists(PICKLE_FILENAME):\n with open(PICKLE_FILENAME, 'rb') as f:\n obj = pickle.load(f)\n samples, targets = obj['samples'], obj['targets']\n print(\"Dataset loaded from file...\")\n else:\n samples, targets = load_dataset()\n with open(PICKLE_FILENAME, 'wb') as f:\n obj = {'samples': samples, 'targets': targets}\n pickle.dump(obj, f)\n print(\"File saved....\")\n\n X_train, y_train, X_test, y_test = divide_dataset(samples, 
targets)\n\n train_mean = np.mean(X_train, 0)\n train_std = np.std(X_train, 0)\n preprocess = lambda X : np.divide(X - train_mean, train_std)\n\n X_proc = preprocess(X_train)\n X_test = preprocess(X_test)\n\n\n X = tf.placeholder(tf.float32, shape=[None, 784])\n y = tf.placeholder(tf.float32, shape=[None, 10])\n\n W = tf.Variable(tf.random_normal([784, 10], stddev=1))\n b = tf.Variable(tf.zeros([10]))\n\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n\n yhat = tf.nn.softmax(tf.matmul(X, W) + b)\n\n #neg_log = -tf.reduce_sum(y * tf.log(yhat), reduction_indices=[1])\n neg_log = -tf.reduce_sum(y * tf.log(tf.clip_by_value(yhat, 1e-10, 1.0)))\n ce_loss = tf.reduce_mean(neg_log)\n\n train_step = tf.train.GradientDescentOptimizer(TRAINING_STEP).minimize(ce_loss)\n\n for iter in range(TRAINING_EPOCHS):\n train_step.run(feed_dict={X: X_proc, y: y_train})\n if iter % 100 == 0:\n print(\"Iter %d: %f\" % (iter, ce_loss.eval({X: X_proc, y: y_train})))\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(yhat, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n print(accuracy.eval({X: X_train, y: y_train}))\n real = tf.argmax(y, 1).eval({X: X_train, y: y_train})\n pred = tf.argmax(yhat, 1).eval({X: X_train, y: y_train})\n manual_testing(X_train, real, pred)\n\n", "repo_name": "ealtamir/udacity_deeplearning", "sub_path": "tf_mnist_logit.py", "file_name": "tf_mnist_logit.py", "file_ext": "py", "file_size_in_byte": 4474, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.random.seed", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "scipy.misc.imread", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.vstack", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 34, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random.permutation", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 84, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 96, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 103, "usage_type": "call"}, {"api_name": 
"numpy.mean", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 117, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 117, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 119, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 125, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 131, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 138, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "30780740438", "text": "from __future__ import division\r\n\r\n'''Simulation of Chemotaxis, a modified randon walk used by some bacteria to\r\nfind food sources. 
This uses a run and tumble process'''\r\n\r\nimport numpy\r\nimport matplotlib.pyplot as pyplot\r\nimport time\r\nimport matplotlib.cm\r\nimport pylab\r\nimport random\r\n\r\n# Parameters\r\nx0, y0 = 20, 40 # microns\r\nk = 0.2\r\nV = 2 # microns per second\r\ndt = 0.1\r\ntimebase = numpy.arange(0, 100, dt)\r\nr = numpy.array((x0, y0))\r\n\r\ndef energy_density(r):\r\n    '''This function simulates the sugar energy source specified by the function'''\r\n    x, y = r\r\n    f = 4000 - (x**2 + y**2)\r\n    return f\r\n\r\ndef velocity(a):\r\n    '''This function is for the velocities in the x and y plane'''\r\n    Vx = V * numpy.cos(a)\r\n    Vy = V * numpy.sin(a)\r\n    \r\n    return numpy.array((Vx, Vy))\r\n\r\ndef position(r):\r\n    '''This function returns the positions of the bacteria'''    \r\n    x, y = r\r\n    a = random.random() * 2 * numpy.pi\r\n    \r\n    position = numpy.zeros((len(timebase), 2))\r\n    shift = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n    \r\n    for i in range(len(timebase)):\r\n\r\n        position[i] = r\r\n        \r\n        eNew = energy_density(r)\r\n        shift.append(eNew)\r\n        shift = shift[-10:]\r\n        df = shift[-1] - shift[0]\r\n\r\n        t_half = 1 + k * df\r\n        if t_half < 0.1: t_half = 0.1\r\n\r\n        tau = t_half/ numpy.log(2)\r\n        \r\n        p = numpy.exp(-dt/ tau)\r\n\r\n        if random.random() < p:\r\n            '''move at speed * dt'''\r\n            r = r + velocity(a) * dt\r\n            \r\n        else:\r\n            a = random.random() * 2 * numpy.pi\r\n\r\n    return position\r\n\r\ndef MSD(r):\r\n    '''This function calculates the mean square displacement of bacteria\r\nagainst time from the origin and location of max energy'''\r\n    x, y = r\r\n    \r\n    s_d_f_initial = numpy.zeros((20,1000))\r\n    # Squared displacement from initial\r\n    s_d_f_origin = numpy.zeros((20,1000))\r\n    # Squared displacement from origin\r\n    \r\n\r\n    for i in range(20):\r\n\r\n        x_data = position([20, 40])[:,0]\r\n        y_data = position([20, 40])[:,1]\r\n        s_d_f_initial[i] = ((20-x_data)**2+(40-y_data)**2)\r\n        s_d_f_origin[i] = (x_data**2+y_data**2)\r\n\r\n    MSD_i = numpy.average(s_d_f_initial, axis=0)\r\n    MSD_f = numpy.average(s_d_f_origin, axis=0) \r\n\r\n    \r\n    return MSD_i, MSD_f\r\n    \r\ny_axis = numpy.arange(0, 50, 0.1)\r\nx_axis = numpy.arange(0, 40, 0.1)\r\n\r\ndat = numpy.zeros((len(y_axis), len(x_axis)))\r\n\r\nfor iy, y in enumerate(y_axis):\r\n    for ix, x in enumerate(x_axis):\r\n        dat[iy, ix] = energy_density((x, y))\r\n\r\npyplot.subplot(221) \r\nfor i in range(20):\r\n\r\n    position_list = position([20, 40])\r\n\r\n    x = position_list[: ,0]\r\n    y = position_list[: ,1]\r\n    \r\n    pyplot.subplot(221)\r\n    pyplot.plot(x,y)\r\n    \r\n    x0, y0 = 20, 40\r\n    x_f, y_f = x[-1], y[-1]\r\n    x_TR = x0, x_f\r\n    y_TR = y0, y_f\r\n    pyplot.subplot(222)\r\n    pyplot.plot(x_TR, y_TR, marker='o')\r\n    \r\n    \r\n\r\nMSD_i, MSD_f = MSD([20, 40])\r\n\r\npyplot.xlabel('x /microns')\r\npyplot.ylabel('y /microns')\r\npyplot.title('Initial and final bacteria position')\r\npyplot.tight_layout()\r\n\r\npyplot.subplot(221)\r\nim = pyplot.imshow(dat, extent = (-20, 40, -20, 50),\r\n                   origin = 'lower', cmap = matplotlib.cm.gray,\r\n                   aspect = 'auto')\r\npyplot.colorbar(im, orientation = 'vertical', label = 'energy density')\r\npyplot.xlabel('x /microns')\r\npyplot.ylabel('y /microns')\r\npyplot.title('Bacteria trajectory')\r\npyplot.tight_layout()\r\n\r\npyplot.subplot(212)\r\npyplot.plot(timebase, MSD_i, label = 'from origin', color = 'blue')\r\npyplot.plot(timebase, MSD_f, label = 'from Max energy', color = 'green')\r\npyplot.title('Mean Squared displacement')\r\npyplot.xlabel('Time')\r\npyplot.ylabel('MSD')\r\npyplot.legend(loc= 'lower right') 
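# Tidy the subplot spacing before rendering the trajectory, endpoint, and MSD panels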
\r\npyplot.tight_layout()\r\npyplot.show()\r\n \r\n\r\n", "repo_name": "sochiho/Chemotaxis", "sub_path": "Chemotaxis.py", "file_name": "Chemotaxis.py", "file_ext": "py", "file_size_in_byte": 3817, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "random.random", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 56, "usage_type": "call"}, {"api_name": "random.random", "line_number": 58, "usage_type": "call"}, {"api_name": "random.random", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 
129, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}]} +{"seq_id": "33936920249", "text": "import numpy as np\nimport cv2\nimport pandas as pd\nfrom pathlib import Path\nimport math\n\ndef get_angle(line1, line2):\n d1 = (line1[1][0] - line1[0][0], line1[1][1] - line1[0][1])\n d2 = (line2[1][0] - line2[0][0], line2[1][1] - line2[0][1])\n p = d1[0] * d2[0] + d1[1] * d2[1]\n n1 = math.sqrt(d1[0] * d1[0] + d1[1] * d1[1])\n n2 = math.sqrt(d2[0] * d2[0] + d2[1] * d2[1])\n ang = math.acos(p / (n1 * n2))\n ang = math.degrees(ang)\n return ang\ndef get_intersections(line1, line2):\n A = np.array(line1)\n B = np.array(line2)\n t, s = np.linalg.solve(np.array([A[1]-A[0], B[0]-B[1]]).T, B[0]-A[0]) \n \n return (1-t)*A[0] + t*A[1]\ndef getPerspectiveTransformMatrix(vid_path , show_frame = False):\n\n cap = cv2.VideoCapture(vid_path)\n # width , height = 1080 , 720\n border = np.zeros((720,1280,3), np.uint8)\n print(\"Start\")\n ret, frame = cap.read()\n\n test_frame =np.copy(frame)\n\n h, w = frame.shape[:2]\n mask = np.zeros([h+2, w+2], np.uint8) \n # bgr\n diff_value = (3,1,3)\n black = (0,0,0)\n frame = cv2.GaussianBlur(frame , (7,7) , 0)\n\n fillpoints = [(100,0) , (100,700) , (300,350) , (1000,350) , (1200,700)]\n \n for 
point in fillpoints:\r\n        cv2.floodFill(frame, mask, point, black, diff_value, diff_value)\r\n    for point in fillpoints:\r\n        cv2.circle(frame , point , 5 ,(0,0,255) , -1)\r\n\r\n    # Grayscale\r\n    imgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n    # Binarize\r\n    ret,thresh = cv2.threshold(imgray,100,255,0)\r\n    # Find the contours\r\n    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n    contours = list(contours)\r\n    # Sort the contours by area\r\n    contours.sort(key = cv2.contourArea , reverse=True)\r\n    # Draw the largest contour\r\n    cv2.drawContours(border, contours[0:1], -1, (0,0,255), 10)\r\n\r\n\r\n\r\n    # Grayscale\r\n    imgray = cv2.cvtColor(border,cv2.COLOR_BGR2GRAY)\r\n\r\n    canny = cv2.Canny(imgray, 30, 100)\r\n    lines = cv2.HoughLinesP(canny , 1.0 , np.pi/180 , 100 , np.array([]) , 200 , 100)\r\n\r\n    cv2.line(frame,(100,100),(100,500),(255,255,255),5)\r\n\r\n    horizon_line = []\r\n    left_vertical_line = None\r\n    right_vertical_line = None\r\n\r\n    try:\r\n        # Draw the court's vertical lines and collect the horizontal line coordinates\r\n        for line in lines:\r\n            for x1,y1,x2,y2 in line:\r\n                line_angle = get_angle([(x1,y1),(x2,y2)] , [(0,0),(0,100)])\r\n                line_angle_90 = 180 - line_angle if line_angle > 90 else line_angle\r\n                vectorx = x2 - x1\r\n                vectory = y2 - y1\r\n                \r\n                if (line_angle_90 < 40 and int(line_angle)): \r\n\r\n                    if left_vertical_line == None and line_angle > 90:\r\n                        # left_vertical_line = [(x1 - 20 , y1) , (x2 -20, y2)]\r\n                        left_vertical_line = [(x1 , y1) , (x2, y2)]\r\n                        cv2.line(frame,(x1 - vectorx * 100, y1 - vectory * 100),(x1 + vectorx * 100,y1 + vectory * 100),(255,0,0),5)\r\n                    elif right_vertical_line == None and line_angle < 90:\r\n                        right_vertical_line = [(x1, y1) , (x2, y2)]\r\n                        cv2.line(frame,(x1 - vectorx * 100, y1 - vectory * 100),(x1 + vectorx * 100,y1 + vectory * 100),(255,0,0),5)\r\n\r\n                elif line_angle_90 > 85:\r\n\r\n                    horizon_line.append([[x1 ,y1] , [x2,y2] ])\r\n\r\n        # Draw the top and bottom horizontal lines\r\n        top_line = min(horizon_line , key = lambda x : x[0][1] + x[1][1])\r\n        # top_line[0][1] -= 20\r\n        # top_line[1][1] -= 20\r\n        x1 , y1 = top_line[0]\r\n        x2 , y2 = top_line[1]\r\n        cv2.line(frame,(x1 - (x2-x1) * 100, y1 - (y2-y1) * 100),(x1 + (x2-x1) * 100,y1 + (y2-y1) * 100),(255,0,0),5) \r\n        bottom_line = max(horizon_line , key = lambda x : x[0][1] + x[1][1])\r\n        print(bottom_line)\r\n        x1 , y1 = bottom_line[0]\r\n        x2 , y2 = bottom_line[1]\r\n        cv2.line(frame,(x1 - (x2-x1) * 100, y1 - (y2-y1) * 100),(x1 + (x2-x1) * 100,y1 + (y2-y1) * 100),(255,0,0),5) \r\n\r\n        # print(get_intersections(top_line , vertical_line[0]).astype(int))\r\n        corner = []\r\n        corner.append(get_intersections(top_line , left_vertical_line).astype(int))\r\n        corner.append(get_intersections(bottom_line , left_vertical_line).astype(int))\r\n        corner.append(get_intersections(bottom_line , right_vertical_line).astype(int))\r\n        corner.append(get_intersections(top_line , right_vertical_line).astype(int))\r\n        cv2.circle(frame , get_intersections(top_line , left_vertical_line).astype(int) , 5 , (0,255,0),-1)\r\n        cv2.circle(frame , get_intersections(top_line , right_vertical_line).astype(int) , 5 , (0,255,0),-1)\r\n        cv2.circle(frame , get_intersections(bottom_line , left_vertical_line).astype(int) , 5 , (0,255,0),-1)\r\n        cv2.circle(frame , get_intersections(bottom_line , right_vertical_line).astype(int) , 5 , (0,255,0),-1)\r\n    \r\n    except:\r\n        return -1 , -1 , -1\r\n    \r\n\r\n    # cv2.imshow('board' , border) \r\n    \r\n    # Apply the perspective transform\r\n    old = np.float32(corner)\r\n    new = np.float32([[0,0], [0,h-1], [w-1,h-1] , [w-1,0] ])\r\n    matrix = cv2.getPerspectiveTransform(old , new)\r\n    imgOutput = cv2.warpPerspective(test_frame, matrix, (w , h), cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(0,0,0))\r\n\r\n    \r\n    if show_frame:\r\n        cv2.imshow('thresh' , thresh) \r\n        cv2.imshow('board' , border) \r\n 
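# Also show the annotated source frame and the warped top-down view of the court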
cv2.imshow('frame' , frame)\n cv2.imshow('Perspective', imgOutput)\n\n while cv2.waitKey(1) == -1:\n pass\n # cv2.destroyAllWindows()\n \n\n\n return matrix , corner , 1\n\n\n", "repo_name": "Yui-Arthur/AI_badminton", "sub_path": "utils/homography_transformation.py", "file_name": "homography_transformation.py", "file_ext": "py", "file_size_in_byte": 5576, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "math.sqrt", "line_number": 11, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "math.acos", "line_number": 13, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.floodFill", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.RETR_TREE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.drawContours", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 62, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 117, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 
128, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 130, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 136, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 137, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "16637172478", "text": "\"\"\"Code for generating simulations using phase correction models\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport numba as nb\nfrom joblib import Parallel, delayed\nimport dill as pickle\nfrom datetime import timedelta\nimport statsmodels.formula.api as smf\n\nimport src.analyse.analysis_utils as autils\nfrom src.analyse.phase_correction_models import PhaseCorrectionModel\n\n# Define the objects we can import from this file into others\n__all__ = [\n 'generate_phase_correction_simulations_for_coupling_parameters',\n 'generate_phase_correction_simulations_for_individual_conditions',\n 'Simulation'\n]\n\n\nclass Simulation:\n \"\"\"\n Creates X number (default 500) of simulated performances from a given phase correction model.\n\n Number of simulations defaults to 500, the same number in Jacoby et al. (2021).\n \"\"\"\n def __init__(\n self, pcm: PhaseCorrectionModel, num_simulations: int = 500, **kwargs\n ):\n # Default parameters\n self._resample_interval: timedelta = timedelta(seconds=1) # Get mean of IOIs within this window\n # Check and generate our input data\n self.total_beats: int = 1000\n self.num_simulations: int = num_simulations\n # Rolling window parameters -- should be the same as for the phase correction models\n self._rolling_window_size: str = kwargs.get('rolling_window_size', '2s') # 2 seconds = 2 beats at 120BPM\n self._rolling_min_periods: int = kwargs.get('rolling_min_periods', 2)\n # Get the raw data from our phase correction model\n if isinstance(pcm, PhaseCorrectionModel):\n self.keys_pcm = pd.DataFrame([pcm.keys_dic])\n self.drms_pcm = pd.DataFrame([pcm.drms_dic])\n elif isinstance(pcm, pd.DataFrame):\n self.keys_pcm = pcm[pcm['instrument'] == 'Keys']\n self.drms_pcm = pcm[pcm['instrument'] != 'Keys']\n # Get the zoom array used in the performance\n self.latency: np.ndarray = self._append_timestamps_to_latency_array(self.keys_pcm['zoom_arr'].iloc[0])\n # Raw simulation parameters, which will be used to create the dictionaries used in the simulations.\n self.parameter: str = kwargs.get('parameter', 'original')\n # Noise parameters for the simulation\n self.noise = kwargs.get('noise', autils.CONSTANT_RESID_NOISE) # Default noise term\n self.use_original_noise: bool = kwargs.get('use_original_noise', False) # Whether to use noise from model\n # Musician parameters\n self.keys_params_raw: dict = self._get_raw_musician_parameters(init=self.keys_pcm)\n self.drms_params_raw: dict = self._get_raw_musician_parameters(init=self.drms_pcm)\n self.keys_params: nb.typed.Dict = self._convert_musician_parameters_dict_to_numba(\n self._modify_musician_parameters_by_simulation_type(self.keys_params_raw)\n )\n self.drms_params: nb.typed.Dict = self._convert_musician_parameters_dict_to_numba(\n 
self._modify_musician_parameters_by_simulation_type(self.drms_params_raw)\n )\n # Empty lists to store our keys and drums simulations in\n self.keys_simulations: list[pd.DataFrame] = []\n self.drms_simulations: list[pd.DataFrame] = []\n # Empty attribute that we will fill with our results dictionary after creating simulations\n self.results_dic: dict = None\n\n @staticmethod\n def _get_number_of_beats_for_simulation(\n kp, dp\n ) -> int:\n \"\"\"\n Averages the total number of beats across both keys and drums, then gets the upper ceiling.\n \"\"\"\n return int(np.ceil(np.mean([kp['total_beats'].iloc[0], dp['total_beats'].iloc[0]])))\n\n @staticmethod\n def _append_timestamps_to_latency_array(\n latency_array, offset: int = 8, resample_rate: float = 0.75\n ) -> np.array:\n \"\"\"\n Appends timestamps showing the onset time for each value in the latency array applied to a performance\n \"\"\"\n # Define the endpoint for the linear space\n end = offset + (len(latency_array) * resample_rate)\n # Create the linear space\n lin = np.linspace(offset, end, num=len(latency_array), endpoint=False)\n # Append the two arrays together\n return np.c_[latency_array / 1000, lin]\n\n @staticmethod\n def _get_raw_musician_parameters(\n init: pd.DataFrame\n ) -> dict:\n \"\"\"\n Gets necessary simulation parameters from pandas dataframe and converts to a dictionary\n \"\"\"\n # Need to reset our index so we can use 0 indexing\n init = init.reset_index(drop=True)\n # Variables we need from our input\n cols = [\n 'correction_self', 'correction_partner', 'intercept', 'resid_std',\n ]\n # Create the dictionary and return\n dic = {s: init[s].iloc[0].astype(np.float64) for s in cols}\n dic.update({'instrument': init['instrument'].iloc[0]})\n return dic\n\n def _modify_musician_parameters_by_simulation_type(\n self, input_data\n ):\n \"\"\"\n Modifies a simulated musician's parameters according to the given simulation type\n \"\"\"\n # Used to get the mean of a particular coefficient across both musicians\n return {\n 'correction_self': input_data['correction_self'],\n 'correction_partner': input_data['correction_partner'],\n 'intercept': input_data['intercept'],\n 'resid_std': input_data['resid_std'] if self.use_original_noise else self.noise\n }\n\n @staticmethod\n def _convert_musician_parameters_dict_to_numba(\n python_dict: dict\n ) -> nb.typed.Dict:\n \"\"\"\n Converts a Python dictionary into a type that can be utilised by Numba\n \"\"\"\n # Create the empty dictionary\n nb_dict = nb.typed.Dict.empty(key_type=nb.types.unicode_type, value_type=nb.types.float64,)\n # Iterate through our dictionary\n for k, v in python_dict.items():\n # If the type is compatible with numba floats, set it in the numba dictionary\n if type(v) != str:\n nb_dict[k] = v\n return nb_dict\n\n def _initialise_empty_data(\n self, iois: tuple[float] = (0.5, 0.5), onset: float = 8\n ) -> nb.typed.Dict:\n \"\"\"\n Initialise an empty numba dictionary of string-array pairs, for storing data from one simulation in.\n \"\"\"\n # Make dictionary with strings as keys and arrays as values\n nb_dict = nb.typed.Dict.empty(\n key_type=nb.types.unicode_type,\n value_type=nb.types.float64[:],\n )\n # Fill the dictionary with arrays (pre-allocated in order to make running the simulation easier)\n for s in [\n 'my_onset', 'asynchrony', 'asynchrony_third_person',\n 'my_next_ioi', 'my_prev_ioi', 'my_next_ioi_diff', 'my_prev_ioi_diff'\n ]:\n nb_dict[s] = np.zeros(shape=self.total_beats)\n # Fill the dictionary arrays with our starting values\n # My onset\n 
nb_dict['my_onset'][0] = onset\n nb_dict['my_onset'][1] = onset + iois[0]\n # My next ioi\n nb_dict['my_next_ioi'][0] = iois[0]\n nb_dict['my_next_ioi'][1] = iois[1]\n # My previous ioi\n nb_dict['my_prev_ioi'][0] = np.nan\n nb_dict['my_prev_ioi'][1] = iois[0]\n # My next ioi diff\n nb_dict['my_next_ioi_diff'][0] = np.nan\n nb_dict['my_next_ioi_diff'][1] = iois[1] - iois[0] # This will always be 0\n # My previous ioi diff\n nb_dict['my_prev_ioi_diff'][0] = np.nan\n nb_dict['my_prev_ioi_diff'][1] = np.nan\n return nb_dict\n\n def create_all_simulations(\n self\n ) -> None:\n \"\"\"\n Run the simulations and create a list of dataframes for each individual performer\n \"\"\"\n # Create the simulations with joblib and numba function\n all_sims = Parallel(n_jobs=-1, prefer='threads')(delayed(autils.create_one_simulation)(\n self._initialise_empty_data(),\n self._initialise_empty_data(),\n self.keys_params,\n self.drms_params,\n np.random.normal(0, self.keys_params['resid_std'], 10000),\n np.random.normal(0, self.drms_params['resid_std'], 10000),\n self.latency,\n self.total_beats\n ) for _ in range(self.num_simulations))\n # Format simulated data\n self.keys_simulations = [self._format_simulated_data(d[0]) for d in all_sims]\n self.drms_simulations = [self._format_simulated_data(d[1]) for d in all_sims]\n # After running simulations, clean up by converting simulation parameters from numba back to python dictionary\n # This will allow us to pickle instances of the Simulation class without errors, as numba dictionaries are\n # currently not supported by the pickle package.\n self.keys_params = dict(self.keys_params)\n self.drms_params = dict(self.drms_params)\n # Save our results dictionary to an attribute in the class, so we don't have to do it later (or multiple times!)\n self.results_dic = self._create_summary_dictionary()\n\n def _format_simulated_data(\n self, data: nb.typed.Dict\n ) -> pd.DataFrame:\n \"\"\"\n Formats data from one simulation by creating a dataframe, adding in the timedelta column, and resampling\n to get the mean IOI (defaults to every second)\n \"\"\"\n # Create dataframe from the numba dictionary by first converting it to a python dictionary then to a dataframe\n df = pd.DataFrame(dict(data))\n # Drop rows with all zeros in\n df = df.loc[~(df == 0).all(axis=1)]\n # Convert my onset column to a timedelta\n idx = pd.to_timedelta([timedelta(seconds=val) for val in df['my_onset']])\n offset = timedelta(seconds=8 - df.iloc[0]['my_onset'])\n # Set the index to our timedelta column\n df = df.set_index(idx)\n # Get our rolling IOI standard deviation values\n df = self._get_rolling_standard_deviation_values(df)\n # Resample to the desired frequency with offset, get the mean values, and interpolate to fill NaNs\n return df.resample('1s', offset=offset).apply(np.nanmean).interpolate(limit_direction='backward')\n\n def _get_rolling_standard_deviation_values(\n self, df: pd.DataFrame, cols: tuple[str] = ('my_prev_ioi',)\n ) -> pd.DataFrame:\n # Create the rolling window with the desired window size\n roll = df.rolling(\n window=self._rolling_window_size, min_periods=self._rolling_min_periods, closed='both', on=df.index\n )\n # Iterate through the required columns\n for col in cols:\n # Extract the standard deviation and convert into milliseconds\n df[f'{col}_std'] = roll[col].std() * 1000\n return df\n\n @staticmethod\n def _get_average_var_for_one_simulation(\n all_perf: list[pd.DataFrame], var: str = 'my_next_ioi'\n ) -> pd.DataFrame:\n \"\"\"\n Concatenate all simulations together 
and get the row-wise average (i.e. avg IOI every second)\n        \"\"\"\n        # We use absolute values here, as this makes most sense when getting mean async across performers\n        # For example, if keys-drums async = -0.5 and drums-keys async = 0.5, mean without absolute values == 0\n        # Instead, this should be 0.5.\n        return pd.DataFrame(\n            pd.concat([df_[var] for df_ in all_perf], axis=1).abs().mean(axis=1), columns=[var]\n        )\n\n    def get_average_tempo_slope(\n            self, func=np.nanmean, **kwargs\n    ) -> float:\n        \"\"\"\n        Returns the average tempo slope for all simulations.\n\n        Method:\n        ---\n        - For every simulation, zip the corresponding keys and drums performance together.\n        - Then, get the average IOI for every second across both keys and drums.\n        - This is straightforward, because we resampled to average IOI per second in _format_simulated_data\n        - Convert average IOI to average BPM by dividing by 60, then regress against elapsed seconds\n        - Extract the slope coefficient, take the median across all simulations, and return\n        \"\"\"\n        coeffs = []\n        # Iterate through every simulation individually\n        for keys, drms in zip(self.keys_simulations, self.drms_simulations):\n            # Concatenate keyboard and drums performance and get average IOI every second\n            avg = self._get_average_var_for_one_simulation([keys, drms])\n            # Get elapsed number of seconds\n            avg['elapsed_seconds'] = avg.index.seconds\n            # Convert IOIs to BPM for tempo slope regression\n            avg['my_next_bpm'] = 60 / avg['my_next_ioi']\n            avg = avg.dropna()\n            # Conduct and fit the regression model\n            md = smf.ols('my_next_bpm~elapsed_seconds', data=avg.dropna()).fit()\n            # Extract the tempo slope coefficient and append to list\n            coeffs.append(md.params[1])\n        # Calculate the median tempo slope coefficient (robust to outliers!) from all simulations and return\n        return func(pd.Series(coeffs).replace(-np.Inf, np.nan), **kwargs)\n\n    def get_average_ioi_variability(\n            self, func=np.nanmean, **kwargs\n    ) -> float:\n        \"\"\"\n        Returns the average IOI variability for all simulations.\n\n        Method:\n        ---\n        - For every simulation, get the median IOI standard deviation value over the window size\n        - Calculate the mean of all of these values.\n        \"\"\"\n        return func([\n            [s['my_prev_ioi_std'].median() for s in self.keys_simulations],\n            [s['my_prev_ioi_std'].median() for s in self.drms_simulations]\n        ], **kwargs)\n\n    def get_average_pairwise_asynchrony(\n            self, func=np.nanmean, async_col: str = 'asynchrony', **kwargs\n    ) -> float:\n        \"\"\"\n        Gets the average pairwise asynchrony (in milliseconds!) 
across all simulated performances\n        \"\"\"\n        def pw_async(keys, drms):\n            \"\"\"\n            Function used to calculate the pairwise asynchrony for a single simulation, in milliseconds\n            \"\"\"\n            # Concatenate the two asynchrony columns together\n            conc = np.concatenate((keys[async_col].to_numpy(), drms[async_col].to_numpy()))\n            # Square the values, take the mean, then the square root, then convert to milliseconds and return\n            return np.sqrt(np.nanmean(np.square(conc))) * 1000\n\n        # Calculate the mean pairwise async across all performances\n        return func([pw_async(k_, d_) for k_, d_ in zip(self.keys_simulations, self.drms_simulations)], **kwargs)\n\n    def get_simulation_data_for_plotting(\n            self, plot_individual: bool = True, plot_average: bool = True, var: str = 'my_next_ioi',\n            timespan: tuple = (7, 101),\n    ) -> tuple:\n        \"\"\"\n        Wrangles simulation data into a format that can be plotted and returns.\n        \"\"\"\n        # Create simulations if we haven't already done so\n        if len(self.keys_simulations) < 1 or len(self.drms_simulations) < 1:\n            self.create_all_simulations()\n        # If we're plotting individual simulations\n        individual_sims = []\n        grand_avg = None\n        if plot_individual:\n            # Iterate through individual keys and drums simulations\n            for k_, d_ in zip(self.keys_simulations, self.drms_simulations):\n                # Average individual simulation\n                avg = self._get_average_var_for_one_simulation([k_, d_], var=var)\n                # Subset for required timespan\n                avg = avg[(avg.index.seconds >= timespan[0]) & (avg.index.seconds <= timespan[1])]\n                individual_sims.append(avg)\n        # If we're plotting our average simulation\n        if plot_average:\n            # Get grand average simulation by averaging our average simulations\n            zi = zip(self.keys_simulations, self.drms_simulations)\n            grand_avg = self._get_average_var_for_one_simulation(\n                [self._get_average_var_for_one_simulation([k_, d_], var=var) for k_, d_ in zi], var=var\n            )\n            # Subset for required timespan\n            grand_avg = grand_avg[(grand_avg.index.seconds >= timespan[0]) & (grand_avg.index.seconds <= timespan[1])]\n        return individual_sims, grand_avg\n\n    def _create_summary_dictionary(\n            self,\n    ) -> dict:\n        \"\"\"\n        Creates a summary dictionary with important simulation parameters\n        \"\"\"\n        return {\n            # Condition metadata\n            'trial': self.keys_pcm[\"trial\"].iloc[0],\n            'block': self.keys_pcm[\"block\"].iloc[0],\n            'latency': self.keys_pcm[\"latency\"].iloc[0],\n            'jitter': self.keys_pcm[\"jitter\"].iloc[0],\n            # Simulation metadata\n            'parameter': self.parameter,\n            'original_noise': self.use_original_noise,\n            'keys_parameters': self.keys_params_raw,\n            'drums_parameters': self.drms_params_raw,\n            # Metrics from the actual original performance on which this simulation was based\n            'tempo_slope_original': np.mean(\n                [self.keys_pcm['tempo_slope'].iloc[0], self.drms_pcm['tempo_slope'].iloc[0]]\n            ),\n            'ioi_variability_original': np.mean(\n                [self.keys_pcm['ioi_std'].iloc[0], self.drms_pcm['ioi_std'].iloc[0]]\n            ),\n            'asynchrony_original': np.mean(\n                [self.keys_pcm['pw_asym'].iloc[0], self.drms_pcm['pw_asym'].iloc[0]]\n            ),\n            # Summary metrics from all the simulated performances\n            'tempo_slope_simulated': self.get_average_tempo_slope(func=np.nanmedian),  # average\n            'tempo_slope_simulated_std': self.get_average_tempo_slope(func=np.nanstd),  # standard deviation\n            'tempo_slope_simulated_ci': self.get_average_tempo_slope(func=np.nanpercentile, q=[2.5, 97.5]),  # 95% ci\n            'ioi_variability_simulated': self.get_average_ioi_variability(func=np.nanmedian),\n            'ioi_variability_simulated_std': 
self.get_average_ioi_variability(func=np.nanstd),\n 'ioi_variability_simulated_ci': self.get_average_ioi_variability(func=np.nanpercentile, q=[2.5, 97.5]),\n 'asynchrony_simulated': self.get_average_pairwise_asynchrony(func=np.nanmedian,\n async_col='asynchrony_third_person'),\n 'asynchrony_simulated_indiv': self.get_average_pairwise_asynchrony(func=np.nanmedian),\n 'asynchrony_simulated_std': self.get_average_pairwise_asynchrony(func=np.nanstd),\n 'asynchrony_simulated_ci': self.get_average_pairwise_asynchrony(func=np.nanpercentile, q=[2.5, 97.5]),\n }\n\n\ndef generate_phase_correction_simulations_for_individual_conditions(\n mds: list[PhaseCorrectionModel], output_dir: str, logger=None, force_rebuild: bool = False,\n num_simulations: int = autils.NUM_SIMULATIONS\n) -> list[Simulation]:\n \"\"\"\n Create simulated performances using the coupling within every individual performance.\n \"\"\"\n # Try and load the models from the disk to save time, unless we're forcing them to rebuild anyway\n if not force_rebuild:\n all_sims = autils.load_from_disc(output_dir, filename='phase_correction_sims_orig.p')\n # If we've successfully loaded models, return these straight away\n if all_sims is not None:\n return all_sims\n # Create an empty list to hold simulations in\n all_sims = []\n for pcm in mds:\n # Initialise the simulation class\n sim = Simulation(\n pcm=pcm, num_simulations=num_simulations, parameter='original', leader=None, use_original_noise=True\n )\n autils.log_simulation(sim, logger)\n # Create all simulations for this parameter/leader combination\n sim.create_all_simulations()\n # Append the simulation to our list\n all_sims.append(sim)\n # Pickle the result -- this will be quite large, depending on the number of simulations!\n pickle.dump(all_sims, open(f\"{output_dir}\\\\phase_correction_sims_orig.p\", \"wb\"))\n return all_sims\n\n\ndef generate_phase_correction_simulations_for_coupling_parameters(\n mds: list[PhaseCorrectionModel], output_dir: str, logger=None, force_rebuild: bool = False,\n num_simulations: int = autils.NUM_SIMULATIONS\n) -> tuple[list[Simulation], str]:\n \"\"\"\n Create simulated performances across a range of artificial coupling parameters for every phase correction model\n \"\"\"\n def grouper(gr):\n return gr.groupby('instrument', as_index=False).agg(\n {\n 'trial': 'mean', 'block': 'mean', 'latency': 'mean', 'jitter': 'mean', 'tempo_slope': 'mean',\n 'ioi_std': 'mean', 'pw_asym': 'mean', 'zoom_arr': 'first', 'intercept': 'mean',\n 'correction_partner': 'mean', 'correction_self': 'mean', 'resid_std': 'mean'\n }\n )\n\n # Try and load the models from the disk to save time, unless we're forcing them to rebuild anyway\n if not force_rebuild:\n all_sims = autils.load_from_disc(output_dir, filename='phase_correction_sims.p')\n # If we've successfully loaded models, return these straight away\n if all_sims is not None and isinstance(all_sims, list):\n if len(all_sims) != 0:\n return (\n all_sims,\n f'... 
skipping, simulations loaded from {output_dir}\\\\phase_correction_sims.p'\n )\n # Create the dataframe\n df = pd.concat(\n [pd.concat([pd.DataFrame([pcm.keys_dic]), pd.DataFrame([pcm.drms_dic])]) for pcm in mds]\n ).reset_index(drop=True)\n # Create an empty list to hold our simulations\n all_sims = []\n avg_noise = 0.005\n # Iterate through each condition\n for idx, grp in df.groupby(by=['latency', 'jitter']):\n # Iterate through each duo\n for i, g in grp.groupby('trial'):\n # Create the grouped model, averaging performance of each duo for one condition over both sessions\n pcm_o = grouper(g)\n # Create the simulation object\n sim = Simulation(\n pcm=pcm_o, num_simulations=num_simulations, parameter='original', leader=None, use_original_noise=False,\n noise=avg_noise\n )\n # Log the current duo and condition in our GUI, if we've passed a logger\n autils.log_simulation(sim, logger)\n # Create all simulations and append the simulation object to our list\n sim.create_all_simulations()\n all_sims.append(sim)\n # Create the grouped phase correction model, across all trials\n pcm_a = grouper(grp)\n # Set our trial metadata to 0 (helpful when logging)\n pcm_a['trial'] = 0\n # Create our anarchy model: both coupling coefficients set to 0\n anarchy_md = pcm_a.copy()\n anarchy_md['correction_partner'] = 0\n anarchy_md['intercept'] = 0\n # Create our democracy model: both coupling coefficients set to their means\n democracy_md = pcm_a.copy()\n democracy_md['correction_partner'] = democracy_md['correction_partner'].mean()\n democracy_md['intercept'] = 0\n # Create our leadership model: drums coupling set to 0, keys coupling set to mean\n leadership_md = pcm_a.copy()\n leadership_md['correction_partner'] = np.where(\n leadership_md['instrument'] == 'Drums', 0,\n leadership_md[leadership_md['instrument'] == 'Keys']['correction_partner']\n )\n leadership_md['intercept'] = 0\n # Iterate over all of our paradigm models\n for md, param in zip([anarchy_md, democracy_md, leadership_md],\n ['anarchy', 'democracy', 'leadership']):\n # Create our simulation\n sim = Simulation(\n pcm=md, num_simulations=num_simulations, parameter=param, leader=None, use_original_noise=False,\n noise=avg_noise\n )\n # Log the current simulation in our GUI\n autils.log_simulation(sim, logger)\n # Create the simulation and append to our list\n sim.create_all_simulations()\n all_sims.append(sim)\n # Pickle the result -- this can be quite large, if we're creating lots of simulations!\n pickle.dump(all_sims, open(f\"{output_dir}\\\\phase_correction_sims.p\", \"wb\"))\n return all_sims, f'...simulations saved in {output_dir}\\\\phase_correction_sims.p'\n\n\nif __name__ == '__main__':\n import logging\n import os\n\n # Configure logger\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n logger = logging.getLogger(__name__)\n # Default location for phase correction models\n logger.info(f\"Making simulations from models in {os.path.abspath(r'../../models')}\")\n\n # Default location for phase correction models\n raw = autils.load_from_disc(r\"..\\..\\models\", filename=\"phase_correction_mds.p\")\n # Default location to save output simulations\n output = r\"..\\..\\models\"\n # Generate simulations using coupling parameters and pickle\n generate_phase_correction_simulations_for_coupling_parameters(\n mds=raw, output_dir=output, force_rebuild=True, logger=logger\n )\n", "repo_name": "HuwCheston/Jazz-Jitter-Analysis", "sub_path": "src/analyse/simulations.py", 
"file_name": "simulations.py", "file_ext": "py", "file_size_in_byte": 25454, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "src.analyse.phase_correction_models.PhaseCorrectionModel", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 32, "usage_type": "name"}, {"api_name": "src.analyse.phase_correction_models.PhaseCorrectionModel", "line_number": 40, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 47, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils.CONSTANT_RESID_NOISE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils", "line_number": 51, "usage_type": "name"}, {"api_name": "numba.typed", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numba.typed", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.ceil", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numba.typed.Dict.empty", "line_number": 131, "usage_type": "call"}, {"api_name": "numba.typed", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numba.types", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numba.typed", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numba.typed.Dict.empty", "line_number": 146, "usage_type": "call"}, {"api_name": "numba.typed", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numba.types", "line_number": 147, "usage_type": "attribute"}, {"api_name": "numba.types", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numba.typed", "line_number": 141, "usage_type": "attribute"}, {"api_name": "joblib.Parallel", "line_number": 181, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 181, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils.create_one_simulation", "line_number": 181, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 187, "usage_type": 
"attribute"}, {"api_name": "numba.typed", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 210, "usage_type": "call"}, {"api_name": "pandas.to_timedelta", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 246, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 247, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 251, "usage_type": "attribute"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 275, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 275, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.Inf", "line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 282, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 298, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 365, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 375, "usage_type": "attribute"}, {"api_name": "numpy.nanstd", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.nanpercentile", "line_number": 377, "usage_type": "attribute"}, {"api_name": "numpy.nanmedian", "line_number": 378, "usage_type": "attribute"}, {"api_name": "numpy.nanstd", "line_number": 379, "usage_type": "attribute"}, {"api_name": "numpy.nanpercentile", "line_number": 380, "usage_type": "attribute"}, {"api_name": "numpy.nanmedian", "line_number": 381, "usage_type": "attribute"}, {"api_name": "numpy.nanmedian", "line_number": 383, "usage_type": "attribute"}, {"api_name": "numpy.nanstd", "line_number": 384, "usage_type": "attribute"}, {"api_name": "numpy.nanpercentile", "line_number": 385, "usage_type": "attribute"}, {"api_name": "src.analyse.phase_correction_models.PhaseCorrectionModel", "line_number": 390, "usage_type": "name"}, {"api_name": "src.analyse.analysis_utils.NUM_SIMULATIONS", "line_number": 391, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils", "line_number": 391, "usage_type": "name"}, {"api_name": "src.analyse.analysis_utils.load_from_disc", "line_number": 398, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 398, "usage_type": "name"}, {"api_name": "src.analyse.analysis_utils.log_simulation", "line_number": 409, "usage_type": 
"call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 409, "usage_type": "name"}, {"api_name": "dill.dump", "line_number": 415, "usage_type": "call"}, {"api_name": "src.analyse.phase_correction_models.PhaseCorrectionModel", "line_number": 420, "usage_type": "name"}, {"api_name": "src.analyse.analysis_utils.NUM_SIMULATIONS", "line_number": 421, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils", "line_number": 421, "usage_type": "name"}, {"api_name": "src.analyse.analysis_utils.load_from_disc", "line_number": 437, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 437, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 446, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 447, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 447, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils.log_simulation", "line_number": 464, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 464, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 482, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils.log_simulation", "line_number": 496, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 496, "usage_type": "name"}, {"api_name": "dill.dump", "line_number": 501, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 511, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 511, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 512, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 514, "usage_type": "call"}, {"api_name": "os.path", "line_number": 514, "usage_type": "attribute"}, {"api_name": "src.analyse.analysis_utils.load_from_disc", "line_number": 517, "usage_type": "call"}, {"api_name": "src.analyse.analysis_utils", "line_number": 517, "usage_type": "name"}]} +{"seq_id": "35522820470", "text": "import time\n\nimport matplotlib as mpl \nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import animation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport curses\nimport logging\nimport math\nimport cflib.crtp\nfrom cflib.crazyflie import Crazyflie\nfrom cflib.crazyflie.syncCrazyflie import SyncCrazyflie\nfrom cflib.crazyflie.log import LogConfig\nfrom cflib.crazyflie.syncLogger import SyncLogger\n\nimport csv\nfilename = \"datas\"\n\nURI = 'radio://0/88/2M/E7E7E7E7EB'\n# Only output errors from the logging framework\nlogging.basicConfig(level=logging.ERROR)\nlogging.basicConfig(filename='test.log',level=logging.DEBUG)\n\nx_list = list()\ny_list = list()\nz_list = list()\n\n\ndef init():\n line.set_data(x[:2],y[:2])\n return line,\n#The following functions are created for easy use of them\ndef Backward(t,vf):\n print(\"Backward \")\n for _ in range(t):\n cf.commander.send_hover_setpoint(vf, 0, 0, 0.2)\n time.sleep(0.1)\n real_time()\n return\ndef Forward(t,vf):\n print(\"Forward \")\n for y in range(t):\n cf.commander.send_hover_setpoint(vf, 0, 0, 0.2)\n time.sleep(0.1)\n real_time()\n return\ndef Right(t,vs):\n print(\"Right \")\n for _ in range(t):\n cf.commander.send_hover_setpoint(0, vs, 0, 0.2)\n time.sleep(0.1)\n real_time()\n return\ndef Left(t,vs):\n print(\"Left \")\n for _ in range(t):\n cf.commander.send_hover_setpoint(0, vs, 0, 0.2)\n time.sleep(0.1)\n real_time()\n return\ndef Hovering(t):\n print(\"Hovering \")\n for _ in 
range(t):\n cf.commander.send_hover_setpoint(0, 0, 0, 0.2)\n time.sleep(0.1)\n real_time()\n return\ndef Takeoff(t):\n print(\"Takeoff \")\n for y in range(t):\n cf.commander.send_hover_setpoint(0, 0, 0, y/25)\n time.sleep(0.1)\n real_time()\n return\ndef landing(t):\n print(\"Landing \")\n for y in range(t):\n cf.commander.send_hover_setpoint(0, 0, 0, (t-y) / 25)\n time.sleep(0.1)\n cf.commander.send_stop_setpoint()\n real_time()\n return\n\ndef main(stdscr):\n curses.curs_set(0)\n stdscr.clear()\n while 1:\n key=stdscr.getch()\n #print(key)\n #TAKING OFF\n if key == 10:#curses.KEY_ENTER:#take off move\n print(\"Takeoff \\r\")\n for y in range(5):\n if key==curses.KEY_UP:\n Forward(30,0.2)\n elif key==curses.KEY_DOWN:\n Backward(30,-0.2)\n\n elif key==curses.KEY_RIGHT:\n Right(30,-0.2)\n\n elif key==curses.KEY_LEFT:\n Left(30,0.2) \n elif key==curses.KEY_BACKSPACE:\n landing(5)\n\n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30)\n cf.commander.send_hover_setpoint(0, 0, 0, y / 25)\n time.sleep(0.1)\n logging.debug('take off'.format(0,0,0,y/25))\n real_time()\n \n #LANDING\n elif key== curses.KEY_BACKSPACE:#landing \n print(\"Landing \\r\")\n for y in range(5):\n if key==curses.KEY_UP:\n Forward(30,0.2)\n \n elif key==curses.KEY_DOWN:\n Backward(30,-0.2) \n\n elif key==curses.KEY_RIGHT:\n Right(30,-0.2) \n elif key==curses.KEY_LEFT:\n Left(30,0.2) \n elif key==curses.KEY_ENTER:\n Takeoff(10) \n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30)\n \n cf.commander.send_hover_setpoint(0, 0, 0, (5 - y) / 25)\n time.sleep(0.1)\n cf.commander.send_stop_setpoint()\n real_time()\n #ESCAPING\n elif key == 27:#Escape tab\n break\n #FORWARD_DIRECTION\n elif key==curses.KEY_UP:\n \n print(\"Forward \\r\")\n for _ in range(10):\n if key==curses.KEY_ENTER:\n\n Takeoff(10) \n elif key==curses.KEY_DOWN:\n Backward(30,-0.2) \n elif key==curses.KEY_RIGHT:\n Right(30,-0.2)\n \n elif key==curses.KEY_LEFT:\n Left(30,0.2)\n \n elif key==curses.KEY_BACKSPACE:\n landing(5) \n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30)\n cf.commander.send_hover_setpoint(0.2, 0, 0, 0.2)\n time.sleep(0.1)\n real_time()\n \n #BACKWARD_DIRECTION\n elif key==curses.KEY_DOWN:\n print(\"Backward \\r\")\n for _ in range(30):\n if key==curses.KEY_UP:\n Forward(30,0.2)\n \n elif key==curses.KEY_ENTER:\n Takeoff(10) \n elif key==curses.KEY_RIGHT:\n Right(30,-0.2) \n elif key==curses.KEY_LEFT:\n Left(30,0.2) \n elif key==curses.KEY_BACKSPACE:\n landing(5)\n \n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30) \n cf.commander.send_hover_setpoint(-0.2, 0, 0, 0.2)\n time.sleep(0.1)\n real_time()\n #RIGHT_DIRECTION \n elif key==curses.KEY_RIGHT:\n print(\"Right \\r\")\n for _ in range(30):\n if key==curses.KEY_UP:\n Forward(30,0.2) \n elif key==curses.KEY_DOWN:\n Backward(30,-0.2) \n elif key==curses.KEY_ENTER:\n Takeoff(10)\n \n elif key==curses.KEY_LEFT:\n Left(30,0.2)\n \n elif key==curses.KEY_BACKSPACE:\n landing(5)\n \n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30)\n \n cf.commander.send_hover_setpoint(0, -0.2, 0, 0.2)\n time.sleep(0.1)\n real_time()\n #LEFT_DIRECTION \n elif key==curses.KEY_LEFT:\n print(\"Left \\r\")\n for _ in range(30):\n if key==curses.KEY_UP:\n Forward(30,0.2) \n elif key==curses.KEY_DOWN:\n Backward(30,-0.2)\n \n elif key==curses.KEY_ENTER:\n Takeoff(10)\n \n elif key==curses.KEY_RIGHT:\n Right(30,-0.2)\n \n elif key==curses.KEY_BACKSPACE:\n landing(5)\n \n elif key==27:\n break\n elif key==curses.KEY_HOME:\n Hovering(30)\n 
cf.commander.send_hover_setpoint(0, 0.2, 0, 0.2)\n time.sleep(0.1)\n real_time()\n #HOVERING \n elif key==curses.KEY_HOME:#This is Hovering==Fn+Left arrow\n print(\"Hovering \\r\")\n for _ in range(30):\n if key==curses.KEY_UP:\n Forward(30,0.2)\n \n elif key==curses.KEY_DOWN:\n Backward(30,-0.2)\n \n elif key==curses.KEY_ENTER:\n Takeoff(10)\n \n elif key==curses.KEY_LEFT:\n Left(30,0.2) \n elif key==curses.KEY_BACKSPACE:\n landing(5) \n elif key==27:\n break\n elif key==curses.KEY_RIGHT:\n Right(30,-0.2) \n cf.commander.send_hover_setpoint(0, 0, 0, 0.2)\n time.sleep(0.1) \n real_time()\n \n \n stdscr.refresh()\n\ndef wait_for_position_estimator(scf):\n print('Waiting for estimator to find position...')\n\n log_config = LogConfig(name='Kalman Variance', period_in_ms=500)\n log_config.add_variable('kalman.varPX', 'float')\n log_config.add_variable('kalman.varPY', 'float')\n log_config.add_variable('kalman.varPZ', 'float')\n\n var_y_history = [1000] * 10\n var_x_history = [1000] * 10\n var_z_history = [1000] * 10\n\n threshold = 0.001\n\n with SyncLogger(scf, log_config) as logger:\n for log_entry in logger:\n data = log_entry[1]\n\n var_x_history.append(data['kalman.varPX'])\n var_x_history.pop(0)\n var_y_history.append(data['kalman.varPY'])\n var_y_history.pop(0)\n var_z_history.append(data['kalman.varPZ'])\n var_z_history.pop(0)\n\n min_x = min(var_x_history)\n max_x = max(var_x_history)\n min_y = min(var_y_history)\n max_y = max(var_y_history)\n min_z = min(var_z_history)\n max_z = max(var_z_history)\n\n #print(\"{} {} {}\".format(max_x - min_x, max_y - min_y, max_z - min_z))\n\n if (max_x - min_x) < threshold and (\n max_y - min_y) < threshold and (\n max_z - min_z) < threshold:\n break\ndef reset_estimator(scf):\n cf = scf.cf\n cf.param.set_value('kalman.resetEstimation', '1')\n time.sleep(0.1)\n cf.param.set_value('kalman.resetEstimation', '0')\n\n wait_for_position_estimator(cf)\n\ndef start_position_printing(scf):\n log_conf = LogConfig(name='Position', period_in_ms=500)\n log_conf.add_variable('kalman.stateX', 'float')\n log_conf.add_variable('kalman.stateY', 'float')\n log_conf.add_variable('kalman.stateZ', 'float')\n\n scf.cf.log.add_config(log_conf)\n log_conf.data_received_cb.add_callback(position_callback)\n log_conf.start()\n\ndef position_callback(timestamp, data, logconf):\n x = data['kalman.stateX']\n y = data['kalman.stateY']\n z = data['kalman.stateZ']\n # print('pos: ({}, {}, {})'.format(x, y, z))\n x_list.append(x)\n y_list.append(y)\n z_list.append(z)\n#real time plot in 2d \ny = y_list\nx = x_list\n\nfig, ax = plt.subplots(1,1)\nline, = ax.plot([], [], '-')\nplt.ion()\nplt.show()\nax.margins(0.05)\n\ndef animate(i):\n xdata = x[:i]\n ydata = y[:i]\n \n line.set_data(xdata, ydata)\n plt.plot(x,y)\n plt.gca().line[0].set_xdata(x)\n plt.gca().line[0].set_ydata(y)\n plt.gcf().canvas.flush_events()\n plt.gca().relim()\n plt.gca().autoscale_view()\n plt.pause(0.05)\n \n return line,\ndef real_time():#creating function which can plot trajectories in 2D in real time\n anim = animation.FuncAnimation(fig, animate, init_func=init, interval=5)\n plt.ion()\n plt.draw()\n \n plt.pause(0.00001) \n return\n\n# this is the last line of code for real time plotting in 2d\nif __name__ == '__main__':\n # Initialize the low-level drivers (don't list the debug drivers)\n cflib.crtp.init_drivers(enable_debug_driver=False)\n \n with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:\n cf = scf.cf\n time.sleep(2)\n logging.basicConfig(filename='app.log', filemode='w', 
format='%(name)s - %(levelname)s - %(message)s')\n \n reset_estimator(scf)\n start_position_printing(scf)\n curses.wrapper(main)\n f = open(filename + '.csv', 'w')\n\n\n with f:\n\n writer = csv.writer(f)\n writer.writerows([x_list,y_list,z_list])\n #3d plot after the flight\n fig=plt.figure()\n ax=fig.gca(projection='3d')\n\n ax.plot(x_list,y_list,z_list,'b-')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.plot([x_list[0]],[y_list[0]],[z_list[0]],'o',markerfacecolor='none', markeredgecolor='red',markersize=12.5,label='start')\n ax.plot([x_list[-1]],[y_list[-1]],[z_list[-1]],'o',markerfacecolor='red', markeredgecolor='red',markersize=12.5,label='end')\n plt.title('Trajectory of CF')\n \n ax.legend(numpoints=1)\n plt.show(block=True)\n plt.show()\n \n\n\n\n\n\n\n\n", "repo_name": "ozay-group/drone-testbed", "sub_path": "trajectory-planning/keyboard.py", "file_name": "keyboard.py", "file_ext": "py", "file_size_in_byte": 13275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.basicConfig", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "curses.curs_set", "line_number": 88, "usage_type": "call"}, {"api_name": "curses.KEY_UP", "line_number": 97, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 99, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 102, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 107, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 112, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 116, "usage_type": "call"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 120, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 123, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 126, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 129, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 133, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 137, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 141, "usage_type": "call"}, {"api_name": "curses.KEY_UP", "line_number": 148, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 152, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 155, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 157, "usage_type": "attribute"}, {"api_name": 
"curses.KEY_LEFT", "line_number": 160, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 163, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 167, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 170, "usage_type": "call"}, {"api_name": "curses.KEY_DOWN", "line_number": 174, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 177, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 180, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 184, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 186, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 191, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 194, "usage_type": "call"}, {"api_name": "curses.KEY_RIGHT", "line_number": 197, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 200, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 202, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 204, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 207, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 210, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 215, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 219, "usage_type": "call"}, {"api_name": "curses.KEY_LEFT", "line_number": 222, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 225, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 227, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 230, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 233, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 236, "usage_type": "attribute"}, {"api_name": "curses.KEY_HOME", "line_number": 241, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 244, "usage_type": "call"}, {"api_name": "curses.KEY_HOME", "line_number": 247, "usage_type": "attribute"}, {"api_name": "curses.KEY_UP", "line_number": 250, "usage_type": "attribute"}, {"api_name": "curses.KEY_DOWN", "line_number": 253, "usage_type": "attribute"}, {"api_name": "curses.KEY_ENTER", "line_number": 256, "usage_type": "attribute"}, {"api_name": "curses.KEY_LEFT", "line_number": 259, "usage_type": "attribute"}, {"api_name": "curses.KEY_BACKSPACE", "line_number": 261, "usage_type": "attribute"}, {"api_name": "curses.KEY_RIGHT", "line_number": 265, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 268, "usage_type": "call"}, {"api_name": "cflib.crazyflie.log.LogConfig", "line_number": 277, "usage_type": "call"}, {"api_name": "cflib.crazyflie.syncLogger.SyncLogger", "line_number": 288, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 315, "usage_type": "call"}, {"api_name": "cflib.crazyflie.log.LogConfig", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 353, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 357, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.animation", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 365, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 365, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 367, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 367, "usage_type": "name"}, {"api_name": "cflib.crtp.crtp.init_drivers", "line_number": 373, "usage_type": "call"}, {"api_name": "cflib.crtp.crtp", "line_number": 373, "usage_type": "attribute"}, {"api_name": "cflib.crtp", "line_number": 373, "usage_type": "name"}, {"api_name": "cflib.crazyflie.syncCrazyflie.SyncCrazyflie", "line_number": 375, "usage_type": "call"}, {"api_name": "cflib.crazyflie.Crazyflie", "line_number": 375, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 377, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 378, "usage_type": "call"}, {"api_name": "curses.wrapper", "line_number": 382, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 388, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 391, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 391, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 400, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 400, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 403, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 404, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 404, "usage_type": "name"}]} +{"seq_id": "42688332175", "text": "\"\"\"``tamago`` lives on\nhttps://github.com/hhollenstain/tamago-web\n\"\"\"\nfrom setuptools import setup, find_packages\nimport tamago_web\n\nINSTALL_REQUIREMENTS = [\n 'coloredlogs',\n 'flask',\n 'waitress',\n]\n\nTEST_REQUIREMENTS = {\n 'test':[\n 'pytest',\n 'pylint',\n 
'sure',\n        ]\n    }\n\nsetup(\n    name='Tamago Web',\n    version=tamago_web.VERSION,\n    description='Tamago web for Tamago Discord Bot',\n    url='https://github.com/hhollenstain/tamago-web',\n    packages=find_packages(),\n    include_package_data=True,\n    install_requires=INSTALL_REQUIREMENTS,\n    extras_require=TEST_REQUIREMENTS,\n    entry_points={\n        'console_scripts': [\n            'tamago_web = tamago_web.tamago_web:main',\n        ],\n    },\n    )\n", "repo_name": "hhollenstain/tamago-web", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "setuptools.setup", "line_number": 21, "usage_type": "call"}, {"api_name": "tamago_web.VERSION", "line_number": 23, "usage_type": "attribute"}, {"api_name": "setuptools.find_packages", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "1717803019", "text": "# standalone imports\n\n\n# aiogram imports\nfrom aiogram.types import KeyboardButton\nfrom aiogram.utils.keyboard import ReplyKeyboardMarkup\n\n\n\nasync def main_menu_keyboard():\n    '''\n    Main menu keyboard\n    :return: markup\n    '''\n    markup = ReplyKeyboardMarkup(\n        keyboard=[\n            [KeyboardButton(text='Прогуляться по городу'),\n             KeyboardButton(text='О проекте')]\n        ],\n        resize_keyboard=True)\n\n    return markup", "repo_name": "RevelRies/BotMayakovka_2", "sub_path": "Bot/keyboards/main_menu_keyboard.py", "file_name": "main_menu_keyboard.py", "file_ext": "py", "file_size_in_byte": 525, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "aiogram.utils.keyboard.ReplyKeyboardMarkup", "line_number": 15, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 17, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "28704663076", "text": "import ast\nimport re\n\nfrom setuptools import setup, find_packages\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('__init__.py', 'rb') as f:\n    version = str(ast.literal_eval(_version_re.search(\n        f.read().decode('utf-8')).group(1)))\n\nsetup(\n    name = 'docker_rest_service',\n    version = version,\n    url = 'https://github.com/alsbi/docker_rest_service',\n    license = 'MIT',\n    author = 'alsbi',\n    author_email = 'feano4ik@gmail.com',\n    description = 'Docker control service',\n    long_description = open('README.md').read(),\n    packages = find_packages(),\n    include_package_data = True,\n    install_requires = [\n        'flask',\n        'requests_unixsocket',\n    ],\n    entry_points = {\n        'console_scripts': [\n            'docker_rest_service=docker_rest_service:main'\n        ],\n    }\n)\n", "repo_name": "alsbi/docker_rest_service", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.compile", "line_number": 6, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 12, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "14290470161", "text": "import os\nimport sys\n\nimport pandas as pd\nimport numpy as np\nimport dill\n\n\nfrom pathlib import Path\n\nfrom src.exception import CustomException\nfrom src.logger import logger\n
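\n# NOTE: usage sketch added for clarity, not part of the original module; the path below is hypothetical.\n#   save_object(Path('artifacts/preprocessor.pkl'), preprocessor)\n#   preprocessor = load_object(Path('artifacts/preprocessor.pkl'))\n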
\ndef save_object(file_path: Path, obj: object) -> None:\n    \"\"\"Saves an object to a .pkl file.\n\n    Parameters:\n    - file_path: Path of the file.\n    - obj: Object to save.\n\n    \"\"\"\n\n    try:\n        dir_path = os.path.dirname(file_path)\n\n        os.makedirs(dir_path, exist_ok=True)\n\n        with open(file_path, 'wb') as f:\n            dill.dump(obj, f)\n        logger.info(f'Objeto guardado exitosamente en {file_path}')\n    \n    except Exception as e:\n        logger.error(f'Ocurrió un error al guardar el objeto: {e}', exc_info=True)\n        raise CustomException(f'Error al guardar el objeto en {file_path}', e, sys.exc_info()[2])\n    \ndef load_object(file_path: Path) -> object:\n    \"\"\"Loads an object from a .pkl file.\n\n    Parameters:\n    - file_path: Path of the file.\n\n    Returns:\n    - Object loaded from the file.\n    \"\"\"\n    try:\n        with open(file_path, 'rb') as f:\n            obj = dill.load(f)\n        logger.info(f'Objeto cargado exitosamente desde {file_path}')\n        return obj\n    except Exception as e:\n        logger.error(f'Ocurrió un error al cargar el objeto: {e}', exc_info=True)\n        raise CustomException(f'Error al cargar el objeto desde {file_path}', e, sys.exc_info()[2])\n\n\ndef get_random_params(model_name: str) -> list:\n    \"\"\"Gets a list of random parameters for the given pipeline.\n\n    Parameters:\n    - model_name: Name of the model.\n    \n    Returns:\n    - List of random parameters.\n    \"\"\"\n    if model_name == 'DecisionTree':\n        return [\n            {\n                \"smote__sampling_strategy\": [float(x) for x in np.arange(0.4, 1.0, 0.1)],\n                \"smote__k_neighbors\": [2, 3, 4, 5, 6],\n                \"selectfrommodel__max_features\": [2, 3, 4, 5, 6],\n                \"decisiontreeclassifier__criterion\": ['gini', 'entropy', 'log_loss'],\n                \"decisiontreeclassifier__max_depth\": [int(x) for x in np.arange(1, 7)],\n                \"decisiontreeclassifier__min_samples_split\": [float(x) for x in np.arange(0.05, 0.3, 0.05)],\n                \"decisiontreeclassifier__splitter\": ['best', 'random'],\n                \"decisiontreeclassifier__min_samples_leaf\": [1, 5, 10, 15, 20],\n                \"decisiontreeclassifier__class_weight\": [None, 'balanced']\n\n            }\n        ]\n    elif model_name == 'RandomForest':\n        return [\n            {\n                \"smote__sampling_strategy\": [float(x) for x in np.arange(0.5, 1.1, 0.1)],\n                \"smote__k_neighbors\": [3, 4, 5, 6],\n                \"randomforestclassifier__n_estimators\": [int(x) for x in np.linspace(100, 500, 10)],\n                \"randomforestclassifier__criterion\": ['gini', 'entropy', 'log_loss'],\n                \"randomforestclassifier__min_samples_split\": [float(x) for x in np.arange(0.05, 0.3, 0.05)],\n                \"randomforestclassifier__min_samples_leaf\": [1, 5, 10, 15, 20],\n                \"randomforestclassifier__max_features\": ['sqrt', 'log2'],\n                \"randomforestclassifier__class_weight\": [None, 'balanced', 'balanced_subsample'],\n                \"randomforestclassifier__bootstrap\": [True],\n                \"randomforestclassifier__oob_score\": [True],\n                \"randomforestclassifier__warm_start\": [True, False],\n                \"randomforestclassifier__max_samples\": [0.5, 0.8, 1.0],\n                \"randomforestclassifier__max_depth\": [int(x) for x in np.arange(1, 7)]\n            }\n        ]\n    else:\n        raise ValueError(f\"{model_name} : Modelo no sujeto a optimización.\")\n\n\ndef get_params_grid(puntuaciones, model_name: str) -> list:\n    \"\"\"Gets a list of parameters for the given pipeline.\n\n    Parameters:\n    - puntuaciones: fitted search results object exposing best_params_ (name kept from the original).\n    - model_name: Name of the model.\n\n    Returns:\n    - List of parameters.\n    \"\"\"\n    \n\n    if model_name == 'DecisionTree':\n        return [\n            {\n                \"smote__sampling_strategy\": [puntuaciones.best_params_[\"smote__sampling_strategy\"] - 0.1, \n                                             puntuaciones.best_params_[\"smote__sampling_strategy\"],\n                                             puntuaciones.best_params_[\"smote__sampling_strategy\"] + 0.1],\n                
\"smote__k_neighbors\": [puntuaciones.best_params_[\"smote__k_neighbors\"]],\n \"selectfrommodel__max_features\": [puntuaciones.best_params_['selectfrommodel__max_features']],\n \"decisiontreeclassifier__criterion\": ['entropy', 'gini', 'log_loss'],\n \"decisiontreeclassifier__max_depth\": [puntuaciones.best_params_[\"decisiontreeclassifier__max_depth\"] - 1,\n puntuaciones.best_params_[\"decisiontreeclassifier__max_depth\"],\n puntuaciones.best_params_[\"decisiontreeclassifier__max_depth\"] + 1],\n \"decisiontreeclassifier__min_samples_split\": [puntuaciones.best_params_[\"decisiontreeclassifier__min_samples_split\"] - 0.05,\n puntuaciones.best_params_[\"decisiontreeclassifier__min_samples_split\"],\n puntuaciones.best_params_[\"decisiontreeclassifier__min_samples_split\"] + 0.05],\n \"decisiontreeclassifier__splitter\": [puntuaciones.best_params_[\"decisiontreeclassifier__splitter\"]],\n \"decisiontreeclassifier__min_samples_leaf\": [puntuaciones.best_params_[\"decisiontreeclassifier__min_samples_leaf\"]],\n \"decisiontreeclassifier__class_weight\": [puntuaciones.best_params_[\"decisiontreeclassifier__class_weight\"]]\n }\n ]\n elif model_name == 'RandomForest':\n return [\n {\n \"smote__sampling_strategy\" : [puntuaciones.best_params_[\"smote__sampling_strategy\"] - 0.1 , puntuaciones.best_params_[\"smote__sampling_strategy\"] , puntuaciones.best_params_[\"smote__sampling_strategy\"] +0.1],\n \"smote__k_neighbors\" : [puntuaciones.best_params_[\"smote__k_neighbors\"]],\n \"randomforestclassifier__n_estimators\": [puntuaciones.best_params_[\"randomforestclassifier__n_estimators\"]],\n \"randomforestclassifier__criterion\": ['gini', 'entropy', 'log_loss'],\n \"randomforestclassifier__min_samples_split\" : [puntuaciones.best_params_['randomforestclassifier__min_samples_split']],\n \"randomforestclassifier__min_samples_leaf\" : [puntuaciones.best_params_['randomforestclassifier__min_samples_leaf']-5,puntuaciones.best_params_['randomforestclassifier__min_samples_leaf'],puntuaciones.best_params_['randomforestclassifier__min_samples_leaf']+5 ],\n \"randomforestclassifier__max_features\" : [puntuaciones.best_params_['randomforestclassifier__max_features']],\n \"randomforestclassifier__class_weight\": [puntuaciones.best_params_['randomforestclassifier__class_weight']],\n \"randomforestclassifier__bootstrap\": [True],\n \"randomforestclassifier__oob_score\": [True],\n \"randomforestclassifier__warm_start\": [puntuaciones.best_params_['randomforestclassifier__warm_start']],\n \"randomforestclassifier__max_samples\": [puntuaciones.best_params_['randomforestclassifier__max_samples']-0.1,puntuaciones.best_params_['randomforestclassifier__max_samples'],puntuaciones.best_params_['randomforestclassifier__max_samples']+0.1],\n \"randomforestclassifier__max_depth\": [puntuaciones.best_params_['randomforestclassifier__max_depth']-1,puntuaciones.best_params_['randomforestclassifier__max_depth'],puntuaciones.best_params_['randomforestclassifier__max_depth']+1]\n }\n ]\n else:\n raise ValueError(f\"{model_name} : Modelo no sujeto a optimización.\")", "repo_name": "DavidPalazon/Implementacion-en-python-de-metodos-de-ayuda-al-diagnostico-precoz-de-glaucoma", "sub_path": "src/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7847, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pathlib.Path", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 24, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 26, "usage_type": "call"}, {"api_name": "dill.dump", "line_number": 29, "usage_type": "call"}, {"api_name": "src.logger.logger.info", "line_number": 30, "usage_type": "call"}, {"api_name": "src.logger.logger", "line_number": 30, "usage_type": "name"}, {"api_name": "src.logger.logger.error", "line_number": 33, "usage_type": "call"}, {"api_name": "src.logger.logger", "line_number": 33, "usage_type": "name"}, {"api_name": "src.exception.CustomException", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 34, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 36, "usage_type": "name"}, {"api_name": "dill.load", "line_number": 47, "usage_type": "call"}, {"api_name": "src.logger.logger.info", "line_number": 48, "usage_type": "call"}, {"api_name": "src.logger.logger", "line_number": 48, "usage_type": "name"}, {"api_name": "src.logger.logger.error", "line_number": 51, "usage_type": "call"}, {"api_name": "src.logger.logger", "line_number": 51, "usage_type": "name"}, {"api_name": "src.exception.CustomException", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "13528237258", "text": "import numpy as np\n\nfrom itertools import count\n\n\n\ndef load(filename: str= \"input.txt\"):\n with open(filename, mode=\"r\", encoding=\"UTF-8\") as cin:\n cin = map(str.strip, cin)\n data = map(list, cin)\n data = list(data)\n data = np.asarray(data).astype(int)\n\n return data\n\n\n\ndef perform_step(data):\n \"\"\"\n \"\"\"\n # First, the energy level of each octopus increases by 1.\n data += 1\n\n # This process continues as long as new octopuses keep having their energy\n # level increased beyond 9.\n while np.count_nonzero(data > 9) > 0:\n flashed = np.argwhere(data > 9)\n for x, y in flashed:\n # This increases the energy level of all adjacent octopuses by 1,\n # including octopuses that are diagonally adjacent.\n data[\n max(0, x-1):min(9, x+1)+1,\n max(0, y-1):min(9, y+1)+1\n ] += 1\n # An octopus can only flash at most once per step.\n data[x,y] = -100000\n\n # Finally, any octopus that flashed during this step has its energy level\n # set to 0, as it used all of its energy to flash.\n data[data < 0] = 0\n\n return data\n\n\n\ndef part1(data, num_steps=100) -> int:\n \"\"\"\n How many total flashes are there after 100 steps?\n \"\"\"\n total = 0\n for step in range(num_steps):\n data = perform_step(data)\n total += np.count_nonzero(data == 0)\n #print(f\"Step {step+1}:\", data, sep=\"\\n\")\n\n return total\n\n\n\ndef part2(data) -> int:\n \"\"\"\n What is the first step during which all octopuses flash?\n \"\"\"\n for step in count(1):\n data = perform_step(data)\n if np.all(data == 0):\n return step\n", "repo_name": "SamuelLarkin/AdventOfCode2021", "sub_path": "day11/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 1716, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.asarray", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 53, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "39815188910", "text": "import os\nimport torch\nimport numpy as np\nimport torch.utils.data as data\nfrom utils.logger import *\nfrom .build import DATASETS\nfrom numpy.random import default_rng\nfrom .group_utils import Group\nfrom torchvision import transforms\nfrom datasets import data_transforms\n\n\nbiomass_transforms = transforms.Compose(\n [\n data_transforms.PointcloudRotate(),\n #data_transforms.PointcloudJitter(std=0.01, clip=0.05),\n data_transforms.RandomHorizontalFlip(),\n data_transforms.PointcloudTranslate(translate_range=0.2)\n ])\n\ntrain_transforms = transforms.Compose(\n [\n data_transforms.PointcloudRotate(),\n data_transforms.PointcloudScaleAndTranslate(scale_low=0.9, scale_high=1.1, translate_range=0.1),\n data_transforms.RandomHorizontalFlip(),\n ])\n\ntoken_transforms = transforms.Compose(\n [\n data_transforms.PatchDropout(max_ratio=0.95)\n ])\n\ntest_transforms = transforms.Compose([])\n\nVAR = \"transfer_treeset\"\n@DATASETS.register_module()\nclass Cls_Treeset(data.Dataset):\n def __init__(self, args, config):\n self.plot_folders = config.plot_folders\n self.npoints = config.npoints\n self.normalization = config.normalization\n self.normalization_pars = np.array(config.normalization_pars)\n self.fewshot = config.few_shot if hasattr(config, \"few_shot\") else None\n self.target_label = config.target_type == \"label\"\n self.biomass = config.target_type == \"biomass\"\n assert config.target_type == \"biomass\" or config.target_type == \"label\"\n\n self.samples_list = []\n for folder in self.plot_folders:\n files = os.listdir(folder)\n files = [os.path.join(folder, file) for file in files]\n self.samples_list += files\n self.samples_list = np.array(self.samples_list)\n # grouping config\n num_group, group_size, sampling_method = config.model.num_group, config.model.group_size, config.sampling_method\n self.grouper = Group(num_group, group_size, sampling_method)\n\n if self.fewshot is None:\n rng = default_rng(seed=config.seed)\n self.samples_list = rng.permutation(self.samples_list)\n splitidx = np.floor(config.train_ratio * len(self.samples_list)).astype(int)\n print_log(\"SHUFFLED THE DATA\", logger = VAR+ config.subset)\n \n if config.subset == \"train\":\n self.samples_list = self.samples_list[0:splitidx]\n else:\n self.samples_list = self.samples_list[splitidx:]\n else: # if self.fewshot is smth\n if config.subset == \"train\":\n idx = np.load(config.few_shot_train_path)[self.fewshot]\n else:\n idx = np.load(config.few_shot_eval_path)[self.fewshot]\n self.samples_list = self.samples_list[idx.reshape(-1)]\n print_log(f'[DATASET]use fewshot {self.fewshot}', logger = VAR + config.subset)\n self.samples_path = self.samples_list.copy()\n self.samples_list = [np.load(sample, allow_pickle=True) for sample in self.samples_list]\n if config.validate_samples:\n for idx, (mass, sample) in enumerate(self.samples_list):\n if len(sample) < self.npoints:\n print(idx, \"has length\", len(sample))\n \n self.token_transforms 
=transforms.Compose([data_transforms.PatchDropout(max_ratio=args.patch_dropout)])\n\n if self.target_label:\n self.transforms = train_transforms\n\n elif self.biomass:\n print(\"use biomass transforms\")\n self.transforms = biomass_transforms\n else:\n raise NotImplementedError\n if config.subset != \"train\":\n self.transforms, self.token_transforms = test_transforms, test_transforms\n self.center = True\n \n def normalize(self, pc):\n pc = pc - pc.mean(axis=0)\n pc = pc / self.normalization_pars\n return pc\n\n def random_sample(self, pc, num):\n if len(pc) < num:\n choice = np.random.choice(len(pc), num, replace=True)\n else:\n choice = np.random.choice(len(pc), num, replace=False)\n return pc[choice]\n\n def __getitem__(self, idx):\n target, points = self.samples_list[idx]\n points = self.random_sample(points, self.npoints)\n if self.normalization:\n points = self.normalize(points)\n points = self.transforms(points)\n neighborhood, center, idx = self.grouper.group(points)\n if self.center:\n neighborhood = neighborhood - center.reshape(-1, 1, 3)\n neighborhood, center = self.token_transforms((neighborhood, center))\n #neighborhood = np.zeros(neighborhood.shape)\n neighborhood = torch.from_numpy(neighborhood).float()\n center = torch.from_numpy(center).float()\n if self.target_label:\n target = torch.from_numpy(np.array(target)).int()\n elif self.biomass: \n target = torch.from_numpy(np.array(target)).float()\n return neighborhood, center, target\n\n def __len__(self):\n return len(self.samples_list)\n\nif __name__ == '__main__':\n pass\n", "repo_name": "JanvDelden/Forest-Point-MAE", "sub_path": "datasets/cls_treeset.py", "file_name": "cls_treeset.py", "file_ext": "py", "file_size_in_byte": 5190, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PointcloudRotate", "line_number": 15, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 15, "usage_type": "name"}, {"api_name": "datasets.data_transforms.RandomHorizontalFlip", "line_number": 17, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 17, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PointcloudTranslate", "line_number": 18, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 18, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 21, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PointcloudRotate", "line_number": 23, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 23, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PointcloudScaleAndTranslate", "line_number": 24, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "datasets.data_transforms.RandomHorizontalFlip", "line_number": 25, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 25, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 28, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 28, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PatchDropout", "line_number": 30, 
"usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 30, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 33, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 37, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "group_utils.Group", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random.default_rng", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 82, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 82, "usage_type": "name"}, {"api_name": "datasets.data_transforms.PatchDropout", "line_number": 82, "usage_type": "call"}, {"api_name": "datasets.data_transforms", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "build.DATASETS.register_module", "line_number": 36, "usage_type": "call"}, {"api_name": "build.DATASETS", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "19682427235", "text": "from fastapi.testclient import TestClient\n\nfrom main import app\n\nclient = TestClient(app)\n\n\ndef test_show_cat_facts():\n response = client.get(\"/cat-facts/50\")\n assert response.status_code == 200\n\ndef test_cat_facts_bigger_amount():\n response = client.get(\"/cat-facts/501\")\n assert response.status_code == 422\n assert response.json() == {\n \"detail\": [\n {\n \"loc\": [\n \"path\",\n \"fact_amount\"\n ],\n \"msg\": \"ensure this value is less than or equal to 500\",\n \"type\": \"value_error.number.not_le\",\n \"ctx\": {\n \"limit_value\": 500\n }\n }\n ]\n }\n\ndef test_cat_facts_lower_amount():\n response = client.get(\"/cat-facts/0\")\n assert response.status_code == 422\n assert response.json() == {\n \"detail\": [\n {\n \"loc\": [\n \"path\",\n \"fact_amount\"\n ],\n \"msg\": \"ensure this value is greater than 0\",\n \"type\": \"value_error.number.not_gt\",\n \"ctx\": {\n \"limit_value\": 0\n }\n }\n ]\n }\n", "repo_name": "jbsg97/Cat-API", "sub_path": "src/test_main.py", "file_name": 
"test_main.py", "file_ext": "py", "file_size_in_byte": 1245, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.testclient.TestClient", "line_number": 5, "usage_type": "call"}, {"api_name": "main.app", "line_number": 5, "usage_type": "argument"}]} +{"seq_id": "36557397280", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom oslo_db.sqlalchemy import utils\nfrom sqlalchemy import Column\nfrom sqlalchemy import MetaData\nfrom sqlalchemy import String\n\nfrom nova.db.sqlalchemy import api\n\n\ndef upgrade(migrate_engine):\n meta = MetaData()\n meta.bind = migrate_engine\n\n shadow_prefix = api._SHADOW_TABLE_PREFIX\n uuid_col = Column('uuid', String(36))\n\n pci_devices = utils.get_table(migrate_engine, 'pci_devices')\n if not hasattr(pci_devices.c, 'uuid'):\n pci_devices.create_column(uuid_col.copy())\n\n shadow_pci_devices = utils.get_table(migrate_engine,\n shadow_prefix + 'pci_devices')\n if not hasattr(shadow_pci_devices.c, 'uuid'):\n shadow_pci_devices.create_column(uuid_col.copy())\n", "repo_name": "starlingx-staging/stx-nova", "sub_path": "nova/db/sqlalchemy/migrate_repo/versions/362_add_pci_devices_uuid.py", "file_name": "362_add_pci_devices_uuid.py", "file_ext": "py", "file_size_in_byte": 1309, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.MetaData", "line_number": 23, "usage_type": "call"}, {"api_name": "nova.db.sqlalchemy.api._SHADOW_TABLE_PREFIX", "line_number": 26, "usage_type": "attribute"}, {"api_name": "nova.db.sqlalchemy.api", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "call"}, {"api_name": "oslo_db.sqlalchemy.utils.get_table", "line_number": 29, "usage_type": "call"}, {"api_name": "oslo_db.sqlalchemy.utils", "line_number": 29, "usage_type": "name"}, {"api_name": "oslo_db.sqlalchemy.utils.get_table", "line_number": 33, "usage_type": "call"}, {"api_name": "oslo_db.sqlalchemy.utils", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "72965466075", "text": "from enum import Enum\n\n#Definition of Errors for the WrongDict in value_verifier to use\n#Key is Enum, Value is more info regarding error as described in comments below\nclass ErrorType(Enum):\n MAX_OPEN_PORTS = \"MAX_OPEN_PORTS\", # Number of ports over limit\n BANNED_PORTS = \"BANNED_PORTS\", # List of ports used that are banned\n NO_ROOT = \"NO_ROOT\", # None, key existing is enough\n BANNED_IMAGES = \"BANNED_IMAGES\", # List of images used that are not allowed\n BANNED_USERS = \"BANNED_USERS\", # List of users who are banned\n BANNED_APIS = \"BANNED_APIS\", # List of banned APIs\n BANNED_SERVICES = \"BANNED SERVICES\", # List of banned services\n PORT_NUMBER = \"PORT_NUMBER\", # If user only wants one port, then this is specified\n RBAC = \"RBAC\", # Checks to see if Role Based 
Access Control is enabled or not\n TLS = \"TLS\", # Checks to see if transport layer security is used\n REPLICA_COUNT = \"REPLICA_COUNT\", # Number of replica counts\n PULL_POLICY = \"PULL_POLICY\" # If the pull policy is present or not\n ALLOWED_IMAGES = \"ALLOWED_IMAGES\"\n ALLOWED_REGISTRY_REPO = \"ALLOWED_REGISTRY_REPO\"\n", "repo_name": "cyberatlas/K8sCheckmate", "sub_path": "Project/Models/error_type.py", "file_name": "error_type.py", "file_ext": "py", "file_size_in_byte": 1145, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "18604213343", "text": "from maya import cmds\n\ndef tween(percentage, obj=None, attrs=None, selection=True):\n if not obj and not selection:\n raise ValueError(\"No object given to tween\")\n if not obj:\n obj = cmds.ls(selection=True)[0]\n\n if not attrs:\n attrs = cmds.listAttr(obj, keyable=True)\n\n currentTime = cmds.currentTime(query=True)\n\n for attr in attrs:\n attr_full = '%s.%s' % (obj, attr)\n key_frames = cmds.keyframe(attr_full, query=True)\n if not key_frames:\n continue\n previous_key_frames = []\n for frame in key_frames:\n if frame < currentTime:\n previous_key_frames.append(frame)\n later_key_frames = [frame for frame in key_frames if frame > currentTime]\n\n if not previous_key_frames and not later_key_frames:\n continue\n\n if previous_key_frames:\n previous_frame = max(previous_key_frames)\n else:\n previous_frame = None\n\n next_frame = min(later_key_frames) if later_key_frames else None\n\n if not previous_frame or not next_frame:\n continue\n\n previous_value = cmds.getAttr(attr_full, time=previous_frame)\n next_value = cmds.getAttr(attr_full, time=next_frame)\n\n diff = next_value - previous_value\n\n weight_diff = (diff * percentage) / 100.0\n\n current_val = previous_value + weight_diff\n\n cmds.setKeyframe(attr_full, time=currentTime, value=current_val)\n\nclass TweenWindow(object):\n\n window_name = \"TweenerWindow\"\n\n def show(self):\n\n if cmds.window(self.window_name, query=True, exists=True):\n cmds.deleteUI(self.window_name)\n\n cmds.window(self.window_name)\n\n self.buildUI()\n\n cmds.showWindow()\n\n def buildUI(self):\n column = cmds.columnLayout()\n\n cmds.text(label=\"Use this slider to set the tween amount\")\n row = cmds.rowLayout(numberOfColumns=2)\n self.slider = cmds.floatSlider(min=0, max=100, value=50, step=1, changeCommand=tween)\n\n cmds.button(label=\"Reset\", command=self.reset)\n\n cmds.setParent(column)\n\n cmds.button(label=\"Close\", command=self.close)\n\n def reset(self, *args):\n cmds.floatSlider(self.slider, edit=True, value=50)\n\n def close(self, *args):\n cmds.deleteUI(self.window_name)\n\nTweenWindow().show()", "repo_name": "Olsonrowan/maya-scripts", "sub_path": "tweenerUI.py", "file_name": "tweenerUI.py", "file_ext": "py", "file_size_in_byte": 2346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "maya.cmds.ls", "line_number": 7, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 7, "usage_type": "name"}, {"api_name": "maya.cmds.listAttr", "line_number": 10, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 10, "usage_type": "name"}, {"api_name": "maya.cmds.currentTime", "line_number": 12, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 12, "usage_type": "name"}, {"api_name": "maya.cmds.keyframe", "line_number": 16, 
"usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 16, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 38, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 38, "usage_type": "name"}, {"api_name": "maya.cmds.getAttr", "line_number": 39, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 39, "usage_type": "name"}, {"api_name": "maya.cmds.setKeyframe", "line_number": 47, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 47, "usage_type": "name"}, {"api_name": "maya.cmds.window", "line_number": 55, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 55, "usage_type": "name"}, {"api_name": "maya.cmds.deleteUI", "line_number": 56, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 56, "usage_type": "name"}, {"api_name": "maya.cmds.window", "line_number": 58, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 58, "usage_type": "name"}, {"api_name": "maya.cmds.showWindow", "line_number": 62, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 62, "usage_type": "name"}, {"api_name": "maya.cmds.columnLayout", "line_number": 65, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 65, "usage_type": "name"}, {"api_name": "maya.cmds.text", "line_number": 67, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 67, "usage_type": "name"}, {"api_name": "maya.cmds.rowLayout", "line_number": 68, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 68, "usage_type": "name"}, {"api_name": "maya.cmds.floatSlider", "line_number": 69, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 69, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 71, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 71, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 73, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 73, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 75, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 75, "usage_type": "name"}, {"api_name": "maya.cmds.floatSlider", "line_number": 78, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 78, "usage_type": "name"}, {"api_name": "maya.cmds.deleteUI", "line_number": 81, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "15619801019", "text": "from flask import Blueprint, request, render_template, redirect , url_for\nimport json\nfrom models.store import Store\nfrom models.user import requires_admin, requires_login\n\nstore_blueprint = Blueprint(\"stores\", __name__)\n\n\n@store_blueprint.route('/')\n@requires_login\ndef index():\n stores = Store.all()\n return render_template(\"stores/index.html\", stores=stores)\n\n\n@store_blueprint.route('/new', methods=['GET', 'POST'])\n@requires_admin\ndef new_store():\n if request.method == 'POST':\n name = request.form['name']\n url_prefix = request.form['url_prefix']\n tag_name = request.form['tag_name']\n query_json = request.form['query']\n query = json.loads(query_json)\n Store(name, url_prefix, tag_name, query).save_to_mongo()\n stores = Store.all()\n return render_template(\"stores/index.html\", stores=stores)\n return render_template(\"stores/new_store.html\")\n\n@store_blueprint.route('/edit/' , methods=['GET','POST'])\n@requires_admin\ndef edit_store(store_id):\n store = Store.get_by_id(store_id)\n\n if request.method == \"POST\":\n store.url_prefix = 
request.form['url_prefix']\n store.tag_name = request.form['tag_name']\n query_json = request.form['query']\n store.query = json.loads(query_json.replace('\\'','\"'))\n store.save_to_mongo()\n return redirect(url_for('.index'))\n return render_template(\"stores/edit_store.html\" , store = store)\n\n@store_blueprint.route('/delete/' , methods=['GET'])\n@requires_admin\ndef delete_store(store_id):\n store = Store.get_by_id(store_id)\n store.remove_from_mongo()\n stores = Store.all()\n return render_template(\"stores/index.html\", stores=stores)", "repo_name": "amalmajeed/Pricing_Service", "sub_path": "views/stores.py", "file_name": "stores.py", "file_ext": "py", "file_size_in_byte": 1727, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call"}, {"api_name": "models.store.Store.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "models.user.requires_login", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 25, "usage_type": "call"}, {"api_name": "models.store.Store.all", "line_number": 26, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 28, "usage_type": "call"}, {"api_name": "models.user.requires_admin", "line_number": 17, "usage_type": "name"}, {"api_name": "models.store.Store.get_by_id", "line_number": 33, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 41, "usage_type": "call"}, {"api_name": 
"flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "models.user.requires_admin", "line_number": 31, "usage_type": "name"}, {"api_name": "models.store.Store.get_by_id", "line_number": 47, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 47, "usage_type": "name"}, {"api_name": "models.store.Store.all", "line_number": 49, "usage_type": "call"}, {"api_name": "models.store.Store", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "models.user.requires_admin", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "13404833410", "text": "from django.conf import settings\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\nfrom hades_star_backend.members.views import MemberViewSet\n\napp_name = \"corporations\"\nif settings.DEBUG:\n router = DefaultRouter()\nelse:\n router = SimpleRouter()\n\nrouter.register(\"\", MemberViewSet)\n\nurlpatterns = router.urls\n", "repo_name": "michal-stachura/hades-star-backend", "sub_path": "hades_star_backend/members/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 328, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.conf.settings.DEBUG", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "rest_framework.routers.SimpleRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "hades_star_backend.members.views.MemberViewSet", "line_number": 12, "usage_type": "argument"}]} +{"seq_id": "12962777643", "text": "import os\nfrom typing import Dict, Optional\n\nfrom ..datapoint.annotation import ImageAnnotation\nfrom ..datapoint.box import BoundingBox\nfrom ..datapoint.image import Image\nfrom ..utils.detection_types import JsonDict\nfrom ..utils.fs import load_image_from_file\nfrom ..utils.settings import get_type\nfrom .maputils import MappingContextManager, curry, maybe_get_fake_score\n\n\n@curry\ndef pascal_voc_dict_to_image(\n dp: JsonDict,\n categories_name_as_key: Dict[str, str],\n load_image: bool,\n filter_empty_image: bool,\n fake_score: bool,\n category_name_mapping: Optional[Dict[str, str]] = None,\n) -> Optional[Image]:\n \"\"\"\n Map a dataset in a structure equivalent to iiitar13k annotation style to image format\n\n :param dp: a datapoint in serialized iiitar13k format. Note that another conversion from xml to\n a dict structure is required.\n :param categories_name_as_key: A dict of categories, e.g. DatasetCategories.get_categories(name_as_key=True)\n :param load_image: If 'True' it will load image to attr: Image.image\n :param filter_empty_image: Will return None, if datapoint has no annotations\n :param fake_score: If dp does not contain a score, a fake score with uniform random variables in (0,1)\n will be added.\n :param category_name_mapping: Map incoming category names, e.g. 
{\"source_name\":\"target_name\"}\n :return: Image\n \"\"\"\n\n anns = dp.get(\"objects\", [])\n if not anns and filter_empty_image:\n return None\n\n with MappingContextManager(dp.get(\"filename\")) as mapping_context:\n image = Image(\n file_name=os.path.split(dp[\"filename\"])[1].replace(\".xml\", \".jpg\"),\n location=dp[\"filename\"].replace(\".xml\", \".jpg\").replace(\"xml\", \"images\"),\n )\n\n if load_image:\n image.image = load_image_from_file(image.location)\n image.set_width_height(float(dp.get(\"width\", 0)), float(dp.get(\"height\", 0)))\n\n for ann in anns:\n x_1 = min(max(ann[\"xmin\"], 0), image.width if image.width else float(dp.get(\"width\", 0)))\n x_2 = min(max(ann[\"xmax\"], 0), image.width if image.width else float(dp.get(\"width\", 0)))\n y_1 = min(max(ann[\"ymin\"], 0), image.height if image.height else float(dp.get(\"height\", 0)))\n y_2 = min(max(ann[\"ymax\"], 0), image.height if image.height else float(dp.get(\"height\", 0)))\n\n bbox = BoundingBox(absolute_coords=True, ulx=x_1, uly=y_1, lrx=x_2, lry=y_2)\n\n if category_name_mapping is not None:\n label = category_name_mapping.get(ann[\"name\"])\n if not label:\n label = ann[\"name\"]\n else:\n label = ann[\"label\"]\n assert isinstance(label, str)\n\n annotation = ImageAnnotation(\n category_name=get_type(label),\n bounding_box=bbox,\n category_id=categories_name_as_key[label],\n score=maybe_get_fake_score(fake_score),\n )\n image.dump(annotation)\n\n if mapping_context.context_error:\n return None\n return image\n", "repo_name": "deepdoctection/deepdoctection", "sub_path": "deepdoctection/mapper/pascalstruct.py", "file_name": "pascalstruct.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1814, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.detection_types.JsonDict", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 20, "usage_type": "name"}, {"api_name": "maputils.MappingContextManager", "line_number": 40, "usage_type": "call"}, {"api_name": "datapoint.image.Image", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.fs.load_image_from_file", "line_number": 47, "usage_type": "call"}, {"api_name": "datapoint.box.BoundingBox", "line_number": 56, "usage_type": "call"}, {"api_name": "datapoint.annotation.ImageAnnotation", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.settings.get_type", "line_number": 67, "usage_type": "call"}, {"api_name": "maputils.maybe_get_fake_score", "line_number": 70, "usage_type": "call"}, {"api_name": "maputils.curry", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 21, "usage_type": "name"}, {"api_name": "datapoint.image.Image", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "28665719978", "text": "import sys\nfrom PIL import Image\n#by Yusdel Lorenzo 2019\ndef AStar(start, goal, neighborNodes, distance, cost):\n def makePath(parentNode, currNode):\n path = []\n while currNode is not None:\n path.append(currNode)\n currNode = parentNode[currNode]\n return list(reversed(path))\n\n gScore = {start: 0}\n fScore = {start: gScore[start] + cost(start, goal)}\n openSet = {start}\n closedSet = set()\n parentNode = 
{start: None}\n\n while openSet:\n current = min(openSet, key=lambda x: fScore[x])\n if current == goal:\n return makePath(parentNode, goal)\n openSet.remove(current)\n closedSet.add(current)\n for neighbor in neighborNodes(current):\n if neighbor in closedSet:\n continue\n if neighbor not in openSet:\n openSet.add(neighbor)\n newScore = gScore[current] + distance(current, neighbor)\n if newScore >= gScore.get(neighbor, float('inf')):\n continue\n parentNode[neighbor] = current\n gScore[neighbor] = newScore\n fScore[neighbor] = newScore + cost(neighbor, goal)\n return []\n\ndef blocked(p):\n x,y = p\n pixel = path_pixels[x,y]\n if any(c < 225 for c in pixel):\n return True\n\ndef getNeighbors(Pixel):\n x, y = Pixel\n neighbors = [(x-1, y), (x, y-1), (x+1, y), (x, y+1)]\n return [Pixel for Pixel in neighbors if not blocked(Pixel)]\n\ndef manhattan(pixel1, pixel2):\n return abs(pixel1[0]-pixel2[0]) + abs(pixel1[1]-pixel2[1])\n\ndef crowFlies(pixel1, pixel2):\n return (pixel1[0]-pixel2[0])**2 + (pixel1[1]-pixel2[1])**2\n\nstart = (400, 984) #middle at the top\ngoal = (398, 25) #middle at the bottom\npath_img = Image.open(sys.argv[1])\npath_pixels = path_img.load()\ndistance = manhattan\nheuristic = manhattan\npath = AStar(start, goal, getNeighbors, distance, heuristic)\nfor position in path:\n x,y = position\n path_pixels[x,y] = (255,0,0) # red\npath_img.save(sys.argv[2])\n", "repo_name": "lima-yusdel/Python", "sub_path": "Procedurally generated maze solving/Solver.py", "file_name": "Solver.py", "file_ext": "py", "file_size_in_byte": 2017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 56, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 64, "usage_type": "attribute"}]} +{"seq_id": "29476017828", "text": "from argparse import Namespace\nfrom django.urls import path\nfrom django.urls.resolvers import URLPattern\nfrom .views import home,panelAdmin,panelMedico,panelBodeguero,panelFarmaceutico,caducarMedicamentos,eliminarCuentas,generarInformes,modificarCuentas,ConsultarMedicamentos,registrarConsulta,registrarCuentas,registrarMedicamentos, reservarMedicamentos,retiroMedicamentos,verCuentas,carrito,caducar\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom .views import Agregar_productos, Eliminar_productos, Restar_productos,Limpiar\n\n\nurlpatterns = [\n\n path('',home,name='home'),\n path('panel_medico/',panelMedico,name='panelMedico'),\n path('panel_bodeguero/',panelBodeguero,name='panelBodeguero'),\n path('panel_farmaceutico/',panelFarmaceutico,name='panelFarmaceutico'),\n path('panel_admin/',panelAdmin,name='panelAdmin'),\n path('registrar_medicamentos/',registrarMedicamentos,name='registrarMedicamentos'),\n path('registrar_consulta/',registrarConsulta,name='registrarConsulta'),\n path('ver_medicamentos/',ConsultarMedicamentos,name='ConsultarMedicamentos'),\n path('caducar_medicamentos/',caducarMedicamentos,name='caducarMedicamentos'),\n path('eliminar_cuentas/',eliminarCuentas,name='eliminarCuentas'),\n path('generar_informes/',generarInformes,name='generarInformes'),\n path('modificar_cuentas/',modificarCuentas,name='modificarCuentas'),\n path('ver_cuentas/',verCuentas,name='verCuentas'),\n path('registrar_cuentas/',registrarCuentas,name='registrarCuentas'),\n 
path('retirar_medicamentos/',retiroMedicamentos,name='retiroMedicamentos'),\n path('reservar_medicamentos/',reservarMedicamentos,name='reservarMedicamentos'),\n path('carrito/',carrito,name='carrito'),\n path('caducar/',caducar,name='caducar'),\n \n \n]\n\nif settings.DEBUG:\n urlpatterns+= static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n", "repo_name": "RkzJacob/APPWEB", "sub_path": "ProyectoWeb/AppWeb/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1914, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.home", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.panelMedico", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.panelBodeguero", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "views.panelFarmaceutico", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "views.panelAdmin", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "views.registrarMedicamentos", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "views.registrarConsulta", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "views.ConsultarMedicamentos", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "views.caducarMedicamentos", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "views.eliminarCuentas", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "views.generarInformes", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "views.modificarCuentas", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "views.verCuentas", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "views.registrarCuentas", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "views.retiroMedicamentos", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "views.reservarMedicamentos", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "views.carrito", "line_number": 29, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "views.caducar", "line_number": 30, "usage_type": "argument"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 35, "usage_type": 
"attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "25500020374", "text": "from sqlalchemy.orm import Session\nfrom db import models\nfrom app import schemas\n\nfrom typing import List\n\ndef get_count_ambassadors_without_link_repo(db: Session) -> int:\n count = db.query(models.Ambassadors.id).filter(models.Ambassadors.link.is_(None)).count()\n return count\n\ndef get_ambassador_by_link(db: Session, link: str) -> models.Ambassadors:\n return (\n db.query(models.Ambassadors)\n .filter(models.Ambassadors.link == link)\n .first()\n )\n\ndef insert_urls(db: Session, links:List[str]):\n ambassadors = db.query(models.Ambassadors).filter(models.Ambassadors.link.is_(None)).all()\n\n for index, row in enumerate(ambassadors):\n row.link = links[index]\n\n db.add_all(ambassadors)\n db.commit()\n\ndef insert_ambassadors_repo(db: Session, ambassadors_input: List):\n ambassadors = []\n\n for row in ambassadors_input:\n ambassadors.append(\n models.Ambassadors(\n name=row[0],\n email=row[1],\n link=None,\n points=0,\n is_valid=True,\n )\n )\n \n db.add_all(ambassadors)\n db.commit()", "repo_name": "ramasbeinaty/keygen", "sub_path": "db/repo.py", "file_name": "repo.py", "file_ext": "py", "file_size_in_byte": 1146, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.orm.Session", "line_number": 7, "usage_type": "name"}, {"api_name": "db.query", "line_number": 8, "usage_type": "call"}, {"api_name": "db.models.Ambassadors", "line_number": 8, "usage_type": "attribute"}, {"api_name": "db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "db.models.Ambassadors.link.is_", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 11, "usage_type": "name"}, {"api_name": "db.query", "line_number": 13, "usage_type": "call"}, {"api_name": "db.models.Ambassadors", "line_number": 13, "usage_type": "attribute"}, {"api_name": "db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "db.models.Ambassadors", "line_number": 14, "usage_type": "attribute"}, {"api_name": "db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "db.models.Ambassadors", "line_number": 11, "usage_type": "attribute"}, {"api_name": "db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "db.query", "line_number": 19, "usage_type": "call"}, {"api_name": "db.models.Ambassadors", "line_number": 19, "usage_type": "attribute"}, {"api_name": "db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "db.models.Ambassadors.link.is_", "line_number": 19, "usage_type": "call"}, {"api_name": "db.add_all", "line_number": 24, "usage_type": "call"}, {"api_name": "db.commit", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "db.models.Ambassadors", "line_number": 32, 
"usage_type": "call"}, {"api_name": "db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "db.add_all", "line_number": 41, "usage_type": "call"}, {"api_name": "db.commit", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "3980300030", "text": "import pandas as pd\nimport string\nimport argparse\nimport os\n\nstr2exact = {}\n\ndef main():\n print('\\n#######################')\n print('Preprocess Part 1')\n print('#######################')\n\n data_dir = [(os.path.join(args.data_dir,'atomic2020.tsv'),'atomic2020'),\n (os.path.join(args.data_dir,'conceptnet.tsv'),'conceptnet'),\n (os.path.join(args.data_dir,'transomcs.tsv'),'transomcs'),\n (os.path.join(args.data_dir,'atomic.tsv'),'atomic')\n ]\n\n conceptnet_label_whitelist = {\n 'AtLocation':None,#\n 'CapableOf':None,#\n 'Causes':None,#\n 'CausesDesire':None,#\n 'Desires':None,#\n 'HasA':None,#\n 'HasFirstSubevent':None,#\n 'HasLastSubevent':None,#\n 'HasPrerequisite':None,#\n 'HasProperty':None,#\n 'HasSubevent':None,#\n 'MadeOf':None,#\n 'MotivatedByGoal':None,#\n 'NotDesires':None,#\n 'PartOf':None,#\n 'ReceivesAction':None,#\n 'UsedFor':None,#\n 'ObstructedBy':None\n }\n\n for f,kb in data_dir:\n print('\\n{}: reading file'.format(kb))\n df_all = pd.read_csv(f, sep='\\t')\n before_size = len(df_all)\n df_all.drop_duplicates(inplace=True)\n before_uniq = len(df_all)\n\n if kb.startswith('atomic'):\n df = df_all.copy()\n else:\n df = df_all[df_all['relation'].isin(conceptnet_label_whitelist)].copy()\n\n print('{}: processing head'.format(kb))\n df['head_exact'] = df[['head','relation']].apply(lambda x: str2exact[x['head']] if x['head'] in str2exact else clean_str(x['head'], kb, x['relation']), axis=1)\n\n print('{}: processing tail'.format(kb))\n df['tail_exact'] = df[['tail','relation']].apply(lambda x: str2exact[x['tail']] if x['tail'] in str2exact else clean_str(x['tail'], kb, x['relation']), axis=1)\n\n print('{}: writing processed file'.format(kb))\n\n d = '/'.join(f.split('/')[:-1]) + '/'\n df[['head', 'relation', 'tail', 'head_exact', 'tail_exact']].to_csv(d + kb + '_exact.tsv', index=False, sep='\\t')\n\n\n\ndef clean_str(s_raw,kb, relation):\n if pd.isnull(s_raw):\n s_raw = ''\n\n s = s_raw.lower()\n if kb[:6] == 'atomic' and 'person' in s:\n s = s.replace('personx','person')\n s = s.replace('persony','person')\n s = s.replace('personz','person')\n\n s = s.strip().translate(str.maketrans('', '', string.punctuation))\n l = s.split()\n\n if not l:\n rv = ''\n elif kb[:6] == 'atomic' and (relation[0] in [\"o\",\"x\"] or relation in ['isFilledBy', 'HinderedBy', 'isBefore', 'isAfter']) and l[0][:6]=='person':\n rv = ' '.join(l[1:])\n else:\n rv = ' '.join(l)\n\n str2exact[s_raw] = rv\n return rv\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument('data_dir', help=\"directory with KBs in tab separated data files. 
Required headers and columns: [head, relation, tail]\")\n\n args = parser.parse_args()\n\n main()\n", "repo_name": "allenai/comet-atomic-2020", "sub_path": "human_eval/coverage/preprocess_kb_triples_part1.py", "file_name": "preprocess_kb_triples_part1.py", "file_ext": "py", "file_size_in_byte": 3014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 199, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 66, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 75, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "34983160680", "text": "playliststart = 1\nplaylistend = 24\ndownload_link = ''\n\ndef guess_name():\n artist = ''\n title = ''\n if file[:-16].find(' - ') != -1:\n #if splittable\n artist = file[:-16].split(' - ')[0]\n title = file[:-16].split(' - ')[1]\n print(f'Artist: {artist} \\nTitle: {title}')\n answer = input('Is this correct? (y/n)')\n else:\n title = file[:-16]\n print(f'Title: {title}')\n answer = input('Is this correct? (y/n)')\n return eval_answer(answer, artist, title)\n\ndef eval_answer(answer, artist, title):\n print(f'answer: {answer}')\n if answer == 'n':\n print(f'Filename: {file}')\n artist = input('Artist: ')\n title = input('Title: ')\n elif answer == 'y' and artist == '':\n print(f'Filename: {file}')\n artist = input('Artist: ')\n elif answer == 'y' and artist != '':\n print('You said title and artist are correct')\n else:\n print('answer must be y/n')\n guess_name()\n\n return artist, title\n\n\nif __name__ == '__main__':\n import os\n if not os.path.exists('music'):\n os.mkdir('music')\n os.chdir('music')\n # download mp3\n import youtube_dl\n\n def my_hook(d):\n if d['status'] == 'finished':\n print('Done downloading, now converting ...')\n\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n 'playliststart': playliststart,\n 'playlistend': playlistend,\n 'skip_download': False,\n 'progress_hooks': [my_hook],\n 'writethumbnail': True,\n 'ignoreerrors': True\n }\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([''])\n\n # add the thumbnails\n import eyed3\n import glob\n\n print('\\n\\n\\nmp3 editing')\n\n # for every file\n for file in glob.glob(\"*.mp3\"):\n print(file)\n # load mp3\n audiofile = eyed3.load(file)\n if (audiofile.tag == None):\n audiofile.initTag()\n\n print('thumbnail')\n\n # if .webp format exists, must convert\n if os.path.exists(file[:-4] + '.webp'):\n image_name = file[:-4] + '.webp'\n #convert thumbnails to jpeg\n from PIL import Image\n im = Image.open(image_name).convert('RGB')\n im.save(image_name[:-5] + '.jpg', 'jpeg')\n # save jpg to mp3\n if os.path.exists(file[:-4] + '.jpg'):\n image_file = open(file[:-4] + '.jpg', 
'rb').read()\n audiofile.tag.images.set(3, image_file, 'image/jpeg')\n else:\n print(f'\\n\\n\\n\\n\\n\\n\\nNO THUMBNAIL for {file}')\n\n #generate artist title\n artist, title = guess_name()\n audiofile.tag.title = title\n audiofile.tag.artist = artist\n\n audiofile.tag.save()\n print('\\n\\n\\n')\n\n print('\\n\\n\\n\\n\\nStarting Cleanup')\n\n for item in os.listdir('.'):\n if item.endswith(\".jpg\") or item.endswith(\".webp\"):\n print(f'Deleting: {item}')\n os.remove(item)\n os.chdir('..')\n", "repo_name": "kjelpw/yt-music-downloader", "sub_path": "download.py", "file_name": "download.py", "file_ext": "py", "file_size_in_byte": 3137, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 42, "usage_type": "call"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 64, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 74, "usage_type": "call"}, {"api_name": "eyed3.load", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 88, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 88, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 107, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 110, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "13198362458", "text": "import argparse\nimport os\nimport h5py\nimport numpy as np\nimport torch\nimport torchtext.data as data\nimport torch.utils.data\nimport sys\nfrom tqdm import tqdm\nimport json\n\ndef get_CMU_words(CMU_path):\n words = []\n with open(CMU_path) as f:\n lines = f.readlines()\n for wcnt, line in enumerate(lines):\n grapheme, phoneme = line.split(\" \",1)\n words.append(grapheme)\n return words\n\ndef get_LRW_split(args, split, CMUwords):\n lst_path = args.LRW_words_path\n word_indices = []\n with open(lst_path) as f:\n lines = f.readlines() #list of words\n for word in tqdm(lines):\n word = word.strip()\n widx_array = []\n widx = CMUwords.index(word) \n widx_array.append(widx)\n for filename in os.listdir(os.path.join(args.LRW_path, word.upper(), split)):\n Fwidx = {}\n L = filename.strip()\n path = os.path.join(word.upper(), split, L).replace(\".mp4.npy\", \"\")\n Fwidx['widx']=widx_array\n Fwidx['fn']=path\n word_indices.append(Fwidx)\n return word_indices\n\ndef get_LRW_splits():\n parser = argparse.ArgumentParser(description='Script for creating main splits of LRW.')\n parser.add_argument('--CMUdict_path', default='../data/vocab/cmudict.dict')\n parser.add_argument('--LRW_path', default='../data/lrw/features/main/') \n parser.add_argument('--LRW_words_path', default='../data/lrw/LRWwords.lst')\n args = parser.parse_args()\n CMUwords = get_CMU_words(args.CMUdict_path) \n S = ['train', 'val', 'test']\n Dsplits = {}\n for i,s in enumerate(S):\n Dsplits[s] = get_LRW_split(args, s, CMUwords)\n with open(\"../data/lrw/DsplitsLRW.json\", \"w\") as fp:\n json.dump(Dsplits, fp)\n\nif 
__name__=='__main__':\n get_LRW_splits()\n", "repo_name": "lilianemomeni/KWS-Net", "sub_path": "misc/data_splits_lrw.py", "file_name": "data_splits_lrw.py", "file_ext": "py", "file_size_in_byte": 1764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 57, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tqdm.tqdm", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "22867233999", "text": "import argparse, sys\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\n 'Print a vector of line numbers containing each line of reference file that occurs in input files')\n parser.add_argument('input_files', nargs='*', default=None)\n parser.add_argument('reference_file')\n parser.add_argument('-p', '--python', action='store_true', help='format output as python list')\n parser.add_argument('-l', '--lisp', action='store_true', help='format output as lisp list')\n args = parser.parse_args()\n\n input_lines = []\n if len(args.input_files) > 0:\n for f in args.input_files:\n input_lines += [line for line in open(f)]\n else:\n input_lines = [line for line in sys.stdin]\n\n indices = [i for i, line in enumerate(open(args.reference_file)) if line in input_lines]\n\n if args.python:\n print('[%s]'.join(', '.join(map(str, indices))))\n if args.lisp:\n print('(%s)'.join(' '.join(map(str, indices))))\n if not (args.python and args.lisp):\n print('\\n'.join(map(str, indices)))\n\n\n", "repo_name": "bjvanderweij/rhythm", "sub_path": "rhythm/findl.py", "file_name": "findl.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "35397224293", "text": "from datetime import datetime\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flaskblog import db, login_manager, app\nfrom flask_login import UserMixin\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key = True)\n username = db.Column(db.String(20), nullable=False, unique=True)\n email = db.Column(db.String(120), nullable=False, unique=True)\n password = db.Column(db.String(), nullable=False)\n image_file = db.Column(db.String(60), nullable=False, default='default.jpg')\n posts = db.relationship('Post', backref='author', lazy=True)\n\n def __repr__(self):\n return f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\n\n def get_reset_token(self, expired_sec = 1800):\n s = Serializer(app.config['SECRET_KEY'], expired_sec)\n token = s.dumps({'user_id':self.id}).decode('utf-8')\n return token\n\n @staticmethod\n def verify_reset_token(token):\n s = Serializer(app.config['SECRET_KEY'])\n try:\n user_id = s.loads(token)['user_id']\n except:\n 
return None\n return User.query.get(user_id)\n\nclass Post(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n title = db.Column(db.String(100), nullable=False)\n content = db.Column(db.Text, nullable=False)\n posted_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'),nullable=False)\n comments = db.relationship('Comment', backref='post', lazy=True)\n\n def __repr__(self):\n return f\"Post('{self.title}', '{self.posted_date}')\"\n\nclass Comment(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n content = db.Column(db.String(255), nullable=False)\n created_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n user = db.Column(db.Boolean(), default=False)\n post_id = db.Column(db.Integer, db.ForeignKey('post.id'),nullable=False)\n\n def __repr__(self):\n return f\"Comment('{self.user}')\"", "repo_name": "adeoti-ade/FlaskBlog", "sub_path": "flaskblog/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flaskblog.login_manager.user_loader", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flaskblog.login_manager", "line_number": 8, "usage_type": "name"}, {"api_name": "flaskblog.db.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flaskblog.db", "line_number": 12, "usage_type": "name"}, {"api_name": "flask_login.UserMixin", "line_number": 12, "usage_type": "name"}, {"api_name": "flaskblog.db.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 13, "usage_type": "name"}, {"api_name": "flaskblog.db.Integer", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flaskblog.db.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 14, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 14, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 15, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 15, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 16, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 16, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 17, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 17, "usage_type": "call"}, {"api_name": "flaskblog.db.relationship", "line_number": 18, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 18, "usage_type": "name"}, {"api_name": "itsdangerous.TimedJSONWebSignatureSerializer", "line_number": 24, "usage_type": "call"}, {"api_name": "flaskblog.app.config", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flaskblog.app", "line_number": 24, "usage_type": "name"}, {"api_name": "itsdangerous.TimedJSONWebSignatureSerializer", "line_number": 30, "usage_type": "call"}, {"api_name": "flaskblog.app.config", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flaskblog.app", "line_number": 30, "usage_type": "name"}, {"api_name": "flaskblog.db.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flaskblog.db", "line_number": 37, "usage_type": 
"name"}, {"api_name": "flaskblog.db.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 38, "usage_type": "name"}, {"api_name": "flaskblog.db.Integer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flaskblog.db.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 39, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 39, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 40, "usage_type": "name"}, {"api_name": "flaskblog.db.Text", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flaskblog.db.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 41, "usage_type": "name"}, {"api_name": "flaskblog.db.DateTime", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "name"}, {"api_name": "flaskblog.db.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 42, "usage_type": "name"}, {"api_name": "flaskblog.db.Integer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flaskblog.db.ForeignKey", "line_number": 42, "usage_type": "call"}, {"api_name": "flaskblog.db.relationship", "line_number": 43, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 43, "usage_type": "name"}, {"api_name": "flaskblog.db.Model", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flaskblog.db", "line_number": 48, "usage_type": "name"}, {"api_name": "flaskblog.db.Column", "line_number": 49, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 49, "usage_type": "name"}, {"api_name": "flaskblog.db.Integer", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flaskblog.db.Column", "line_number": 50, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 50, "usage_type": "name"}, {"api_name": "flaskblog.db.String", "line_number": 50, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 51, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 51, "usage_type": "name"}, {"api_name": "flaskblog.db.DateTime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 51, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "name"}, {"api_name": "flaskblog.db.Column", "line_number": 52, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 52, "usage_type": "name"}, {"api_name": "flaskblog.db.Boolean", "line_number": 52, "usage_type": "call"}, {"api_name": "flaskblog.db.Column", "line_number": 53, "usage_type": "call"}, {"api_name": "flaskblog.db", "line_number": 53, "usage_type": "name"}, {"api_name": "flaskblog.db.Integer", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flaskblog.db.ForeignKey", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "31627565906", "text": "import sqlite3\nimport os\n\nDATABASE_FILE = os.path.abspath(os.path.join(__file__, \"../llama.db\"))\nconn = sqlite3.connect(DATABASE_FILE)\nconn.execute(\"\"\"CREATE TABLE IF NOT EXISTS users\n (username VARCHAR(50) PRIMARY KEY,\n points INTEGER);\"\"\")\nconn.commit()\n\ndef newConnection():\n # Needed for multiple threads working in the db.\n # cron thread calls and uses this for its 
changes.\n    return Connection()\n\nclass Connection:\n    def __init__(self):\n        self.conn = sqlite3.connect(DATABASE_FILE)\n\n    def setPoints(self, user, points):\n        cmd = \"INSERT OR REPLACE INTO users VALUES (?,?)\"\n        print(\"Set points\", user, points)\n        self.conn.execute(cmd, (user, points))\n        self.conn.commit()\n\n    def addPoints(self, user, delta):\n        points = self.getPoints(user)\n        self.setPoints(user, delta + points)\n\n    def getPoints(self, user):\n        print(\"Get points:\", user)\n        cmd = \"SELECT points FROM users WHERE username = ?\"\n        cursor = self.conn.execute(cmd, (user,))\n        self.conn.commit()\n        row = cursor.fetchone()\n        if row is None:\n            return 0\n        return row[0]\n\n    def hasUser(self, user):\n        print(\"---- has user\", user)\n        cmd = \"SELECT points FROM users WHERE username = ?\"\n        row = self.conn.execute(cmd, (user,)).fetchone()\n        return row is not None\n\n    def getTopUsers(self):\n        cmd = \"SELECT username, points FROM users ORDER BY points*1 DESC LIMIT 13\"\n        cursor = self.conn.execute(cmd)\n        rows = cursor.fetchall()\n        out = []\n        for user, points in rows:\n            out.append(\"{} {}\".format(user, points))\n        return \" | \".join(out)\n\n", "repo_name": "tehNinth/lorenzotherobot", "sub_path": "src/lib/llama.py", "file_name": "llama.py", "file_ext": "py", "file_size_in_byte": 1585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 4, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "17488167326", "text": "from typing import List\nclass Solution:\n\n#DETECT CYCLE IN DIRECTED GRAPH. KEY POINT : UNMARK A NODE AS VISITED EVERYTIME YOU BACKTRACK. 
\n def cycle(self,node,visited,adj,d) :\n if node in d :\n return d[node]\n \n if node in visited : \n d[node] = True\n return d[node]\n else :\n visited.add(node) #MARK VISITED\n for i in adj[node] :\n c = self.cycle(i,visited,adj,d)\n if c == True :\n d[node] = True\n return d[node]\n visited.remove(node) #UNMARK VISITED\n d[node] = False\n return d[node]\n \n def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n adj = [[] for i in range((numCourses))]\n for i in prerequisites :\n adj[i[1]].append(i[0])\n #print(adj)\n visited = set()\n d = {}\n c = False\n for i in range(len(adj)) :\n \n c = self.cycle(i,visited,adj,d)\n if c == True :\n return False\n visited = set()\n return True\n ", "repo_name": "shauryasoni/Leetcode", "sub_path": "graphs_trees/courseScheduleI.py", "file_name": "courseScheduleI.py", "file_ext": "py", "file_size_in_byte": 1163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "7801329554", "text": "from Question import Question\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass FragGabiEntry:\n \"\"\"\n Scraper for the Frag Gabi Section on maedchen.de\n \"\"\"\n answer = str()\n question = Question()\n\n def __init__(self, url):\n \"\"\"\n On object creation directly initiate scraping of given site\n :param url: URL to a question on maedchen.de (Format like https://www.maedchen.de/love/frag-gabi/)\n \"\"\"\n self.scrape_site(url)\n\n def scrape_site(self, url):\n \"\"\"\n Request site and extract contents\n :param url: URL to a question on maedchen.de (Format like https://www.maedchen.de/love/frag-gabi/)\n :return: True on success\n \"\"\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n question_content_raw = soup.find_all(class_='question-detail')[0].get_text().strip()\n self.question.content = question_content_raw.split('\\n\\n\\n')[0].strip()\n self.question.title = soup.find_all(class_='question-header')[0].get_text().strip()\n author_date = question_content_raw.split('\\n\\n\\n')[1].strip()[4:].split(' / ')\n self.question.author = author_date[0]\n self.question.set_date(author_date[1])\n self.answer = soup.find_all(class_='question-answers__content--expert')[0].get_text(separator='\\n')\n return True\n", "repo_name": "cmantsch/FragGabiScraper", "sub_path": "FragGabiEntry.py", "file_name": "FragGabiEntry.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Question.Question", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "27425667552", "text": "from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.contrib.auth.models import User\nfrom apps.characters.models import Character\nfrom apps.characters.serializers import CharacterSerializer\n\n\n@api_view(['GET'])\ndef characterList(request):\n characters = Character.objects.all()\n serializer = CharacterSerializer(characters, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef characterCreate(request):\n serializer = CharacterSerializer(data=request.data, many=True, partial=True)\n\n if serializer.is_valid():\n 
serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n \n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST', 'GET'])\ndef characterUpdate(request, id):\n characters = Character.objects.get(id=id)\n serializer = CharacterSerializer(instance=characters, data=request.data, partial=True)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef characterDelete(request, id):\n characters = Character.objects.get(id=id)\n characters.delete()\n\n return Response('Character successfully deleted!')\n", "repo_name": "lucasjaroszewski/Anaden-Tracker", "sub_path": "apps/characters/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "apps.characters.models.Character.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "apps.characters.models.Character.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "apps.characters.models.Character", "line_number": 11, "usage_type": "name"}, {"api_name": "apps.characters.serializers.CharacterSerializer", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 13, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 9, "usage_type": "call"}, {"api_name": "apps.characters.serializers.CharacterSerializer", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 22, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.characters.models.Character.objects.get", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.characters.models.Character.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "apps.characters.models.Character", "line_number": 28, "usage_type": "name"}, {"api_name": "apps.characters.serializers.CharacterSerializer", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 26, "usage_type": "call"}, {"api_name": "apps.characters.models.Character.objects.get", "line_number": 38, "usage_type": "call"}, {"api_name": "apps.characters.models.Character.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "apps.characters.models.Character", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "70900801444", "text": "import os\nimport time\nimport re\nfrom tabulate import tabulate\nfrom .common import writeCSV, readCSV\n\n\nclass ThinkData:\n def __init__(\n self, title: str, samples: list, features: list, weights: 
list, scores: list\n ):\n self.title = title\n self.samples = samples\n self.features = features\n self.weights = weights\n self.scores = scores\n assert len(self.scores) == len(self.features)\n for col in self.scores:\n assert len(col) == len(self.samples)\n\n @property\n def nrow(self):\n return len(self.samples)\n\n @property\n def ncol(self):\n nSampleCol = 1\n nFinalScoreCol = 1\n return len(self.features) + nSampleCol + nFinalScoreCol\n\n @property\n def headers(self):\n featureHeaders = []\n for i in range(len(self.features)):\n header = \"{feature}({weight})\".format(\n feature=self.features[i], weight=self.weights[i]\n )\n featureHeaders.append(header)\n return [\"Options\"] + featureHeaders + [\"Final score(Sorted)\"]\n\n @property\n def rows(self):\n rows = []\n for i in range(self.nrow):\n sample = self.samples[i]\n values = [col[i] for col in self.scores]\n finalScore = weightSum(values, self.weights)\n row = [sample] + values + [finalScore]\n rows.append(row)\n # sortedRows = sorted(rows, key=lambda x: x[-1], reverse=True)\n return rows\n\n @property\n def table(self):\n rows = sorted(self.rows, key=lambda x: x[-1], reverse=True)\n return tabulate(rows, headers=self.headers)\n\n def __str__(self):\n return self.table\n\n def save(self, saveDir):\n \"\"\"save data as csv file\"\"\"\n filename = \"\".join([self.title, \".csv\"])\n filepath = os.path.join(saveDir, filename)\n writeCSV(filepath, rows=self.rows, headers=self.headers)\n return filepath\n\n\ndef readExists(filepath, title):\n rows, headers = readCSV(filepath)\n samples = []\n weights = []\n features = []\n scores = []\n for header in headers[1:-1]:\n # search = re.search(r\"(.+)\\((-?\\d+)\\)\", header)\n feature, weight = parseHeader(header)\n features.append(feature)\n weights.append(weight)\n for row in rows:\n row.pop()\n samples.append(row.pop(0))\n for i in range(len(features)):\n colScore = [row[i] for row in rows]\n scores.append([int(x) for x in colScore])\n tkdata = ThinkData(title, samples, features, weights, scores)\n return tkdata\n\n\ndef parseHeader(header):\n search = re.search(r\"(.+)\\((-?\\d+)\\)\", header)\n feature, weight = search.groups()\n weight = int(weight)\n return feature, weight\n\n\ndef uniqueId():\n uid = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n return uid\n\n\ndef weightSum(values, weights):\n s = 0\n for i in range(len(values)):\n s += values[i] * weights[i]\n return s\n", "repo_name": "brendonlin/yunyun", "sub_path": "yunyun/thinkdata.py", "file_name": "thinkdata.py", "file_ext": "py", "file_size_in_byte": 2964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tabulate.tabulate", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "common.writeCSV", "line_number": 65, "usage_type": "call"}, {"api_name": "common.readCSV", "line_number": 70, "usage_type": "call"}, {"api_name": "re.search", "line_number": 91, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 98, "usage_type": "call"}]}
+{"seq_id": "42046958193", "text": "import cv2\nimport torch\nimport RPi.GPIO as GPIO\nfrom yolov5.models.experimental import attempt_load\nfrom yolov5.utils.general import non_max_suppression, scale_coords\nfrom yolov5.utils.torch_utils import select_device\n\n# Set up 
YOLOv5\ndevice = select_device('')\nmodel = attempt_load('yolov5s.pt', map_location=device)\nstride = int(model.stride.max())\n\n# Set up GPIO for controlling LEDs on the Raspberry Pi\nGPIO.setmode(GPIO.BCM)\nled_pin = 18 # Change this pin number to the GPIO pin connected to the LED\nGPIO.setup(led_pin, GPIO.OUT)\n\n# Load the video\nvideo_path = 'path_to_your_video.mp4' # Replace this with the path to your video\ncap = cv2.VideoCapture(video_path)\n\nwhile cap.isOpened():\n ret, frame = cap.read()\n\n if not ret:\n break\n\n # Convert frame to tensor\n img = torch.from_numpy(frame).to(device)\n img = img.float() / 255.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Detect objects in the frame using YOLOv5\n pred = model(img, augment=False)[0]\n pred = non_max_suppression(pred, 0.4, 0.5)\n\n # Check for person detection\n if pred[0] is not None:\n det = scale_coords(img.shape[2:], pred[0][:, :4], frame.shape).round()\n for *xyxy, conf, cls in reversed(pred[0]):\n if int(cls) == 0: # Person class index\n # If person detected, turn on the LED\n GPIO.output(led_pin, GPIO.HIGH)\n break\n else:\n # If no person detected, turn off the LED\n GPIO.output(led_pin, GPIO.LOW)\n\n cv2.imshow('Video', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\nGPIO.cleanup()\n", "repo_name": "Sravanji/project", "sub_path": "yolov5.py", "file_name": "yolov5.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yolov5.utils.torch_utils.select_device", "line_number": 9, "usage_type": "call"}, {"api_name": "yolov5.models.experimental.attempt_load", "line_number": 10, "usage_type": "call"}, {"api_name": "RPi.GPIO.setmode", "line_number": 14, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 14, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 14, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 16, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 16, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 29, "usage_type": "call"}, {"api_name": "yolov5.utils.general.non_max_suppression", "line_number": 36, "usage_type": "call"}, {"api_name": "yolov5.utils.general.scale_coords", "line_number": 40, "usage_type": "call"}, {"api_name": "RPi.GPIO.output", "line_number": 44, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 44, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 44, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 48, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 48, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 48, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 55, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 56, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "27871327919", "text": "import hashlib\nimport hmac\nfrom Cryptodome.Cipher import AES\nfrom Cryptodome.Util.Padding import pad, unpad\nfrom time import time\nfrom typing import Optional, 
Union\n\n\nclass Clock:\n\n def now(self) -> float:\n return time()\n\n def nonce(self):\n return round(self.now() * 1000)\n\n\nclass RequestVerifier:\n\n def __init__(self, signing_secret: str, access_token: Optional[str] = None, clock: Clock = Clock()):\n self.signing_secret = signing_secret\n self.access_token = access_token\n self.clock = clock\n\n def request_id(self, timestamp: Optional[int] = None):\n if timestamp is not None:\n return self.md5_string(str(self.md5_string(timestamp)))\n\n return self.md5_string(self.md5_string(str(self.clock.nonce())))\n\n def md5_string(self, body: Union[str, bytes] = \"\") -> str:\n if isinstance(body, str):\n body = str.encode(body)\n return hashlib.md5(body).hexdigest()\n\n def generate_signature(\n self, *, timestamp: str, body: Union[str, bytes]\n ) -> Optional[str]:\n \"\"\"Generates a standard signature\"\"\"\n if timestamp is None:\n return None\n if body is None:\n body = \"\"\n if isinstance(body, bytes):\n body = body.decode(\"utf-8\")\n\n format_req = str.encode(f\"{body}\")\n encoded_secret = str.encode(self.signing_secret)\n request_hash = hmac.new(encoded_secret, format_req, hashlib.md5).hexdigest()\n calculated_signature = f\"{request_hash}\"\n return calculated_signature\n\n def generate_dynamic_signature(\n self, *, timestamp: str, body: Union[str, bytes]\n ) -> Optional[str]:\n \"\"\"Generates a dynamic signature\"\"\"\n if timestamp is None:\n return None\n if body is None:\n body = \"\"\n if isinstance(body, bytes):\n body = body.decode(\"utf-8\")\n\n format_req = str.encode(f\"{body}\")\n encoded_secret = str.encode(self.md5_string(f\"{self.access_token}{self.signing_secret}\"))\n request_hash = hmac.new(encoded_secret, format_req, hashlib.md5).hexdigest()\n calculated_signature = f\"{request_hash}\"\n return calculated_signature\n\n\nclass MD5Hasher:\n\n def hash(self, data: Union[str, bytes] = \"\") -> bytes:\n if isinstance(data, str):\n data = data.encode()\n return hashlib.md5(data).digest()\n\n def hex(self, data: Union[str, bytes] = \"\") -> str:\n if isinstance(data, str):\n data = data.encode()\n return hashlib.md5(data).hexdigest()\n\n\nclass CBCEncryptor:\n\n def __init__(self, iv: Union[str, bytes]):\n if isinstance(iv, str):\n iv = iv.encode()\n self.iv = iv\n\n def encrypt(self, key: Union[str, bytes], data: Union[str, bytes]) -> bytes:\n if isinstance(key, str):\n key = key.encode()\n if isinstance(data, str):\n data = data.encode()\n cipher = AES.new(key, AES.MODE_CBC, self.iv)\n return cipher.encrypt(pad(data, AES.block_size))\n\n def decrypt(self, key: Union[str, bytes], data: Union[str, bytes]) -> bytes:\n if isinstance(key, str):\n key = key.encode()\n if isinstance(data, str):\n data = bytes.fromhex(data)\n cipher = AES.new(key, AES.MODE_CBC, self.iv)\n return unpad(cipher.decrypt(data), AES.block_size)\n", "repo_name": "shauntarves/wyze-sdk", "sub_path": "wyze_sdk/signature/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 3369, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 264, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 31, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 34, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 37, "usage_type": "name"}, 
{"api_name": "hmac.new", "line_number": 49, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 54, "usage_type": "name"}, {"api_name": "hmac.new", "line_number": 66, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 66, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 73, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 76, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 78, "usage_type": "name"}, {"api_name": "hashlib.md5", "line_number": 81, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 91, "usage_type": "name"}, {"api_name": "Cryptodome.Cipher.AES.new", "line_number": 96, "usage_type": "call"}, {"api_name": "Cryptodome.Cipher.AES", "line_number": 96, "usage_type": "name"}, {"api_name": "Cryptodome.Cipher.AES.MODE_CBC", "line_number": 96, "usage_type": "attribute"}, {"api_name": "Cryptodome.Util.Padding.pad", "line_number": 97, "usage_type": "call"}, {"api_name": "Cryptodome.Cipher.AES.block_size", "line_number": 97, "usage_type": "attribute"}, {"api_name": "Cryptodome.Cipher.AES", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 99, "usage_type": "name"}, {"api_name": "Cryptodome.Cipher.AES.new", "line_number": 104, "usage_type": "call"}, {"api_name": "Cryptodome.Cipher.AES", "line_number": 104, "usage_type": "name"}, {"api_name": "Cryptodome.Cipher.AES.MODE_CBC", "line_number": 104, "usage_type": "attribute"}, {"api_name": "Cryptodome.Util.Padding.unpad", "line_number": 105, "usage_type": "call"}, {"api_name": "Cryptodome.Cipher.AES.block_size", "line_number": 105, "usage_type": "attribute"}, {"api_name": "Cryptodome.Cipher.AES", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "39401682142", "text": "# coding: utf-8\n\n\"\"\"\n Marketing API v.1.0\n\n IMPORTANT: This swagger links to Criteo production environment. Any test applied here will thus impact real campaigns. 
# noqa: E501\n\n The version of the OpenAPI document: v.1.0\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass ProductShippingV3(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'price': 'Price',\n 'country': 'str',\n 'region': 'str',\n 'service': 'str',\n 'location_id': 'object',\n 'location_group_name': 'str',\n 'postal_code': 'str'\n }\n\n attribute_map = {\n 'price': 'price',\n 'country': 'country',\n 'region': 'region',\n 'service': 'service',\n 'location_id': 'locationId',\n 'location_group_name': 'locationGroupName',\n 'postal_code': 'postalCode'\n }\n\n def __init__(self, price=None, country=None, region=None, service=None, location_id=None, location_group_name=None, postal_code=None): # noqa: E501\n \"\"\"ProductShippingV3 - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._price = None\n self._country = None\n self._region = None\n self._service = None\n self._location_id = None\n self._location_group_name = None\n self._postal_code = None\n self.discriminator = None\n\n if price is not None:\n self.price = price\n if country is not None:\n self.country = country\n if region is not None:\n self.region = region\n if service is not None:\n self.service = service\n if location_id is not None:\n self.location_id = location_id\n if location_group_name is not None:\n self.location_group_name = location_group_name\n if postal_code is not None:\n self.postal_code = postal_code\n\n @property\n def price(self):\n \"\"\"Gets the price of this ProductShippingV3. # noqa: E501\n\n\n :return: The price of this ProductShippingV3. # noqa: E501\n :rtype: Price\n \"\"\"\n return self._price\n\n @price.setter\n def price(self, price):\n \"\"\"Sets the price of this ProductShippingV3.\n\n\n :param price: The price of this ProductShippingV3. # noqa: E501\n :type: Price\n \"\"\"\n\n self._price = price\n\n @property\n def country(self):\n \"\"\"Gets the country of this ProductShippingV3. # noqa: E501\n\n The CLDR territory code of the country to which an item will ship. # noqa: E501\n\n :return: The country of this ProductShippingV3. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country\n\n @country.setter\n def country(self, country):\n \"\"\"Sets the country of this ProductShippingV3.\n\n The CLDR territory code of the country to which an item will ship. # noqa: E501\n\n :param country: The country of this ProductShippingV3. # noqa: E501\n :type: str\n \"\"\"\n\n self._country = country\n\n @property\n def region(self):\n \"\"\"Gets the region of this ProductShippingV3. # noqa: E501\n\n The geographic region to which a shipping rate applies. # noqa: E501\n\n :return: The region of this ProductShippingV3. # noqa: E501\n :rtype: str\n \"\"\"\n return self._region\n\n @region.setter\n def region(self, region):\n \"\"\"Sets the region of this ProductShippingV3.\n\n The geographic region to which a shipping rate applies. # noqa: E501\n\n :param region: The region of this ProductShippingV3. # noqa: E501\n :type: str\n \"\"\"\n\n self._region = region\n\n @property\n def service(self):\n \"\"\"Gets the service of this ProductShippingV3. 
# noqa: E501\n\n A free-form description of the service class or delivery speed. # noqa: E501\n\n :return: The service of this ProductShippingV3. # noqa: E501\n :rtype: str\n \"\"\"\n return self._service\n\n @service.setter\n def service(self, service):\n \"\"\"Sets the service of this ProductShippingV3.\n\n A free-form description of the service class or delivery speed. # noqa: E501\n\n :param service: The service of this ProductShippingV3. # noqa: E501\n :type: str\n \"\"\"\n\n self._service = service\n\n @property\n def location_id(self):\n \"\"\"Gets the location_id of this ProductShippingV3. # noqa: E501\n\n The numeric ID of a location that the shipping rate applies to as defined in the AdWords API. # noqa: E501\n\n :return: The location_id of this ProductShippingV3. # noqa: E501\n :rtype: object\n \"\"\"\n return self._location_id\n\n @location_id.setter\n def location_id(self, location_id):\n \"\"\"Sets the location_id of this ProductShippingV3.\n\n The numeric ID of a location that the shipping rate applies to as defined in the AdWords API. # noqa: E501\n\n :param location_id: The location_id of this ProductShippingV3. # noqa: E501\n :type: object\n \"\"\"\n\n self._location_id = location_id\n\n @property\n def location_group_name(self):\n \"\"\"Gets the location_group_name of this ProductShippingV3. # noqa: E501\n\n The location where the shipping is applicable, represented by a location group name. # noqa: E501\n\n :return: The location_group_name of this ProductShippingV3. # noqa: E501\n :rtype: str\n \"\"\"\n return self._location_group_name\n\n @location_group_name.setter\n def location_group_name(self, location_group_name):\n \"\"\"Sets the location_group_name of this ProductShippingV3.\n\n The location where the shipping is applicable, represented by a location group name. # noqa: E501\n\n :param location_group_name: The location_group_name of this ProductShippingV3. # noqa: E501\n :type: str\n \"\"\"\n\n self._location_group_name = location_group_name\n\n @property\n def postal_code(self):\n \"\"\"Gets the postal_code of this ProductShippingV3. # noqa: E501\n\n The postal code range that the shipping rate applies to, represented by a postal code, a postal code prefix followed by a * wildcard, a range between two postal codes or two postal code prefixes of equal length. # noqa: E501\n\n :return: The postal_code of this ProductShippingV3. # noqa: E501\n :rtype: str\n \"\"\"\n return self._postal_code\n\n @postal_code.setter\n def postal_code(self, postal_code):\n \"\"\"Sets the postal_code of this ProductShippingV3.\n\n The postal code range that the shipping rate applies to, represented by a postal code, a postal code prefix followed by a * wildcard, a range between two postal codes or two postal code prefixes of equal length. # noqa: E501\n\n :param postal_code: The postal_code of this ProductShippingV3. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._postal_code = postal_code\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ProductShippingV3):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "repo_name": "criteo/criteo-python-marketing-sdk", "sub_path": "criteo_marketing/models/product_shipping_v3.py", "file_name": "product_shipping_v3.py", "file_ext": "py", "file_size_in_byte": 8744, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "52", "api": [{"api_name": "six.iteritems", "line_number": 243, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 265, "usage_type": "call"}]} +{"seq_id": "37666637527", "text": "\"\"\"Spectogram input/output.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio\n\nimport scipy.io.wavfile\nimport librosa\n\ndef main(_):\n # enable logging\n tf.logging.set_verbosity(tf.logging.INFO)\n # start tensorflow session\n sess = tf.Session()\n \n ''' model settings '''\n desired_samples = int(FLAGS.sample_rate * FLAGS.clip_duration_ms / 1000)\n window_size_samples = int(FLAGS.sample_rate * FLAGS.window_size_ms / 1000)\n window_stride_samples = int(FLAGS.sample_rate * FLAGS.window_stride_ms / 1000)\n length_minus_window = (desired_samples - window_size_samples)\n if length_minus_window < 0:\n spectrogram_length = 0\n else:\n spectrogram_length = 1 + int(length_minus_window / window_stride_samples)\n fingerprint_size = FLAGS.dct_coefficient_count * spectrogram_length\n ''' ------------- '''\n \n ''' loads and reads audio file '''\n wav_loader = io_ops.read_file(FLAGS.input_wav)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples) \n sample_rate, audio = sess.run(\n [\n wav_decoder.sample_rate,\n wav_decoder.audio\n ])\n\n print('original:', audio.shape)\n #print(audio)\n #plt.figure(1)\n #plt.plot(np.concatenate(audio,axis=0))\n #plt.show()\n \n ''' scale shift and padd '''\n scaled_foreground = sess.run(tf.multiply(audio, FLAGS.scale_factor))\n print('scaled:', scaled_foreground.shape)\n \n if FLAGS.time_shift == 0:\n time_shift_amount = 0\n else:\n time_shift_amount = np.random.randint(-FLAGS.time_shift, FLAGS.time_shift)\n if time_shift_amount > 0:\n 
time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n time_shift_offset = [0, 0]\n else:\n time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n time_shift_offset = [-time_shift_amount, 0]\n \n print('padding :', time_shift_padding)\n print('shifting :', time_shift_offset)\n \n # padding\n padded_foreground = tf.pad(\n scaled_foreground,\n time_shift_padding,\n mode='CONSTANT') \n padded_foreground = sess.run(padded_foreground)\n \n #plt.figure(2)\n #plt.plot(padded_foreground)\n #plt.show()\n \n # slicing \n sliced_foreground = tf.slice(padded_foreground,\n time_shift_offset,\n [FLAGS.sample_rate, -1])\n sliced_foreground = sess.run(sliced_foreground) \n \n #plt.figure(3)\n #plt.plot(sliced_foreground)\n #plt.show()\n\n test_diff = scaled_foreground - sliced_foreground\n print('diff between padded and non-padded:', np.linalg.norm(test_diff))\n\n current_wav = sliced_foreground\n noise_add = current_wav\n\n if bool(FLAGS.add_noise):\n ''' loads and reads noise audio file '''\n wav_loader = io_ops.read_file(FLAGS.noise_input_wav)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples) \n noise_sample_rate, noise_audio = sess.run(\n [\n wav_decoder.sample_rate,\n wav_decoder.audio\n ])\n noise_audio = sess.run(tf.multiply(noise_audio, FLAGS.noise_scale_factor))\n\n #plt.figure(4)\n #plt.plot(np.concatenate(noise_audio,axis=0))\n #plt.show()\n\n ''' add noise to audio '''\n noise_add = sess.run(tf.add(noise_audio, sliced_foreground))\n #print('add:', noise_add)\n #plt.figure(4)\n #plt.plot(np.concatenate(noise_add,axis=0))\n #plt.show()\n\n current_wav = noise_add\n\n #output_data = np.concatenate(current_wav, axis=0)\n #print('blurred shape:', output_data.shape)\n #print(output_data)\n #scipy.io.wavfile.write('yes_noised.wav', sample_rate, output_data)\n #librosa.output.write_wav('yes_noised1.wav',output_data,sample_rate)\n\n \n ''' clip all tensor values to segment [-1.0,1.0] '''\n clipped_wav = sess.run(tf.clip_by_value(current_wav, -1.0, 1.0))\n #print('clamp:',noise_clamp.shape)\n #plt.figure(3)\n #plt.plot(np.concatenate(clipped_wav,axis=0))\n #plt.show()\n\n \n ''' create spectrogram '''\n spectrogram = contrib_audio.audio_spectrogram(\n clipped_wav,\n window_size=window_size_samples,\n stride=window_stride_samples,\n magnitude_squared=True) \n spectrogram = sess.run(spectrogram)\n print('spectrogram shape :', spectrogram.shape)\n\n print(spectrogram) \n spectrogram_length = desired_samples / window_size_samples\n \n print('spectrogram length:', spectrogram_length)\n print('fingerprint_size :', fingerprint_size)\n \n ''' create mfcc '''\n mfcc = contrib_audio.mfcc(\n spectrogram,\n wav_decoder.sample_rate,\n dct_coefficient_count=FLAGS.dct_coefficient_count) \n mfcc = sess.run(mfcc)#.flatten()\n print('mfcc shape :', mfcc.shape)\n #print(mfcc)\n \n\n ''' plotting process '''\n f, ((ax1,ax2),(ax3,ax4),(ax5,ax6)) = plt.subplots(3,2,sharey='row')\n \n ax1.set_title('original audio file') \n ax1.plot(audio)\n\n ax2.set_title('scaled, padded, sliced')\n ax2.plot(sliced_foreground)\n\n ax3.set_title('noised') \n ax3.plot(noise_add)\n\n ax4.set_title('clipped')\n ax4.plot(clipped_wav)\n\n ax5.set_title('spectrogram')\n ax5.matshow(spectrogram[0], \n interpolation='nearest', \n aspect='auto', \n cmap=plt.get_cmap('Greys_r'),\n origin='lower')\n ax5.xaxis.set_ticks_position('bottom')\n\n ax6.set_title('MFCC')\n ax6.matshow(mfcc[0], \n interpolation='nearest', \n aspect='auto', \n cmap=plt.get_cmap('Greys_r'), \n origin='lower')\n 
ax6.xaxis.set_ticks_position('bottom')\n\n plt.tight_layout()\n plt.show()\n\n \n ''' plot audio and mfcc only '''\n f, (ax1,ax2) = plt.subplots(2,1)\n\n ax1.set_title('noised, shifted audio file; word YES') \n ax1.set_xlim([0,current_wav.shape[0]])\n ax1.set_ylim([1.1*min(current_wav),1.1*max(current_wav)])\n ax1.plot(current_wav)\n\n ax2.set_title('MFCC') \n ax2.set_xlim([0,mfcc.shape[2]])\n ax2.set_ylim([0,mfcc.shape[1]])\n ax2.matshow(mfcc[0], \n interpolation='nearest', \n aspect='auto', \n cmap=plt.get_cmap('Greys_r'),\n origin='lower')\n ax2.xaxis.set_ticks_position('bottom')\n \n plt.tight_layout()\n plt.show()\n\n\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\n '--input_wav',\n type=str,\n default='',\n help='.wav file to create spectrogram from.')\n \n parser.add_argument(\n '--clip_duration_ms',\n type=int,\n default=1000,\n help='.wav duration in ms.')\n \n parser.add_argument(\n '--sample_rate',\n type=int,\n default=16000,\n help='expected sample rate of wav files.')\n\n parser.add_argument(\n '--add_noise',\n type=int,\n default=1,\n help='whether to add noise.')\n\n parser.add_argument(\n '--noise_input_wav',\n type=str,\n default='data/_background_noise_/white_noise.wav',\n help='noise .wav files.')\n\n parser.add_argument(\n '--noise_scale_factor',\n type=float,\n default=0.1,\n help='coefficient to scale noise volume by.')\n \n parser.add_argument(\n '--time_shift',\n type=float,\n default=1000.0,\n help='range to randomly shift audio in time (ms).')\n \n parser.add_argument(\n '--scale_factor',\n type=float,\n default=1.0,\n help='coefficient to scale volume by.')\n \n parser.add_argument(\n '--window_size_ms',\n type=float,\n default=30.0,\n help=' --- ')\n \n parser.add_argument(\n '--window_stride_ms',\n type=float,\n default=10.0,\n help=' --- ')\n \n parser.add_argument(\n '--dct_coefficient_count',\n type=int,\n default=30,\n help='How many bins to use for the MFCC fingerprint')\n\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n\n", "repo_name": "Qkvad/SpeechRecognition", "sub_path": "tf/src/preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 7931, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "tensorflow.logging.set_verbosity", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.logging", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.io_ops.read_file", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.io_ops", "line_number": 41, "usage_type": "name"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops.decode_wav", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops", "line_number": 42, "usage_type": "name"}, {"api_name": "tensorflow.multiply", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.pad", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.slice", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 96, "usage_type": "attribute"}, 
{"api_name": "tensorflow.python.ops.io_ops.read_file", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.python.ops.io_ops", "line_number": 103, "usage_type": "name"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops.decode_wav", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops", "line_number": 104, "usage_type": "name"}, {"api_name": "tensorflow.multiply", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 134, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops.audio_spectrogram", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops", "line_number": 142, "usage_type": "name"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops.mfcc", "line_number": 157, "usage_type": "call"}, {"api_name": "tensorflow.contrib.framework.python.ops.audio_ops", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_cmap", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow.app.run", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 294, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 294, "usage_type": "attribute"}]} +{"seq_id": "23169170262", "text": "from fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import Session, joinedload\n\nimport schemas\nfrom auth import user_dependency\nfrom database import get_db\nfrom models import Author, Book\n\nrouter = APIRouter()\n\n# current_user: user_dependency,\n\n\ndef verify_author_id(db, author_id):\n db_author = db.query(Author).filter(Author.id == author_id).first()\n if not db_author:\n raise HTTPException(status_code=404, detail=\"Author not found\")\n\n\n@router.post(\"/\", response_model=schemas.BookWithID)\ndef add_book(current_user: user_dependency, book: 
schemas.BookUpsert, db: Session = Depends(get_db)):\n # Verify that the author exists\n verify_author_id(db, book.author_id)\n\n db_book = Book(name=book.name, author_id=book.author_id, num_pages=book.num_pages)\n db.add(db_book)\n db.commit()\n return db_book\n\n\n@router.delete(\"/{book_id}\", response_model=schemas.BookWithID)\ndef delete_book(current_user: user_dependency, book_id: int, db: Session = Depends(get_db)):\n db_book = db.query(Book).options(joinedload(Book.author)).filter(Book.id == book_id).first()\n if db_book is None:\n raise HTTPException(status_code=404, detail=\"Book not found\")\n\n db.delete(db_book)\n db.commit()\n\n return db_book\n\n\n@router.get(\"/\", response_model=schemas.PaginationResponse[schemas.Book])\ndef get_books(current_user: user_dependency, skip: int = 0, limit: int = 10, db: Session = Depends(get_db)):\n total_count = db.query(Book).count()\n books = db.query(Book, Author).join(Author).order_by(Book.id.desc()).offset(skip).limit(limit).all()\n books_with_authors = [\n {\"id\": book.id, \"name\": book.name, \"author\": author, \"num_pages\": book.num_pages} for book, author in books\n ]\n\n current_page = (skip // limit) + 1\n total_pages = (total_count - 1) // limit + 1\n has_next_page = skip + limit < total_count\n has_prev_page = skip > 0\n\n return schemas.PaginationResponse[schemas.Book](\n total_count=total_count,\n current_page=current_page,\n total_pages=total_pages,\n has_next_page=has_next_page,\n has_prev_page=has_prev_page,\n data=books_with_authors,\n )\n\n\n@router.get(\"/{book_id}\", response_model=schemas.Book)\ndef get_one_book(current_user: user_dependency, book_id: int, db: Session = Depends(get_db)):\n result = db.query(Book, Author).join(Author).filter(Book.id == book_id).first()\n\n if not result:\n raise HTTPException(status_code=404, detail=\"Book not found\")\n\n book, author = result\n\n return {\n \"id\": book.id,\n \"name\": book.name,\n \"author\": author,\n \"num_pages\": book.num_pages,\n }\n\n\n@router.patch(\"/{book_id}\", response_model=schemas.BookWithID)\ndef update_book(current_user: user_dependency, book_id: int, book: schemas.BookUpsert, db: Session = Depends(get_db)):\n db_book = db.query(Book).filter(Book.id == book_id).first()\n if not db_book:\n raise HTTPException(status_code=404, detail=\"Book not found\")\n\n # Verify that the author exists\n # if \"author_id\" in book.dict():\n # verify_author_id(db, book.author_id)\n\n book_data = book.dict(exclude_unset=True)\n for key, value in book_data.items():\n setattr(db_book, key, value)\n\n db.commit()\n db.refresh(db_book)\n return db_book\n\n\n@router.get(\"/search/\", response_model=list[schemas.Book])\ndef search_books(current_user: user_dependency, query: str, db: Session = Depends(get_db)):\n books = (\n db.query(Book, Author)\n .filter(\n or_(Book.name.ilike(f\"%{query}%\"), Author.name.ilike(f\"%{query}%\")),\n )\n .join(Book.author)\n .all()\n )\n\n results = []\n for book, author in books:\n result = {\n \"id\": book.id,\n \"name\": book.name,\n \"author\": {\n \"id\": author.id,\n \"name\": author.name,\n },\n \"num_pages\": book.num_pages,\n }\n results.append(result)\n\n return results\n\n\n@router.get(\"/{author_id}\", response_model=schemas.PaginationResponse[schemas.Book])\ndef get_books_by_author(\n current_user: user_dependency,\n author_id: int,\n skip: int = 0,\n limit: int = 10,\n db: Session = Depends(get_db),\n):\n total_count = db.query(Book).count()\n books = db.query(Book, 
Author).join(Author).filter(Author.id == author_id).offset(skip).limit(limit).all()\n books_with_authors = [{\"id\": book.id, \"name\": book.name, \"author\": author, \"num_pages\": book.num_pages} for book, author in books]\n\n current_page = (skip // limit) + 1\n total_pages = (total_count - 1) // limit + 1\n has_next_page = skip + limit < total_count\n has_prev_page = skip > 0\n\n return schemas.PaginationResponse[schemas.Book](\n total_count=total_count,\n current_page=current_page,\n total_pages=total_pages,\n has_next_page=has_next_page,\n has_prev_page=has_prev_page,\n data=books_with_authors,\n )\n", "repo_name": "mrhassaan012/developer-full-stack-challenge", "sub_path": "src/api/routers/books.py", "file_name": "books.py", "file_ext": "py", "file_size_in_byte": 4948, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Author", "line_number": 16, "usage_type": "argument"}, {"api_name": "models.Author.id", "line_number": 16, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 18, "usage_type": "call"}, {"api_name": "auth.user_dependency", "line_number": 22, "usage_type": "name"}, {"api_name": "schemas.BookUpsert", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 22, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 22, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 26, "usage_type": "call"}, {"api_name": "schemas.BookWithID", "line_number": 21, "usage_type": "attribute"}, {"api_name": "auth.user_dependency", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 33, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 33, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 33, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 34, "usage_type": "argument"}, {"api_name": "sqlalchemy.orm.joinedload", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Book.author", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Book.id", "line_number": 34, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 36, "usage_type": "call"}, {"api_name": "schemas.BookWithID", "line_number": 32, "usage_type": "attribute"}, {"api_name": "auth.user_dependency", "line_number": 45, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 45, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 45, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 46, "usage_type": "argument"}, {"api_name": "models.Author", "line_number": 47, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 47, "usage_type": "argument"}, {"api_name": "models.Book.id.desc", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Book.id", "line_number": 47, "usage_type": "attribute"}, {"api_name": "schemas.PaginationResponse", "line_number": 57, "usage_type": "attribute"}, {"api_name": "schemas.Book", "line_number": 57, "usage_type": "attribute"}, {"api_name": "schemas.PaginationResponse", "line_number": 44, "usage_type": "attribute"}, {"api_name": "schemas.Book", "line_number": 44, "usage_type": "attribute"}, {"api_name": 
"auth.user_dependency", "line_number": 68, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 68, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 68, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 68, "usage_type": "argument"}, {"api_name": "models.Author", "line_number": 69, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 69, "usage_type": "argument"}, {"api_name": "models.Book.id", "line_number": 69, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 72, "usage_type": "call"}, {"api_name": "schemas.Book", "line_number": 67, "usage_type": "attribute"}, {"api_name": "auth.user_dependency", "line_number": 85, "usage_type": "name"}, {"api_name": "schemas.BookUpsert", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 85, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 85, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 85, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 86, "usage_type": "argument"}, {"api_name": "models.Book.id", "line_number": 86, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 88, "usage_type": "call"}, {"api_name": "schemas.BookWithID", "line_number": 84, "usage_type": "attribute"}, {"api_name": "auth.user_dependency", "line_number": 104, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 104, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 104, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 104, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 106, "usage_type": "argument"}, {"api_name": "models.Author", "line_number": 106, "usage_type": "argument"}, {"api_name": "sqlalchemy.or_", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Book.name.ilike", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Book.name", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 108, "usage_type": "name"}, {"api_name": "models.Author.name.ilike", "line_number": 108, "usage_type": "call"}, {"api_name": "models.Author.name", "line_number": 108, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 108, "usage_type": "name"}, {"api_name": "models.Book.author", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 110, "usage_type": "name"}, {"api_name": "schemas.Book", "line_number": 103, "usage_type": "attribute"}, {"api_name": "auth.user_dependency", "line_number": 132, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 136, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 136, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 136, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 138, "usage_type": "argument"}, {"api_name": "models.Author", "line_number": 139, "usage_type": "argument"}, {"api_name": "models.Book", "line_number": 139, "usage_type": "argument"}, {"api_name": "models.Author.id", "line_number": 139, "usage_type": "attribute"}, {"api_name": "schemas.PaginationResponse", "line_number": 147, "usage_type": "attribute"}, {"api_name": "schemas.Book", "line_number": 147, "usage_type": "attribute"}, {"api_name": "schemas.PaginationResponse", "line_number": 130, "usage_type": "attribute"}, {"api_name": "schemas.Book", 
"line_number": 130, "usage_type": "attribute"}]} +{"seq_id": "72792818725", "text": "from collections import Counter\n\n\ndef high_card(hand):\n highest = 0\n for i in hand:\n if i == 'A':\n return 14\n elif i == 'K':\n if 13 >= highest:\n highest = 13\n elif i == 'Q':\n if 12 >= highest:\n highest = 12\n elif i == 'J':\n if 11 >= highest:\n highest = 11\n elif i == 'T':\n if 10 >= highest:\n highest = 10\n elif int(i) > highest:\n highest = int(i)\n return highest\n\n\ndef convert_to_lists(s):\n cards = s.split(' ')\n card_value = []\n card_type = []\n for i in cards:\n card_value.append(i[0])\n card_type.append(i[1])\n return [card_value, card_type]\n\n\ndef number_of_pairs(l):\n count = Counter(l) - Counter(set(l))\n pairs = 0\n for i in count:\n if count[i] == 1:\n pairs += 1\n if pairs:\n return pairs + 1\n return 0\n\n\ndef three_of_a_kind(l):\n count = Counter(l) - Counter(set(l))\n for i in count:\n if count[i] == 2:\n return 4\n return 0\n\n\ndef straight(l):\n rating = {14: 'A', 13: 'K', 12: 'Q', 11: 'J', 10: 'T', 9: '9',\n 8: '8', 7: '7', 6: '6', 5: '5', 4: '4', 3: '3', 2: '2', 1: '1'}\n hv = high_card(l)\n if rating[hv-1] in l:\n if rating[hv-2] in l:\n if rating[hv-3] in l:\n if rating[hv-4] in l:\n return 5\n return 0\n\n\ndef flush(l):\n if len(set(l)) == 1:\n return 6\n return 0\n\n\ndef full_house(l):\n if three_of_a_kind(l):\n if number_of_pairs(l) == 2:\n return 7\n return 0\n\n\ndef four_of_a_kind(l):\n dup = Counter(l) - Counter(set(l))\n for i in dup:\n if dup[i] == 3:\n return 8\n return 0\n\n\ndef straight_flush(l, v):\n if straight(l) and flush(v):\n return 9\n return 0\n\n\ndef royal_flush(l, v):\n if set(['T', 'J', 'Q', 'K', 'A']) == set(l):\n if len(set(v)) == 1:\n return 10\n return 0\n\n\ndef paired_number(l):\n repeated = (Counter(l) - Counter(set(l))).keys()\n rating = {'A': 14, 'K': 13, 'Q': 12, 'J': 11, 'T': 10, '9': 9,\n '8': 8, '7': 7, '6': 6, '5': 5, '4': 4, '3': 3, '2': 2, '1': 1}\n highest = 0\n for i in repeated:\n if rating[i] > highest:\n highest = rating[i]\n return highest\n\nf = open('poker.txt')\n\ngames = f.read()\nf.close()\ntwo_hands = games.strip().split('\\n')\nhands = []\nfor i in two_hands:\n fh = convert_to_lists(i[:14])\n sh = convert_to_lists(i[15:])\n hands.append([fh, sh])\n\nplayer1 = 0\n\nfor i in hands:\n p1v = i[0][0]\n p2v = i[1][0]\n p1s = i[0][1]\n p2s = i[1][1]\n p1 = 0\n p2 = 0\n flag = False\n if number_of_pairs(p1v):\n p1 = number_of_pairs(p1v)\n flag = True\n if three_of_a_kind(p1v):\n p1 = three_of_a_kind(p1v)\n flag = True\n if straight(p1v):\n p1 = straight(p1v)\n if flush(p1s):\n p1 = flush(p1s)\n if full_house(p1v):\n p1 = full_house(p1v)\n flag = True\n if four_of_a_kind(p1v):\n p1 = four_of_a_kind(p1v)\n flag = True\n if straight_flush(p1v, p1s):\n p1 = straight_flush(p1v, p1s)\n if royal_flush(p1v, p1s):\n p1 = royal_flush(p1v, p1s)\n\n if number_of_pairs(p2v):\n p2 = number_of_pairs(p2v)\n flag = True\n if three_of_a_kind(p2v):\n p2 = three_of_a_kind(p2v)\n flag = True\n if straight(p2v):\n p2 = straight(p2v)\n if flush(p2s):\n p2 = flush(p2s)\n if full_house(p2v):\n p2 = full_house(p2v)\n flag = True\n if four_of_a_kind(p2v):\n p2 = four_of_a_kind(p2v)\n flg = True\n if straight_flush(p2v, p2s):\n p2 = straight_flush(p2v, p2s)\n if royal_flush(p2v, p2s):\n p2 = royal_flush(p2v, p2s)\n\n if p1 > p2:\n player1 += 1\n elif p1 == p2:\n if flag:\n if paired_number(i[0][0]) > paired_number(i[1][0]):\n player1 += 1\n elif paired_number(i[0][0]) == paired_number(i[1][0]):\n if high_card(i[0][0]) > high_card(i[1][0]):\n 
player1 += 1\n else:\n if high_card(i[0][0]) > high_card(i[1][0]):\n player1 += 1\n\nprint( player1)", "repo_name": "Krish-bhardwaj/PROJECT_EULER", "sub_path": "P54.py", "file_name": "P54.py", "file_ext": "py", "file_size_in_byte": 4250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 48, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 81, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "73281367194", "text": "# -*- coding: utf-8 -*-\nimport xbmc\nimport xbmcaddon\nimport xbmcgui\nimport xbmcvfs\nimport json\nimport os\nimport sys\nimport requests.cookies\nimport requests.adapters\nimport requests\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom resources.lib import xml_structure\nfrom resources.lib import channel_selector\nfrom resources.lib import mapper\nfrom resources.lib import filesplit\n\nprovider = 'MAGENTA TV (DE)'\nlang = 'de'\n\nADDON = xbmcaddon.Addon(id=\"service.takealug.epg-grabber\")\naddon_name = ADDON.getAddonInfo('name')\naddon_version = ADDON.getAddonInfo('version')\nloc = ADDON.getLocalizedString\ndatapath = xbmcvfs.translatePath(ADDON.getAddonInfo('profile'))\ntemppath = os.path.join(datapath, \"temp\")\nprovider_temppath = os.path.join(temppath, \"magentaDE\")\n\n## Enable Multithread\nenable_multithread = True if ADDON.getSetting('enable_multithread').upper() == 'TRUE' else False\nif enable_multithread:\n try:\n from multiprocessing import Process\n except:\n pass\n\n## MAPPING Variables Thx @ sunsettrack4\ntkm_genres_url = 'https://raw.githubusercontent.com/sunsettrack4/config_files/master/tkm_genres.json'\ntkm_genres_json = os.path.join(provider_temppath, 'tkm_genres.json')\ntkm_channels_url = 'https://raw.githubusercontent.com/sunsettrack4/config_files/master/tkm_channels.json'\ntkm_channels_json = os.path.join(provider_temppath, 'tkm_channels.json')\n\n## Log Files\nmagentaDE_genres_warnings_tmp = os.path.join(provider_temppath, 'magentaDE_genres_warnings.txt')\nmagentaDE_genres_warnings = os.path.join(temppath, 'magentaDE_genres_warnings.txt')\nmagentaDE_channels_warnings_tmp = os.path.join(provider_temppath, 'magentaDE_channels_warnings.txt')\nmagentaDE_channels_warnings = os.path.join(temppath, 'magentaDE_channels_warnings.txt')\n\n## Read Magenta DE Settings\ndays_to_grab = int(ADDON.getSetting('magentaDE_days_to_grab'))\nepisode_format = ADDON.getSetting('magentaDE_episode_format')\nchannel_format = ADDON.getSetting('magentaDE_channel_format')\ngenre_format = ADDON.getSetting('magentaDE_genre_format')\n\n\n# Make a debug logger\ndef log(message, loglevel=xbmc.LOGDEBUG):\n xbmc.log('[{} {}] {}'.format(addon_name, addon_version, message), loglevel)\n\n\n# Make OSD Notify Messages\nOSD = xbmcgui.Dialog()\n\n\ndef notify(title, message, icon=xbmcgui.NOTIFICATION_INFO):\n OSD.notification(title, message, icon)\n\ndef get_epgLength(days_to_grab):\n # Calculate Date and Time\n today = datetime.today()\n calc_today = datetime(today.year, today.month, today.day, hour=00, minute=00, second=1)\n\n calc_then = datetime(today.year, today.month, today.day, hour=23, minute=59, second=59)\n calc_then += timedelta(days=days_to_grab)\n\n starttime = calc_today.strftime(\"%Y%m%d%H%M%S\")\n endtime = calc_then.strftime(\"%Y%m%d%H%M%S\")\n\n return starttime, 
endtime\n\n## Channel Files\nmagentaDE_chlist_provider_tmp = os.path.join(provider_temppath, 'chlist_magentaDE_provider_tmp.json')\nmagentaDE_chlist_provider = os.path.join(provider_temppath, 'chlist_magentaDE_provider.json')\nmagentaDE_chlist_selected = os.path.join(datapath, 'chlist_magentaDE_selected.json')\n\nmagentaDE_login_url = 'https://api.prod.sngtv.magentatv.de/EPG/JSON/Login?&T=PC_firefox_75'\nmagentaDE_authenticate_url = 'https://api.prod.sngtv.magentatv.de/EPG/JSON/Authenticate?SID=firstup&T=PC_firefox_75'\nmagentaDE_channellist_url = 'https://api.prod.sngtv.magentatv.de/EPG/JSON/AllChannel?SID=first&T=PC_firefox_75'\nmagentaDE_data_url = 'https://api.prod.sngtv.magentatv.de/EPG/JSON/PlayBillList?userContentFilter=241221015&sessionArea=1&SID=ottall&T=PC_firefox_75'\n\nmagentaDE_login = {'userId': 'Guest', 'mac': '00:00:00:00:00:00'}\nmagentaDE_authenticate = {'terminalid': '00:00:00:00:00:00', 'mac': '00:00:00:00:00:00', 'terminaltype': 'WEBTV','utcEnable': '1', 'timezone': 'UTC', 'userType': '3', 'terminalvendor': 'Unknown','preSharedKeyID': 'PC01P00002', 'cnonce': '5c6ff0b9e4e5efb1498e7eaa8f54d9fb'}\nmagentaDE_get_chlist = {'properties': [{'name': 'logicalChannel','include': '/channellist/logicalChannel/contentId,/channellist/logicalChannel/name,/channellist/logicalChannel/pictures/picture/imageType,/channellist/logicalChannel/pictures/picture/href'}],'metaDataVer': 'Channel/1.1', 'channelNamespace': '2','filterlist': [{'key': 'IsHide', 'value': '-1'}], 'returnSatChannel': '0'}\nmagentaDE_header = {'Host': 'api.prod.sngtv.magentatv.de',\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:75.0) Gecko/20100101 Firefox/75.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Language': 'de,en-US;q=0.7,en;q=0.3',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1'}\nmagentaDE_session_cookie = os.path.join(provider_temppath, 'cookies.json')\n\n\n## Login and Authenticate to web.magenta.tv\ndef magentaDE_session():\n session = requests.Session()\n session.post(magentaDE_login_url, data=json.dumps(magentaDE_login), headers=magentaDE_header)\n session.post(magentaDE_authenticate_url, data=json.dumps(magentaDE_authenticate), headers=magentaDE_header)\n ## Save Cookies to Disk\n with open(magentaDE_session_cookie, 'w', encoding='utf-8') as f:\n json.dump(requests.utils.dict_from_cookiejar(session.cookies), f)\n\n\n## Get channel list(url)\ndef get_channellist():\n magentaDE_session()\n session = requests.Session()\n ## Load Cookies from Disk\n with open(magentaDE_session_cookie, 'r', encoding='utf-8') as f:\n session.cookies = requests.utils.cookiejar_from_dict(json.load(f))\n\n magenta_CSRFToken = session.cookies[\"CSRFSESSION\"]\n session.headers.update({'X_CSRFToken': magenta_CSRFToken})\n magenta_chlist_url = session.post(magentaDE_channellist_url, data=json.dumps(magentaDE_get_chlist),headers=magentaDE_header)\n magenta_chlist_url.raise_for_status()\n response = magenta_chlist_url.json()\n\n with open(magentaDE_chlist_provider_tmp, 'w', encoding='utf-8') as provider_list_tmp:\n json.dump(response, provider_list_tmp)\n\n #### Transform magentaDE_chlist_provider_tmp to Standard chlist Format as magentaDE_chlist_provider\n\n # Load Channellist from Provider\n with open(magentaDE_chlist_provider_tmp, 'r', encoding='utf-8') as provider_list_tmp:\n magentaDE_channels = json.load(provider_list_tmp)\n\n # Create empty new hznDE_chlist_provider\n with 
open(magentaDE_chlist_provider, 'w', encoding='utf-8') as provider_list:\n provider_list.write(json.dumps({\"channellist\": []}))\n\n ch_title = ''\n\n # Load New Channellist from Provider\n with open(magentaDE_chlist_provider, encoding='utf-8') as provider_list:\n data = json.load(provider_list)\n\n temp = data['channellist']\n\n for channels in magentaDE_channels['channellist']:\n ch_id = channels['contentId']\n ch_title = channels['name']\n for image in channels['pictures']:\n if image['imageType'] == '15':\n hdimage = image['href']\n # channel to be appended\n y = {\"contentId\": ch_id,\n \"name\": ch_title,\n \"pictures\": [{\"href\": hdimage}]}\n\n # appending channels to data['channellist']\n temp.append(y)\n\n #Save New Channellist from Provider\n with open(magentaDE_chlist_provider, 'w', encoding='utf-8') as provider_list:\n json.dump(data, provider_list, indent=4)\n\ndef select_channels():\n ## Create Provider Temppath if not exist\n if not os.path.exists(provider_temppath):\n os.makedirs(provider_temppath)\n\n ## Create empty (Selected) Channel List if not exist\n if not os.path.isfile(magentaDE_chlist_selected):\n with open((magentaDE_chlist_selected), 'w', encoding='utf-8') as selected_list:\n selected_list.write(json.dumps({\"channellist\": []}))\n\n ## Download chlist_magenta_provider.json\n get_channellist()\n dialog = xbmcgui.Dialog()\n\n with open(magentaDE_chlist_provider, 'r', encoding='utf-8') as o:\n provider_list = json.load(o)\n\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as s:\n selected_list = json.load(s)\n\n ## Start Channel Selector\n user_select = channel_selector.select_channels(provider, provider_list, selected_list)\n\n if user_select is not None:\n with open(magentaDE_chlist_selected, 'w', encoding='utf-8') as f:\n json.dump(user_select, f, indent=4)\n if os.path.isfile(magentaDE_chlist_selected):\n valid = check_selected_list()\n if valid is True:\n ok = dialog.ok(provider, loc(32402))\n if ok:\n log(loc(32402), xbmc.LOGINFO)\n elif valid is False:\n log(loc(32403), xbmc.LOGINFO)\n yn = OSD.yesno(provider, loc(32403))\n if yn:\n select_channels()\n else:\n xbmcvfs.delete(magentaDE_chlist_selected)\n exit()\n else:\n valid = check_selected_list()\n if valid is True:\n ok = dialog.ok(provider, loc(32404))\n if ok:\n log(loc(32404), xbmc.LOGINFO)\n elif valid is False:\n log(loc(32403), xbmc.LOGINFO)\n yn = OSD.yesno(provider, loc(32403))\n if yn:\n select_channels()\n else:\n xbmcvfs.delete(magentaDE_chlist_selected)\n exit()\n\ndef check_selected_list():\n check = 'invalid'\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as c:\n selected_list = json.load(c)\n for user_list in selected_list['channellist']:\n if 'contentId' in user_list:\n check = 'valid'\n if check == 'valid':\n return True\n else:\n return False\n\ndef download_multithread(thread_temppath, download_threads):\n # delete old broadcast files if exist\n for f in os.listdir(provider_temppath):\n if f.endswith('_broadcast.json'):\n xbmcvfs.delete(os.path.join(provider_temppath, f))\n\n magentaDE_session()\n list = os.path.join(provider_temppath, 'list.txt')\n splitname = os.path.join(thread_temppath, 'chlist_magentaDE_selected')\n starttime, endtime = get_epgLength(days_to_grab)\n\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as s:\n selected_list = json.load(s)\n if filesplit.split_chlist_selected(thread_temppath, magentaDE_chlist_selected, splitname, download_threads, enable_multithread):\n multi = True\n needed_threads = sum([len(files) for r, 
d, files in os.walk(thread_temppath)])\n items_to_download = str(len(selected_list['channellist']))\n log('{} {} {} '.format(provider, items_to_download, loc(32361)), xbmc.LOGINFO)\n pDialog = xbmcgui.DialogProgressBG()\n log('{} Multithread({}) Mode'.format(provider, needed_threads), xbmc.LOGINFO)\n pDialog.create('{} {} '.format(loc(32500), provider), '{} {}'.format('100', loc(32501)))\n\n jobs = []\n for thread in range(0, int(needed_threads)):\n p = Process(target=download_thread, args=('{}_{}.json'.format(splitname, int(thread)), multi, list, starttime, endtime, ))\n jobs.append(p)\n p.start()\n for j in jobs:\n while j.is_alive():\n xbmc.sleep(100)\n try:\n last_line = ''\n with open(list, 'r', encoding='utf-8') as f:\n last_line = f.readlines()[-1]\n except:\n pass\n items = sum(1 for f in os.listdir(provider_temppath) if f.endswith('_broadcast.json'))\n percent_remain = int(100) - int(items) * int(100) / int(items_to_download)\n percent_completed = int(100) * int(items) / int(items_to_download)\n pDialog.update(int(percent_completed), '{} {} '.format(loc(32500), last_line), '{} {} {}'.format(int(percent_remain), loc(32501), provider))\n if int(items) == int(items_to_download):\n log('{} {}'.format(provider, loc(32363)), xbmc.LOGINFO)\n break\n j.join()\n pDialog.close()\n for file in os.listdir(thread_temppath): xbmcvfs.delete(os.path.join(thread_temppath, file))\n\n else:\n multi = False\n log('{} {} '.format(provider, 'Cannot download in Multithreading mode, loading single...'), xbmc.LOGINFO)\n download_thread(magentaDE_chlist_selected, multi, list, starttime, endtime)\n\ndef download_thread(magentaDE_chlist_selected, multi, list, starttime, endtime):\n requests.adapters.DEFAULT_RETRIES = 5\n session = requests.Session()\n\n ## Load Cookies from Disk\n with open(magentaDE_session_cookie, 'r', encoding='utf-8') as f:\n session.cookies = requests.utils.cookiejar_from_dict(json.load(f))\n magenta_CSRFToken = session.cookies[\"CSRFSESSION\"]\n session.headers.update({'X_CSRFToken': magenta_CSRFToken})\n\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as s:\n selected_list = json.load(s)\n\n if not multi:\n items_to_download = str(len(selected_list['channellist']))\n log('{} {} {} '.format(provider, items_to_download, loc(32361)), xbmc.LOGINFO)\n pDialog = xbmcgui.DialogProgressBG()\n pDialog.create('{} {} '.format(loc(32500), provider), '{} {}'.format('100', loc(32501)))\n\n for user_item in selected_list['channellist']:\n contentID = user_item['contentId']\n channel_name = user_item['name']\n magentaDE_data = {'channelid': contentID, 'type': '2', 'offset': '0', 'count': '-1', 'isFillProgram': '1','properties': '[{\"name\":\"playbill\",\"include\":\"ratingForeignsn,id,channelid,name,subName,starttime,endtime,cast,casts,country,producedate,ratingid,pictures,type,introduce,foreignsn,seriesID,genres,subNum,seasonNum\"}]','endtime': endtime, 'begintime': starttime}\n response = session.post(magentaDE_data_url, data=json.dumps(magentaDE_data), headers=magentaDE_header)\n response.raise_for_status()\n tkm_data = response.json()\n broadcast_files = os.path.join(provider_temppath, '{}_broadcast.json'.format(contentID))\n with open(broadcast_files, 'w', encoding='utf-8') as playbill:\n json.dump(tkm_data, playbill)\n\n ## Create a List with downloaded channels\n last_channel_name = '{}\\n'.format(channel_name)\n with open(list, 'a', encoding='utf-8') as f:\n f.write(last_channel_name)\n\n if not multi:\n items = sum(1 for f in os.listdir(provider_temppath) if 
f.endswith('_broadcast.json'))\n percent_remain = int(100) - int(items) * int(100) / int(items_to_download)\n percent_completed = int(100) * int(items) / int(items_to_download)\n pDialog.update(int(percent_completed), '{} {} '.format(loc(32500), channel_name), '{} {} {}'.format(int(percent_remain), loc(32501), provider))\n if int(items) == int(items_to_download):\n log('{} {}'.format(provider, loc(32363)), xbmc.LOGINFO)\n break\n if not multi:\n pDialog.close()\n\n\ndef create_xml_channels():\n log('{} {}'.format(provider,loc(32362)), xbmc.LOGINFO)\n if channel_format == 'rytec':\n ## Save tkm_channels.json to Disk\n tkm_channels_response = requests.get(tkm_channels_url).json()\n with open(tkm_channels_json, 'w', encoding='utf-8') as tkm_channels:\n json.dump(tkm_channels_response, tkm_channels)\n\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as c:\n selected_list = json.load(c)\n\n items_to_download = str(len(selected_list['channellist']))\n items = 0\n pDialog = xbmcgui.DialogProgressBG()\n pDialog.create('{} {} '.format(loc(32502),provider), '{} {}'.format('100',loc(32501)))\n\n ## Create XML Channels Provider information\n xml_structure.xml_channels_start(provider)\n\n for user_item in selected_list['channellist']:\n items += 1\n percent_remain = int(100) - int(items) * int(100) / int(items_to_download)\n percent_completed = int(100) * int(items) / int(items_to_download)\n channel_name = user_item['name']\n channel_icon = user_item['pictures'][0]['href']\n channel_id = channel_name\n pDialog.update(int(percent_completed), '{} {} '.format(loc(32502),channel_name),'{} {} {}'.format(int(percent_remain),loc(32501),provider))\n if str(percent_completed) == str(100):\n log('{} {}'.format(provider,loc(32364)), xbmc.LOGINFO)\n\n ## Map Channels\n if not channel_id == '':\n channel_id = mapper.map_channels(channel_id, channel_format, tkm_channels_json, magentaDE_channels_warnings_tmp, lang)\n\n ## Create XML Channel Information with provided Variables\n xml_structure.xml_channels(channel_name, channel_id, channel_icon, lang)\n pDialog.close()\n\n\ndef create_xml_broadcast(enable_rating_mapper, thread_temppath, download_threads):\n\n download_multithread(thread_temppath, download_threads)\n log('{} {}'.format(provider, loc(32365)), xbmc.LOGINFO)\n\n if genre_format == 'eit':\n ## Save tkm_genres.json to Disk\n tkm_genres_response = requests.get(tkm_genres_url).json()\n with open(tkm_genres_json, 'w', encoding='utf-8') as tkm_genres:\n json.dump(tkm_genres_response, tkm_genres)\n\n with open(magentaDE_chlist_selected, 'r', encoding='utf-8') as c:\n selected_list = json.load(c)\n\n items_to_download = str(len(selected_list['channellist']))\n items = 0\n pDialog = xbmcgui.DialogProgressBG()\n pDialog.create('{} {} '.format(loc(32503), provider), '{} Prozent verbleibend'.format('100'))\n\n ## Create XML Broadcast Provider information\n xml_structure.xml_broadcast_start(provider)\n\n for user_item in selected_list['channellist']:\n items += 1\n percent_remain = int(100) - int(items) * int(100) / int(items_to_download)\n percent_completed = int(100) * int(items) / int(items_to_download)\n contentID = user_item['contentId']\n channel_name = user_item['name']\n channel_id = channel_name\n pDialog.update(int(percent_completed), '{} {} '.format(loc(32503), channel_name), '{} {} {}'.format(int(percent_remain), loc(32501), provider))\n if str(percent_completed) == str(100):\n log('{} {}'.format(provider, loc(32366)), xbmc.LOGINFO)\n\n broadcast_files = os.path.join(provider_temppath, 
'{}_broadcast.json'.format(contentID))\n with open(broadcast_files, 'r', encoding='utf-8') as b:\n broadcastfiles = json.load(b)\n\n ### Map Channels\n if not channel_id == '':\n channel_id = mapper.map_channels(channel_id, channel_format, tkm_channels_json, magentaDE_channels_warnings_tmp, lang)\n\n try:\n for playbilllist in broadcastfiles['playbilllist']:\n try:\n item_title = playbilllist['name']\n except (KeyError, IndexError):\n item_title = ''\n try:\n item_starttime = playbilllist['starttime']\n except (KeyError, IndexError):\n item_starttime = ''\n try:\n item_endtime = playbilllist['endtime']\n except (KeyError, IndexError):\n item_endtime = ''\n try:\n item_description = playbilllist['introduce']\n except (KeyError, IndexError):\n item_description = ''\n try:\n item_country = playbilllist['country']\n except (KeyError, IndexError):\n item_country = ''\n try:\n item_picture = playbilllist['pictures'][1]['href']\n except (KeyError, IndexError):\n item_picture = ''\n try:\n item_subtitle = playbilllist['subName']\n except (KeyError, IndexError):\n item_subtitle = ''\n try:\n items_genre = playbilllist['genres']\n except (KeyError, IndexError):\n items_genre = ''\n try:\n item_date = playbilllist['producedate']\n except (KeyError, IndexError):\n item_date = ''\n try:\n item_season = playbilllist['seasonNum']\n except (KeyError, IndexError):\n item_season = ''\n try:\n item_episode = playbilllist['subNum']\n except (KeyError, IndexError):\n item_episode = ''\n try:\n item_agerating = playbilllist['ratingid']\n except (KeyError, IndexError):\n item_agerating = ''\n try:\n items_director = playbilllist['cast']['director']\n except (KeyError, IndexError):\n items_director = ''\n try:\n items_producer = playbilllist['cast']['producer']\n except (KeyError, IndexError):\n items_producer = ''\n try:\n items_actor = playbilllist['cast']['actor']\n except (KeyError, IndexError):\n items_actor = ''\n\n # Transform items to Readable XML Format\n item_starrating = ''\n if not item_date == '':\n item_date = item_date.split('-')\n item_date = item_date[0]\n if (not item_starttime == '' and not item_endtime == ''):\n start = item_starttime.split(' UTC')\n item_starttime = start[0].replace(' ', '').replace('-', '').replace(':', '')\n stop = item_endtime.split(' UTC')\n item_endtime = stop[0].replace(' ', '').replace('-', '').replace(':', '')\n if not item_country == '':\n item_country = item_country.upper()\n if item_agerating == '-1':\n item_agerating = ''\n\n # Map Genres\n if not items_genre == '':\n items_genre = mapper.map_genres(items_genre, genre_format, tkm_genres_json, magentaDE_genres_warnings_tmp, lang)\n\n ## Create XML Broadcast Information with provided Variables\n xml_structure.xml_broadcast(episode_format, channel_id, item_title, item_starttime, item_endtime,\n item_description, item_country, item_picture, item_subtitle, items_genre,\n item_date, item_season, item_episode, item_agerating, item_starrating, items_director,\n items_producer, items_actor, enable_rating_mapper, lang)\n\n except (KeyError, IndexError):\n log('{} {} {} {} {} {}'.format(provider,loc(32367),channel_name,loc(32368),contentID,loc(32369)))\n pDialog.close()\n\n ## Create Channel Warnings Textfile\n channel_pull = '\\nPlease Create a Pull Request for Missing Rytec IDs to https://github.com/sunsettrack4/config_files/blob/master/tkm_channels.json\\n'\n mapper.create_channel_warnings(magentaDE_channels_warnings_tmp, magentaDE_channels_warnings, provider, channel_pull)\n\n ## Create Genre Warnings Textfile\n 
genre_pull = '\\nPlease Create an Pull Request for Missing EIT Genres to https://github.com/sunsettrack4/config_files/blob/master/tkm_genres.json\\n'\n mapper.create_genre_warnings(magentaDE_genres_warnings_tmp, magentaDE_genres_warnings, provider, genre_pull)\n\n notify(addon_name, '{} {} {}'.format(loc(32370),provider,loc(32371)), icon=xbmcgui.NOTIFICATION_INFO)\n log('{} {} {}'.format(loc(32370),provider,loc(32371), xbmc.LOGINFO))\n xbmc.sleep(4000)\n\n if (os.path.isfile(magentaDE_channels_warnings) or os.path.isfile(magentaDE_genres_warnings)):\n notify(provider, '{}'.format(loc(32372)), icon=xbmcgui.NOTIFICATION_WARNING)\n xbmc.sleep(3000)\n\n ## Delete old Tempfiles, not needed any more\n for file in os.listdir(provider_temppath): xbmcvfs.delete(os.path.join(provider_temppath, file))\n\n\ndef check_provider():\n ## Create Provider Temppath if not exist\n if not os.path.exists(provider_temppath):\n os.makedirs(provider_temppath)\n\n ## Create empty (Selected) Channel List if not exist\n if not os.path.isfile(magentaDE_chlist_selected):\n with open((magentaDE_chlist_selected), 'w', encoding='utf-8') as selected_list:\n selected_list.write(json.dumps({\"channellist\": []}))\n\n ## If no Channellist exist, ask to create one\n yn = OSD.yesno(provider, loc(32405))\n if yn:\n select_channels()\n else:\n xbmcvfs.delete(magentaDE_chlist_selected)\n return False\n\n ## If a Selected list exist, check valid\n valid = check_selected_list()\n if valid is False:\n yn = OSD.yesno(provider, loc(32405))\n if yn:\n select_channels()\n else:\n xbmcvfs.delete(magentaDE_chlist_selected)\n return False\n return True\n\ndef startup():\n if check_provider():\n get_channellist()\n return True\n else:\n return False\n\n# Channel Selector\ntry:\n if sys.argv[1] == 'select_channels_magentaDE':\n select_channels()\nexcept IndexError:\n pass", "repo_name": "DeBaschdi/service.takealug.epg-grabber", "sub_path": "resources/providers/magenta_DE.py", "file_name": "magenta_DE.py", "file_ext": "py", "file_size_in_byte": 25575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "50", "api": [{"api_name": "xbmcaddon.Addon", "line_number": 22, "usage_type": "call"}, {"api_name": "xbmcvfs.translatePath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "xbmc.LOGDEBUG", "line_number": 58, "usage_type": "attribute"}, {"api_name": "xbmc.log", 
"line_number": 59, "usage_type": "call"}, {"api_name": "xbmcgui.Dialog", "line_number": 63, "usage_type": "call"}, {"api_name": "xbmcgui.NOTIFICATION_INFO", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 107, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 109, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 112, "usage_type": "call"}, {"api_name": "requests.utils.dict_from_cookiejar", "line_number": 112, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 112, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 118, "usage_type": "call"}, {"api_name": "requests.utils.cookiejar_from_dict", "line_number": 121, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 121, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 121, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 125, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "json.load", "line_number": 136, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 140, "usage_type": "call"}, {"api_name": "json.load", "line_number": 146, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 176, "usage_type": "call"}, {"api_name": "xbmcgui.Dialog", "line_number": 180, "usage_type": "call"}, {"api_name": "json.load", "line_number": 183, "usage_type": "call"}, {"api_name": "json.load", "line_number": 186, "usage_type": "call"}, {"api_name": "resources.lib.channel_selector.select_channels", "line_number": 189, "usage_type": "call"}, {"api_name": "resources.lib.channel_selector", "line_number": 189, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 193, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 199, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 201, "usage_type": 
"attribute"}, {"api_name": "xbmcvfs.delete", "line_number": 206, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 213, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 215, "usage_type": "attribute"}, {"api_name": "xbmcvfs.delete", "line_number": 220, "usage_type": "call"}, {"api_name": "json.load", "line_number": 226, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 237, "usage_type": "call"}, {"api_name": "xbmcvfs.delete", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path", "line_number": 243, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 247, "usage_type": "call"}, {"api_name": "resources.lib.filesplit.split_chlist_selected", "line_number": 248, "usage_type": "call"}, {"api_name": "resources.lib.filesplit", "line_number": 248, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 250, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 252, "usage_type": "attribute"}, {"api_name": "xbmcgui.DialogProgressBG", "line_number": 253, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 254, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 259, "usage_type": "call"}, {"api_name": "xbmc.sleep", "line_number": 264, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 271, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 276, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 280, "usage_type": "call"}, {"api_name": "xbmcvfs.delete", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path", "line_number": 280, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 284, "usage_type": "attribute"}, {"api_name": "requests.adapters", "line_number": 288, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 289, "usage_type": "call"}, {"api_name": "requests.utils.cookiejar_from_dict", "line_number": 293, "usage_type": "call"}, {"api_name": "requests.utils", "line_number": 293, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 293, "usage_type": "call"}, {"api_name": "json.load", "line_number": 298, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 302, "usage_type": "attribute"}, {"api_name": "xbmcgui.DialogProgressBG", "line_number": 303, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 315, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 323, "usage_type": "call"}, {"api_name": "xbmc.LOGINFO", "line_number": 328, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 335, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 338, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 340, "usage_type": "call"}, {"api_name": "json.load", "line_number": 343, "usage_type": "call"}, {"api_name": 
"xbmcgui.DialogProgressBG", "line_number": 347, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure.xml_channels_start", "line_number": 351, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure", "line_number": 351, "usage_type": "name"}, {"api_name": "xbmc.LOGINFO", "line_number": 362, "usage_type": "attribute"}, {"api_name": "resources.lib.mapper.map_channels", "line_number": 366, "usage_type": "call"}, {"api_name": "resources.lib.mapper", "line_number": 366, "usage_type": "name"}, {"api_name": "resources.lib.xml_structure.xml_channels", "line_number": 369, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure", "line_number": 369, "usage_type": "name"}, {"api_name": "xbmc.LOGINFO", "line_number": 376, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 380, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 382, "usage_type": "call"}, {"api_name": "json.load", "line_number": 385, "usage_type": "call"}, {"api_name": "xbmcgui.DialogProgressBG", "line_number": 389, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure.xml_broadcast_start", "line_number": 393, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure", "line_number": 393, "usage_type": "name"}, {"api_name": "xbmc.LOGINFO", "line_number": 404, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 406, "usage_type": "call"}, {"api_name": "os.path", "line_number": 406, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 408, "usage_type": "call"}, {"api_name": "resources.lib.mapper.map_channels", "line_number": 412, "usage_type": "call"}, {"api_name": "resources.lib.mapper", "line_number": 412, "usage_type": "name"}, {"api_name": "resources.lib.mapper.map_genres", "line_number": 494, "usage_type": "call"}, {"api_name": "resources.lib.mapper", "line_number": 494, "usage_type": "name"}, {"api_name": "resources.lib.xml_structure.xml_broadcast", "line_number": 497, "usage_type": "call"}, {"api_name": "resources.lib.xml_structure", "line_number": 497, "usage_type": "name"}, {"api_name": "resources.lib.mapper.create_channel_warnings", "line_number": 508, "usage_type": "call"}, {"api_name": "resources.lib.mapper", "line_number": 508, "usage_type": "name"}, {"api_name": "resources.lib.mapper.create_genre_warnings", "line_number": 512, "usage_type": "call"}, {"api_name": "resources.lib.mapper", "line_number": 512, "usage_type": "name"}, {"api_name": "xbmcgui.NOTIFICATION_INFO", "line_number": 514, "usage_type": "attribute"}, {"api_name": "xbmc.LOGINFO", "line_number": 515, "usage_type": "attribute"}, {"api_name": "xbmc.sleep", "line_number": 516, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 518, "usage_type": "call"}, {"api_name": "os.path", "line_number": 518, "usage_type": "attribute"}, {"api_name": "xbmcgui.NOTIFICATION_WARNING", "line_number": 519, "usage_type": "attribute"}, {"api_name": "xbmc.sleep", "line_number": 520, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 523, "usage_type": "call"}, {"api_name": "xbmcvfs.delete", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path", "line_number": 523, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 528, "usage_type": "call"}, {"api_name": "os.path", "line_number": 528, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 
532, "usage_type": "call"}, {"api_name": "os.path", "line_number": 532, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 534, "usage_type": "call"}, {"api_name": "xbmcvfs.delete", "line_number": 541, "usage_type": "call"}, {"api_name": "xbmcvfs.delete", "line_number": 551, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 564, "usage_type": "attribute"}]} +{"seq_id": "16372538253", "text": "from django import forms\nfrom owner_admin.models import Offers\n\n\nclass AddOrEditOfferForm(forms.ModelForm):\n class Meta:\n model = Offers\n fields = '__all__'\n widgets = {'date_added': forms.DateInput(attrs={'hidden': True, 'readonly': True}),\n 'date': forms.DateInput(attrs={'type': 'date'}),\n 'price': forms.NumberInput(attrs={'step': '0.01', 'min': '0.00', 'max': '9999.99'})}\n", "repo_name": "AbdulAhadKhan/FlyBoredom", "sub_path": "owner_admin/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 438, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "owner_admin.models.Offers", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.forms.DateInput", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "django.forms.NumberInput", "line_number": 11, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "1937721243", "text": "from flask import Flask, render_template, request\nfrom SimilarityAnalyzer import analyseSimilarity\nfrom SimilarityAnalyzer import analyseNodes\nimport json\n\napp = Flask(__name__)\n\ncodeOne = ''\ncodeTwo = ''\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/api/fileOne\", methods=['POST'])\ndef submitFileOne():\n global codeOne\n codeOne = request.get_data().decode(\"utf-8\") \n return \"Success\"\n\n@app.route(\"/api/fileTwo\", methods=['POST'])\ndef submitFileTwo():\n global codeTwo\n codeTwo = request.get_data().decode(\"utf-8\") \n return \"Success\"\n\n@app.route(\"/api/astOne\")\ndef resultsOfASTOne():\n global codeOne\n global codeTwo\n jsonObject = analyseSimilarity(codeOne, codeTwo) \n return json.loads(jsonObject)\n\n@app.route(\"/api/astTwo\")\ndef resultsOfASTTwo():\n global codeOne\n global codeTwo\n jsonObject = analyseSimilarity(codeTwo, codeOne) \n return json.loads(jsonObject)\n\n@app.route(\"/api/compareNodes\")\ndef compareNodes():\n global codeOne\n global codeTwo\n jsonObject = analyseNodes(codeOne, codeTwo) \n return json.loads(jsonObject) ", "repo_name": "hamzahussyn/SimilarityAnalyser", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1103, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 
24, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "SimilarityAnalyzer.analyseSimilarity", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "SimilarityAnalyzer.analyseSimilarity", "line_number": 38, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "SimilarityAnalyzer.analyseNodes", "line_number": 45, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "27815698806", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('number', models.CharField(max_length=15)),\n ('age', models.IntegerField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ContactList',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=30)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='contact',\n name='contact_list',\n field=models.ForeignKey(to='myapp.ContactList'),\n preserve_default=True,\n ),\n ]\n", "repo_name": "aaj/form-wizardry", "sub_path": "myapp/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", 
"line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "35254944913", "text": "import serial\nimport sys\nimport os\n\ncurrent = os.path.dirname(os.path.realpath(__file__))\nparent_directory = os.path.dirname(current)\nsys.path.append(parent_directory)\nfrom pynmeagps import NMEAReader\nfrom SocketUtils.SocketUtils import SITQueuedUDPSender\nfrom CoTUtils.CoTUtility import CoTUtility\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n\ndef start_formatter(com, baud, host_ip, host_port):\n ser = serial.Serial(com, baud, timeout=1)\n reader = NMEAReader(ser)\n\n with SITQueuedUDPSender(host_ip, default_destination=(host_ip, host_port)) as out:\n out.Debug = False\n\n while True:\n # read data from serial port using NMEA parser\n (raw_data, parsed_data) = reader.read()\n \n # only ingest GPRMC messages\n if parsed_data.msgID == 'RMC':\n # check if message is valid\n if parsed_data.status == 'A':\n # parse out lat, lon\n lat = parsed_data.lat\n lon = parsed_data.lon\n\n # convert to CoT and send over UTPSender\n cot_data = { 'uid': 'GPRMC', 'identity': 'friend', 'dimension': 'land-unit', 'type': 'C', 'lat': lat, 'lon': lon, 'detail': { 'track': { 'course': 0, 'speed': parsed_data.spd }}}\n print(cot_data)\n cot_xml = CoTUtility.toCoT(cot_data)\n out.putitem(cot_xml)\n\n\n# starter code\nif __name__ == '__main__':\n\n #check command line args\n if len(sys.argv) != 5 or not sys.argv[2].isdigit() or not sys.argv[3].isdigit():\n print(f'{bcolors.FAIL}Usage: python NMEAIngest.py ')\n\n # get command line arguments\n host_ip = sys.argv[1] #\"127.0.0.1\"\n host_port = int(sys.argv[2]) # 1870\n baud = int(sys.argv[3]) # 9600\n com = sys.argv[4].upper() # COM5\n\n print(f'{bcolors.OKBLUE}Starting NMEA Formatter on... 
\\nIP: {host_ip} PORT: {host_port} COM: {com} BAUD: {baud}')\n start_formatter(com, baud, host_ip, host_port)\n \n\n\n \n \n \n \n \n", "repo_name": "Senior-Design-2022/Real-Time-GIS", "sub_path": "NMEAUtils/NMEAIngest.py", "file_name": "NMEAIngest.py", "file_ext": "py", "file_size_in_byte": 2280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 25, "usage_type": "call"}, {"api_name": "pynmeagps.NMEAReader", "line_number": 26, "usage_type": "call"}, {"api_name": "SocketUtils.SocketUtils.SITQueuedUDPSender", "line_number": 28, "usage_type": "call"}, {"api_name": "CoTUtils.CoTUtility.CoTUtility.toCoT", "line_number": 46, "usage_type": "call"}, {"api_name": "CoTUtils.CoTUtility.CoTUtility", "line_number": 46, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 59, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 60, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "31603460799", "text": "#!/usr/bin/python3\n\"\"\"A script to create the State “California” with the City \"San Francisco\"\n from the database hbtn_0e_100_usa\n\"\"\"\n\nfrom relationship_city import City\nfrom relationship_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sys import argv\n\n\nif __name__ == '__main__':\n # Creating and connecting session modal to local database\n DATABASE_URL = 'mysql+mysqldb://{}:{}@localhost:3306/{}'\n engine = create_engine(\n DATABASE_URL.format(argv[1], argv[2], argv[3]),\n pool_pre_ping=True\n )\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Creating new state called California\n new_state = State(name='California')\n\n # Creating new city called San Francisco\n new_city = City(name='San Francisco')\n\n # Making relationship between new_state to new_city\n new_state.cities.append(new_city)\n\n # Adding created City and State to session model\n session.add(new_state)\n session.add(new_city)\n\n # Committing all changes to database\n session.commit()\n # Closing session model\n session.close()\n", "repo_name": "njdam/alx-higher_level_programming", "sub_path": "0x0F-python-object_relational_mapping/100-relationship_states_cities.py", "file_name": "100-relationship_states_cities.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "name"}, {"api_name": "relationship_state.Base.metadata.create_all", "line_number": 20, "usage_type": "call"}, {"api_name": "relationship_state.Base.metadata", 
"line_number": 20, "usage_type": "attribute"}, {"api_name": "relationship_state.Base", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 21, "usage_type": "call"}, {"api_name": "relationship_state.State", "line_number": 25, "usage_type": "call"}, {"api_name": "relationship_city.City", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "38584043102", "text": "import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\n\ndef back_rule(rule, df_tree,i):\n parent_index = df_tree.loc[i,'parent_feat_index']\n parent_th = df_tree.loc[i,'parent_th']\n\n index_parent = df_tree.loc[np.logical_and(df_tree.feat_index==parent_index,df_tree.th==parent_th)].index[0]\n\n if df_tree.loc[index_parent,'side']=='root':\n rule.append([parent_index,parent_th,'root'])\n return rule\n else:\n rule.append([parent_index,parent_th,df_tree.loc[index_parent,'side']])\n rule = back_rule(rule,df_tree,index_parent)\n return rule\n\ndef test_rules(X,y,rule,value):\n df_data = pd.DataFrame(X)\n y[y!=value]=-1\n df_data['y_true'] = y\n df_data['y_pred'] = -1\n df_rule = pd.DataFrame(columns=['feat_'+str(i)+'_<' for i in range(X.shape[1])]+['feat_'+str(i)+'_>=' for i in range(X.shape[1])],index=[0])\n rule.reverse()\n sub_X = df_data.copy()\n for i,r in enumerate(rule[:-1]):\n\n index = r[0]\n th = r[1]\n side = rule[i+1][2]\n if side=='left':\n df_rule.loc[0,'feat_'+str(index)+'_<'] = th\n\n sub_X = sub_X.loc[sub_X[index]='] = th\n\n sub_X = sub_X.loc[sub_X[index]>=th]\n\n df_data.loc[sub_X.index,'y_pred'] = 0\n tn, fp, fn, tp = confusion_matrix(df_data.y_true,df_data.y_pred,labels=[value,-1]).ravel()\n df_rule['precision'] = tp/(tp+fp)\n df_rule['recall'] = tp/(fn+tp)\n df_rule['accuracy'] = (tp + tn)/ (tp + fn + tn + fp)\n return df_rule\n\ndef get_rules(value,df_forest,X_test,y_test):\n rules = []\n\n for tree in df_forest.tree.unique():\n df_tree = df_forest.loc[df_forest.tree==tree]\n leaf_interest = df_tree.loc[df_tree.value==value]\n\n for i in leaf_interest.index:\n rule = [[df_tree.loc[i,'feat_index'],df_tree.loc[i,'th'],df_tree.loc[i,'side']]]\n rule = back_rule(rule,df_tree,i)\n rules.append(rule)\n\n for i,r in enumerate(rules):\n df_rule = test_rules(X_test,y_test,r,value)\n if i==0:\n df_rules = df_rule\n else:\n df_rules = pd.concat([df_rules,df_rule])\n\n return df_rules", "repo_name": "vlt-ro/random_forest_rules_extraction", "sub_path": "src/extract_rules.py", "file_name": "extract_rules.py", "file_ext": "py", "file_size_in_byte": 2239, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.logical_and", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "9650306436", "text": "import json\nimport requests\n\nfrom http import HTTPStatus\n\nclass RewardsServiceClient:\n\n def __init__(self):\n self.rewards_url = \"http://rewardsservice:7050/rewards\"\n self.customer_rewards_url = \"http://rewardsservice:7050/customerrewards\"\n\n def get_rewards(self):\n response = requests.get(self.rewards_url)\n if response.status_code == HTTPStatus.NO_CONTENT.value:\n return []\n else:\n return response.json()\n\n # Code to get the list of customer 
rewards\n def get_all_customer_rewards(self):\n response = requests.get(self.customer_rewards_url)\n if response.status_code == HTTPStatus.NO_CONTENT.value:\n return []\n else:\n return response.json()\n\n # Code to get the list of customer rewards\n def get_customer_rewards(self, email):\n query_params = {'email':email}\n response = requests.get(self.customer_rewards_url, params=query_params)\n if response.status_code == HTTPStatus.NO_CONTENT.value:\n return []\n else:\n return response.json()\n\n # Code to get the list of customer rewards\n def add_customer_rewards(self, email, orderTotal):\n data = {'email':email, 'orderTotal':orderTotal}\n headers = {'content-type': 'application/json'}\n response = requests.post(self.customer_rewards_url, data=json.dumps(data), headers=headers)\n if response.status_code == HTTPStatus.NO_CONTENT.value:\n return []\n else:\n return response.json()\n", "repo_name": "getvijayji/platform-services-python-test", "sub_path": "source/RewardsUI/rewards/clients/rewards_service_client.py", "file_name": "rewards_service_client.py", "file_ext": "py", "file_size_in_byte": 1545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NO_CONTENT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NO_CONTENT", "line_number": 22, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 22, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NO_CONTENT", "line_number": 31, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 31, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NO_CONTENT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "542192201", "text": "import time\n\nfrom pytube import YouTube,Playlist\nfrom utils import make_dir\n\nFILE_EXT='mp4'\n\ndef playlist(arg):\n \n playlist_url =Playlist(arg.url)\n\n save_dir = make_dir(arg.dir)\n\n for arg.url in playlist_url:\n start = time.time()\n \n yt = YouTube(arg.url)\n print('now :',yt.title)\n yt.streams.filter(adaptive=True, file_extension=FILE_EXT).order_by('resolution').desc().first().download(save_dir)\n \n end = time.time()\n\n print('done',f\"{end-start:.3f} sec\")\n return\n \ndef not_playlist(arg):\n\n save_dir = make_dir(arg.dir)\n \n start = time.time()\n \n yt = YouTube(arg.url)\n print('now :',yt.title)\n yt.streams.filter(adaptive=True, file_extension=FILE_EXT).order_by('resolution').desc().first().download(save_dir)\n \n end = time.time()\n return print('done',f\"{end-start:.3f} sec\")\n\n", "repo_name": "horcrux22/youtubedownloader", "sub_path": "ytdown.py", "file_name": "ytdown.py", "file_ext": "py", "file_size_in_byte": 880, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pytube.Playlist", "line_number": 10, "usage_type": "call"}, {"api_name": "utils.make_dir", "line_number": 12, "usage_type": "call"}, {"api_name": "time.time", "line_number": 15, "usage_type": 
"call"}, {"api_name": "pytube.YouTube", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.make_dir", "line_number": 28, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "8136000528", "text": "import os\r\nfrom werkzeug.utils import secure_filename\r\nfrom flask import Flask , render_template ,request,Response\r\nimport vehicle_counting\r\nimport cv2\r\n\r\nUPLOAD_FOLDER = r'C:\\Users\\AQEEL\\Desktop\\Aqeel\\sem-5\\MP\\flaskkkkk\\test\\flaskk\\fol'\r\n\r\nvidpath=''\r\napp=Flask(__name__)\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef home():\r\n return render_template('index1.html')\r\n@app.route('/v', methods=['GET','POST'])\r\ndef hello_world():\r\n return render_template('vidcap.html')\r\n\r\n@app.route('/vid',methods=['GET','POST'])\r\ndef path():\r\n global vidpath\r\n vidfile=request.files['vidfile']\r\n filename = secure_filename(vidfile.filename)\r\n vidpath = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n vidfile.save(vidpath)\r\n return render_template('display2.html')\r\n@app.route('/display', methods=['GET'])\r\ndef hello():\r\n return render_template('display.html')\r\n\r\n@app.route('/p', methods=['POST','GET'])\r\ndef frames(type,address):\r\n if type==0:\r\n cap=cv2.VideoCapture(address)\r\n if type==1:\r\n cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)\r\n i=0\r\n j=0\r\n while(cap.isOpened()):\r\n flag,frame=cap.read()\r\n if flag==False:\r\n break \r\n if j%30==0: \r\n add='luci'+str(i)+'.jpg'\r\n cv2.imwrite(add,frame)\r\n img_addr=rf\"C:/Users/AQEEL/Desktop/Aqeel/sem-5/MP/flaskkkkk/test/{add}\" \r\n vehicle_counting.count(img_addr)\r\n i+=1\r\n img = cv2.imread(img_addr)\r\n ret, buffer = cv2.imencode('.jpg', img)\r\n frame = buffer.tobytes()\r\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\r\n \r\n j+=1\r\n cap.release()\r\n@app.route('/live_feed')\r\ndef live_feed():\r\n return Response(frames(1,0), mimetype='multipart/x-mixed-replace; boundary=frame')\r\n@app.route('/video_feed')\r\ndef video_feed():\r\n print(vidpath)\r\n return Response(frames(0,vidpath), mimetype='multipart/x-mixed-replace; boundary=frame')\r\n\r\nif __name__==\"__main__\":\r\n app.run(debug='true')", "repo_name": "aftabahmed-09/ATST", "sub_path": "flaskk/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2166, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}, {"api_name": 
"cv2.VideoCapture", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.CAP_DSHOW", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 46, "usage_type": "call"}, {"api_name": "vehicle_counting.count", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "4561665715", "text": "import tensorflow as tf\nfrom model_helpers import Embedding, build_bidirectional_rnn, xavier\nimport os\nfrom os import makedirs\nfrom os.path import join, dirname\nfrom time import strftime, gmtime\nfrom helpers import build_vocab, build_dis_data, generate_dis_batches, print_out\nimport numpy as np\nimport json\n\nclass TweetDiscriminator(object):\n\n def __init__(self, num_unit, batch_size, vocab_size, embed_size,\n cell_type=tf.nn.rnn_cell.BasicLSTMCell,\n num_gpu=2,\n lr=0.001):\n\n self.label = tf.placeholder(tf.int32, shape=[batch_size], name=\"label\")\n self.text = tf.placeholder(tf.int32, shape=[None, batch_size], name=\"embed-tweet\") # [max_len, batch_size]\n self.len = tf.placeholder(tf.int32, shape=[batch_size], name=\"tweet_length\")\n\n with tf.variable_scope(\"embeddings\"):\n embedding = Embedding(vocab_size, embed_size)\n text_embed = embedding(self.text)\n\n with tf.variable_scope(\"text-encoder\"):\n _, encoder_state = build_bidirectional_rnn(\n num_unit, text_embed, self.len, cell_type, num_gpu)\n text_vec = tf.concat([encoder_state[0], encoder_state[1]], axis=1)\n\n with tf.variable_scope(\"turing-result\"):\n logits = tf.layers.dense(text_vec, 2, activation=None, kernel_initializer=xavier)\n self.prob = tf.nn.softmax(logits)\n\n with tf.variable_scope(\"loss\"):\n self.loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.label, logits=logits))\n\n with tf.variable_scope(\"accuracy\"):\n accuracy = tf.nn.in_top_k(logits, self.label, k=1)\n self.accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))\n\n with tf.variable_scope(\"optimization\"):\n optimizer = tf.train.AdamOptimizer(lr)\n self.update_step = optimizer.minimize(self.loss)\n\n def train_update(self, batch, sess):\n sess = sess or sess.get_default_session()\n text = batch[0]\n length = batch[1]\n label = batch[2]\n\n _, loss, accuracy = sess.run(\n [self.update_step, self.loss, self.accuracy],\n feed_dict={self.text: text, self.label: label, self.len: length})\n return loss, accuracy\n\n def eval(self, batches, sess):\n sess = sess or sess.get_default_session()\n loss_l = []\n accuracy_l = []\n\n for batch in batches:\n text = batch[0]\n length = batch[1]\n label = batch[2]\n\n loss, accuracy = sess.run(\n [self.loss, self.accuracy],\n feed_dict={self.text: text, self.label: label, self.len: length})\n\n loss_l.append(loss)\n accuracy_l.append(accuracy)\n return float(np.mean(loss_l)), float(np.mean(accuracy_l))\n\nif __name__ == '__main__':\n from params.full import *\n num_epoch = 6\n test_step = 50\n\n # for machine samples\n from collections import Counter\n\n c = Counter()\n os.chdir(\"../data/full_64_input/dis_pretrain\")\n output_dir = strftime(\"%m-%d_%H-%M-%S\", gmtime())\n\n # build vocab\n word2index, index2word = build_vocab(\"vocab.ori\")\n start_i, end_i = word2index[''], 
word2index['']\n    vocab_size = len(word2index)\n\n    discriminator = TweetDiscriminator(num_unit, batch_size, vocab_size, embed_size,\n                                       cell_type=tf.nn.rnn_cell.GRUCell, num_gpu=2, lr=0.001)\n    \n    train_data = build_dis_data(\"human_train.txt\", \"machine_train.txt\", word2index)\n    test_data = build_dis_data(\"human_test.txt\", \"machine_test.txt\", word2index)\n    test_batches = generate_dis_batches(test_data, batch_size, False)\n\n    print_out(\"*** DATA READY ***\")\n\n    makedirs(dirname(join(output_dir, \"breakpoints/\")), exist_ok=True)\n    log_f = open(join(output_dir, \"log.log\"), \"w\")\n\n    saver = tf.train.Saver()\n    with tf.Session() as sess:\n        best_f = join(output_dir, \"best_accuracy.txt\")\n\n        global_step = best_step = 1\n        start_epoch = best_epoch = 1\n        best_loss = 1000.\n        sess.run(tf.global_variables_initializer())\n\n        for epoch in range(start_epoch, num_epoch + 1):\n            train_batches = generate_dis_batches(train_data, batch_size, True)\n\n            loss_l = []\n            accuracy_l = []\n            \n            for batch in train_batches:\n                loss, accuracy = discriminator.train_update(batch, sess)\n                loss_l.append(loss)\n                accuracy_l.append(accuracy)\n\n                if global_step % test_step == 0:\n                    time_now = strftime(\"%m-%d %H:%M:%S\", gmtime())\n                    print_out('epoch:\\t%d\\tstep:\\t%d\\tbatch-loss/accuracy:\\t%.3f\\t%.1f\\t\\t%s' %\n                              (epoch, global_step,\n                               np.mean(loss_l), np.mean(accuracy_l) * 100, time_now),\n                              f=log_f)\n                if global_step % (test_step * 10) == 0:\n                    loss, accuracy = discriminator.eval(test_batches, sess)\n                    print_out('EPOCH-\\t%d\\tSTEP-\\t%d\\tTEST-loss/accuracy/accuracy5-\\t%.3f\\t%.1f' %\n                              (epoch, global_step,\n                               loss, accuracy * 100),\n                              f=log_f)\n\n                    if best_loss >= loss:\n                        best_loss = loss\n\n                        best_epoch = epoch\n                        best_step = global_step\n\n                        # save breakpoint\n                        path = join(output_dir, \"breakpoints/best_test_loss.ckpt\")\n                        save_path = saver.save(sess, path)\n\n                        # save best epoch/step\n                        best_dict = {\n                            \"loss\": best_loss, \"epoch\": best_epoch, \"step\": best_step, \"accuracy\": accuracy}\n                        with open(best_f, \"w\") as f:\n                            f.write(json.dumps(best_dict, indent=2))\n                global_step += 1\n\n            loss, accuracy = discriminator.eval(train_batches, sess)\n            print_out('EPOCH!\\t%d\\tTRAIN!\\t%d\\tTRAIN-loss/accuracy-\\t%.3f\\t%.1f' %\n                      (epoch, global_step,\n                       np.mean(loss_l), np.mean(accuracy_l) * 100),\n                      f=log_f)\n\n        log_f.close()", "repo_name": "claude-zhou/emo", "sub_path": "discriminator.py", "file_name": "discriminator.py", "file_ext": "py", "file_size_in_byte": 6325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.nn", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 22, "usage_type": "call"}, {"api_name": "model_helpers.Embedding", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 26, "usage_type": "call"}, {"api_name": "model_helpers.build_bidirectional_rnn", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 29, 
"usage_type": "call"}, {"api_name": "tensorflow.variable_scope", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 32, "usage_type": "attribute"}, {"api_name": "model_helpers.xavier", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.nn.softmax", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.nn.in_top_k", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_mean", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 84, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 85, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 86, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 86, "usage_type": "call"}, {"api_name": "helpers.build_vocab", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "helpers.build_dis_data", "line_number": 96, "usage_type": "call"}, {"api_name": "helpers.build_dis_data", "line_number": 97, "usage_type": "call"}, {"api_name": "helpers.generate_dis_batches", "line_number": 98, "usage_type": "call"}, {"api_name": "helpers.print_out", "line_number": 100, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 112, "usage_type": "call"}, {"api_name": "helpers.generate_dis_batches", "line_number": 115, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 126, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 126, "usage_type": "call"}, {"api_name": "helpers.print_out", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 129, "usage_type": "call"}, {"api_name": "helpers.print_out", 
"line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 152, "usage_type": "call"}, {"api_name": "helpers.print_out", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "549032953", "text": "\"\"\"\n Author: Mauro Mendez.\n Date: 02/11/2020.\n\n File to run testing metrics once the model has trained.\n\"\"\"\n\n\nimport glob\nimport pathlib\n\nimport torch\nimport segmentation_models_pytorch as smp\n\nfrom hyperparameters import parameters as params\nfrom models import create_model\nfrom dataset import get_dataloader\nfrom testing import test_report\n\n\ndef main():\n \"\"\"\n main Main function, flow of program.\n \"\"\"\n\n # Model\n model = create_model(model=params['model'], encoder=params['encoder'],\\\n encoder_weights=params['encoder_weights'])\n\n # Running architecture (GPU or CPU)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Using GPU?: ', torch.cuda.is_available())\n\n # Image loader\n proc_fn = smp.encoders.get_preprocessing_fn(params['encoder'], params['encoder_weights'])\n test_loader = get_dataloader(data_file=params['test_file'], img_size=params['img_size'],\\\n batch_size=1, proc_fn=proc_fn, data_split='Testing')\n\n # Creates the criterion (loss function)\n criterion = smp.utils.losses.DiceLoss()\n\n # Weights Load Up\n weights_file = glob.glob(params['weights_path']+'/'+params['save_name']+'*.pth')[0]\n\n checkpoint = torch.load(weights_file)\n model.load_state_dict(checkpoint['model_state_dict'])\n\n print('Model Loaded!\\nDice Loss: {:.4}\\nIoU: {:.4}\\nFscore: {:.4}\\nAccuracy: {:.4}'\\\n .format(checkpoint['dice_loss'], checkpoint['iou_score'],\\\n checkpoint['fscore'], checkpoint['accuracy']))\n\n\n # Create folder for weights\n pathlib.Path(params['report_path']).mkdir(parents=True, exist_ok=True)\n\n # Metrics\n metrics = [\\\n smp.utils.metrics.IoU(threshold=0.5),\\\n smp.utils.metrics.Fscore(threshold=0.5),\\\n smp.utils.metrics.Accuracy(threshold=0.5),\\\n smp.utils.metrics.Recall(threshold=0.5),\\\n smp.utils.metrics.Precision(threshold=0.5)]\n\n # Run test metrics and creates a report\n test_report(model=model, dataloader=test_loader, criterion=criterion,\\\n metrics=metrics, device=device, report_path=params['report_path'],\\\n save_name=params['save_name'])\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "mamemo/CNN-Semantic-Segmentation", "sub_path": "code/run_test.py", "file_name": "run_test.py", "file_ext": "py", "file_size_in_byte": 2235, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "models.create_model", "line_number": 27, "usage_type": "call"}, {"api_name": "hyperparameters.parameters", "line_number": 27, "usage_type": "name"}, {"api_name": "hyperparameters.parameters", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "segmentation_models_pytorch.encoders.get_preprocessing_fn", "line_number": 35, "usage_type": "call"}, {"api_name": 
"segmentation_models_pytorch.encoders", "line_number": 35, "usage_type": "attribute"}, {"api_name": "hyperparameters.parameters", "line_number": 35, "usage_type": "name"}, {"api_name": "dataset.get_dataloader", "line_number": 36, "usage_type": "call"}, {"api_name": "hyperparameters.parameters", "line_number": 36, "usage_type": "name"}, {"api_name": "segmentation_models_pytorch.utils.losses.DiceLoss", "line_number": 40, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 40, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 43, "usage_type": "call"}, {"api_name": "hyperparameters.parameters", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "hyperparameters.parameters", "line_number": 54, "usage_type": "name"}, {"api_name": "segmentation_models_pytorch.utils.metrics.IoU", "line_number": 58, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 58, "usage_type": "attribute"}, {"api_name": "segmentation_models_pytorch.utils.metrics.Fscore", "line_number": 59, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 59, "usage_type": "attribute"}, {"api_name": "segmentation_models_pytorch.utils.metrics.Accuracy", "line_number": 60, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 60, "usage_type": "attribute"}, {"api_name": "segmentation_models_pytorch.utils.metrics.Recall", "line_number": 61, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 61, "usage_type": "attribute"}, {"api_name": "segmentation_models_pytorch.utils.metrics.Precision", "line_number": 62, "usage_type": "call"}, {"api_name": "segmentation_models_pytorch.utils", "line_number": 62, "usage_type": "attribute"}, {"api_name": "testing.test_report", "line_number": 65, "usage_type": "call"}, {"api_name": "hyperparameters.parameters", "line_number": 66, "usage_type": "name"}, {"api_name": "hyperparameters.parameters", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "72128251995", "text": "__author__ = 'yantianyu'\n\nfrom urllib import request\nimport json\nimport time\n\nurl = 'http://ip.taobao.com/service/getIpInfo.php?ip='\n\n\ndef checkTaobaoIP(ip):\n try:\n response = request.urlopen(url + ip, timeout=5)\n result = response.readlines()\n data = json.loads(result[0])\n return \"%15s: %s-%s-%s\" % (ip, data['data']['isp'], data['data']['region'], data['data']['city'])\n except:\n return \"%15s: timeout\" % ip\n\n\nif __name__ == \"__main__\":\n f = open('ip.txt')\n ips = f.readlines()\n f.close()\n\n f = open('ip-check.txt', 'w')\n for ip in ips:\n line = checkTaobaoIP(ip.strip())\n if line:\n print(line.encode('utf-8'))\n f.write(line.encode('utf-8') + '\\n')\n else:\n print(line)\n f.write(line + '\\n')\n f.close()\n print(\"Done!\")\n", "repo_name": "EvanHongYousan/toycode", "sub_path": "python-learning/crash/ipAnalysis.py", "file_name": "ipAnalysis.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "urllib.request.urlopen", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "41070286457", "text": "import 
mock\n\nfrom .base import BaseTest\nfrom ..runner import Runner\n\n\nclass TestRunner(BaseTest):\n\n @mock.patch('smedley.core.config.DriverConfig.get_task_executor')\n def test__execute(self, get_task_executor, tasks):\n runner = Runner()\n run = mock.Mock(return_value=None)\n executor = mock.Mock(run=run)\n get_task_executor.return_value = executor\n runner._execute(task=tasks[0])\n assert run.called\n\n @mock.patch('smedley.runner.logger.info')\n def test__show_summary(self, info, tasks):\n runner = Runner()\n runner._show_summary(tasks=tasks)\n assert info.called\n\n @mock.patch.object(Runner, '_execute')\n @mock.patch.object(Runner, '_show_summary')\n def test__start(self, _show_summary, _execute, tasks):\n runner = Runner()\n runner._start(tasks=tasks)\n assert _show_summary.called\n assert _execute.call_count == len(tasks)\n\n @mock.patch('smedley.core.loaders.BaseTaskLoader.get_loader')\n @mock.patch.object(Runner, '_start')\n def test_run(self, _start, get_loader, tasks):\n runner = Runner()\n _start.return_value = None\n load_tasks = mock.Mock(return_value=tasks)\n get_loader.return_value = mock.Mock(load_tasks=load_tasks)\n runner.run()\n assert _start.called\n", "repo_name": "luizrabachini/smedley", "sub_path": "smedley/tests/test_runner.py", "file_name": "test_runner.py", "file_ext": "py", "file_size_in_byte": 1320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "base.BaseTest", "line_number": 7, "usage_type": "name"}, {"api_name": "runner.Runner", "line_number": 11, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 12, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 13, "usage_type": "call"}, {"api_name": "runner._execute", "line_number": 15, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 9, "usage_type": "call"}, {"api_name": "runner.Runner", "line_number": 20, "usage_type": "call"}, {"api_name": "runner._show_summary", "line_number": 21, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "runner.Runner", "line_number": 27, "usage_type": "call"}, {"api_name": "runner._start", "line_number": 28, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 24, "usage_type": "call"}, {"api_name": "runner.Runner", "line_number": 24, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mock.patch.object", "line_number": 25, "usage_type": "call"}, {"api_name": "runner.Runner", "line_number": 25, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 25, "usage_type": "attribute"}, {"api_name": "runner.Runner", "line_number": 35, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 37, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 38, "usage_type": "call"}, {"api_name": "runner.run", "line_number": 39, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 32, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 33, "usage_type": "call"}, {"api_name": "runner.Runner", "line_number": 33, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "42007807936", "text": "from transformers import RagTokenizer, RagRetriever, RagSequenceForGeneration\nimport torch\nfrom datasets import load_from_disk\ndevice = torch.device(\"cuda:0\")\n\nds = 
load_from_disk(\"/workspace/data/filtered_small_embeddings\")[\"train\"]\ndef get_title_examples(examples):\n split_title = [ i.split(\"\\n\")[0] for i in examples[\"sentence_split\"]]\n examples[\"title\"] = split_title\n return examples\nds = ds.map(get_title_examples, batched=True, num_proc=32)\n\nprint(\"Loaded the Ds\")\nprint(ds)\n\ndf = ds.to_pandas()\n\nfrom collections import Counter\ndf[\"sentence_split_list\"] = [ [i for i in i.split(\"\\n\")[1:] if len(i)>0] for i in df[\"non_split\"]]\n\ndf_filterd = df[[\"date\",\"hash\",\"title\",\"sentence_split_list\"]]\nfiltered = []\nfor i in df_filterd.values:\n for j in i[3]:\n filtered.append([i[0],i[1],i[2],j])\nlen(filtered)\n\nimport pandas as pd\nfiltered_df= pd.DataFrame(filtered, columns=[\"date\",\"hash\",\"title\",\"sentence\"])\n\nimport matplotlib.pyplot as plt \nall_sizes = [len(i.split()) for i in filtered_df[\"sentence\"]]\nplt.hist(all_sizes, bins=100)\n\nfrom datasets import Dataset \ndataset = Dataset.from_pandas(filtered_df)\n\n\nfrom transformers import DPRContextEncoder, DPRContextEncoderTokenizer\nimport torch\ntorch.set_grad_enabled(False)\nctx_encoder = DPRContextEncoder.from_pretrained(\"facebook/dpr-ctx_encoder-single-nq-base\")\nctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained(\"facebook/dpr-ctx_encoder-single-nq-base\")\n\nctx_encoder.cuda()\nprint(ctx_encoder.device)\ndevice = torch.device(\"cuda\")\nctx_encoder.eval()\nwith torch.no_grad():\n ds_with_embeddings = dataset.map(lambda example: {'embeddings': ctx_encoder(**ctx_tokenizer(example[\"sentence\"], return_tensors=\"pt\",padding=True ,truncation=True).to(device))[\"pooler_output\"].detach().cpu().numpy()},num_proc=1)\n print(ds_with_embeddings)\n ds_with_embeddings.save_to_disk(\"/workspace/data/filtered_split_dprc_sentences_embeddings\")", "repo_name": "saisurbehera/dller_NLG", "sub_path": "notebooks/convert_rag.py", "file_name": "convert_rag.py", "file_ext": "py", "file_size_in_byte": 1912, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 4, "usage_type": "call"}, {"api_name": "datasets.load_from_disk", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "datasets.Dataset.from_pandas", "line_number": 36, "usage_type": "call"}, {"api_name": "datasets.Dataset", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.set_grad_enabled", "line_number": 41, "usage_type": "call"}, {"api_name": "transformers.DPRContextEncoder.from_pretrained", "line_number": 42, "usage_type": "call"}, {"api_name": "transformers.DPRContextEncoder", "line_number": 42, "usage_type": "name"}, {"api_name": "transformers.DPRContextEncoderTokenizer.from_pretrained", "line_number": 43, "usage_type": "call"}, {"api_name": "transformers.DPRContextEncoderTokenizer", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "33456378552", "text": "#!/usr/bin/python\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.link import TCLink\nfrom mininet.cli import CLI\nimport threading as thd\nimport os\nimport time\nimport random\nimport argparse\n\ndef parse_args():\n 
parser = argparse.ArgumentParser()\n parser.add_argument(\n '--test-time', '-t', \n type=int, \n default=60, \n dest='test_time',\n help='Test time in seconds.')\n args = parser.parse_args()\n return args\nargs = parse_args()\n\nenter_time = str(int(time.time()))\n\nclass Single(Topo):\n def build(self):\n h1 = self.addHost(\"h1\")\n h2 = self.addHost(\"h2\")\n self.addLink(h1, h2, bw=100, delay=\"100ms\", loss=1)\n\n\ndef change_link_state():\n h2 = net.get(\"h2\")\n intf = h2.intf()\n\n # disconnect simulation\n disconnect_prob = 0.05 # randomly disconnect\n disconnect = random.random() < disconnect_prob\n if disconnect:\n # disc_time = random.uniform(0.1, 1)\n disc_time = random.choice([i/10. for i in range(1, 11)])\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n intf.ifconfig(\"down\")\n with open(\"./log/link_state_log_\" + enter_time, 'a') as f:\n conn_state = \"DISCONN\" if disconnect else \"CONN\"\n line = \"{}\\t{}\\t{}\\t{}%\\t{}\\n\".format(timestamp, \"0\", \"INFms\", \"100\", conn_state)\n f.write(line)\n time.sleep(disc_time)\n intf.ifconfig(\"up\")\n return\n\n # set new link arguments\n # new_loss = random.choice([i/10. for i in range(1, 11)])\n new_loss = random.choice([i for i in range(1, 6)])\n new_bw = random.randint(100, 200) # new bandwidth\n new_delay = \"{}ms\".format(random.randint(20, 100)) # new delay\n\n # change link state\n timestamp = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n intf.config(\n bw=new_bw, \n delay=new_delay, \n loss=new_loss, \n # max_queue_size=500\n )\n \n # record link state change into log\n with open(\"./log/link_state_log_\" + enter_time, 'a') as f:\n conn_state = \"DISCONN\" if disconnect else \"CONN\"\n line = \"{}\\t{}\\t{}\\t{}%\\t{}\\n\".format(timestamp, new_bw, new_delay, new_loss, conn_state)\n f.write(line)\n\ndef change_link_state_thread():\n start_time = time.time()\n while time.time() - start_time < args.test_time:\n print(\"Link changed!\")\n change_link_state()\n stable_time = random.randint(1, 10)\n time.sleep(stable_time)\n\n\nif __name__ == \"__main__\":\n if \"log\" not in os.listdir(\".\"):\n os.mkdir(\"./log\")\n\n single_topo = Single()\n net = Mininet(topo=single_topo, link=TCLink, controller=None)\n\n h1_addr, h2_addr = \"10.0.0.1\", \"10.0.0.2\"\n h1, h2 = net.get(\"h1\", \"h2\")\n h1.cmd(\"ifconfig h1-eth0 \" + h1_addr + \"/8\")\n h2.cmd(\"ifconfig h2-eth0 \" + h2_addr + \"/8\")\n h1.cmd(\"sysctl net.ipv4.ip_forward=1\")\n h2.cmd(\"sysctl net.ipv4.ip_forward=1\")\n\n net.start()\n # h1.popen(\"python server.py -i \" + h1_addr)\n # h2.popen(\"python client.py -i \" + h1_addr + \" -t \" + str(args.test_time - 10))\n CLI(net)\n t = thd.Thread(target=change_link_state_thread) # change link state\n print(\"Link state is changable!\")\n t.start()\n t.join()\n net.stop()", "repo_name": "ponedo/satellite-link-simulation", "sub_path": "single.py", "file_name": "single.py", "file_ext": "py", "file_size_in_byte": 3266, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "mininet.topo.Topo", "line_number": 27, "usage_type": "name"}, {"api_name": "random.random", "line_number": 40, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 43, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 44, "usage_type": "call"}, 
{"api_name": "time.localtime", "line_number": 44, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 58, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 61, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 85, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 86, "usage_type": "call"}, {"api_name": "mininet.net.Mininet", "line_number": 89, "usage_type": "call"}, {"api_name": "mininet.link.TCLink", "line_number": 89, "usage_type": "name"}, {"api_name": "mininet.cli.CLI", "line_number": 101, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "607553898", "text": "import sys\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\nfrom .statsutils import get_logger\n\nclass Fail2banNgStatsAppConfig(AppConfig):\n name = 'Fail2banNgStatsApp'\n\n def ready(self):\n if settings.REFRESH_ON or not self.__server_starting():\n return\n\n self.logger = get_logger('Fail2banNgStatsAppConfig')\n lock = settings.REFRESH_LOCK\n if lock.acquire(blocking=False):\n settings.REFRESH_ON = True\n try:\n self.logger.info('Run scheduled refresh')\n self.__run_scheduled_refresh()\n except Exception as e:\n self.logger.warning('Failed to run shceduled refresh')\n self.logger.debug(str(e))\n lock.release()\n\n def __server_starting(self):\n if len(sys.argv) >= 2:\n return sys.argv[1] == 'runserver'\n return False\n\n def __refresh_thread_job(self, exit_event):\n from .statsreader import read_config, run_scheduled_refresh\n from .statsutils import ArgsMock\n args = ArgsMock(config_path=settings.REFRESH_CONFIG, database=True)\n _config = dict(read_config(args.config))\n _config['event'] = exit_event\n self.logger.debug(str(_config))\n run_scheduled_refresh(args, _config)\n\n def __run_scheduled_refresh(self):\n from threading import Event, Thread\n import atexit\n\n exit_event = Event()\n thread = Thread(target=self.__refresh_thread_job, args=(exit_event,), daemon=True)\n thread.start()\n\n atexit.register(self.__join_refresh_thread, thread, exit_event)\n\n def __join_refresh_thread(self, thread, exit_event):\n exit_event.set()\n self.logger.debug('Join refresh thread')\n thread.join(timeout=float(15))\n if thread.is_alive():\n self.logger.warning('Refreash thread join timed out.')\n else:\n self.logger.info('Refresh thread terminated')\n", "repo_name": "Mini2Inz/fail2ban-stats", "sub_path": "Fail2banNgStatsApp/apps.py", "file_name": "apps.py", "file_ext": "py", "file_size_in_byte": 1973, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.apps.AppConfig", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.settings.REFRESH_ON", "line_number": 12, "usage_type": "attribute"}, {"api_name": 
"django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "statsutils.get_logger", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.settings.REFRESH_LOCK", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.settings.REFRESH_ON", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 29, "usage_type": "attribute"}, {"api_name": "statsutils.ArgsMock", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.REFRESH_CONFIG", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 35, "usage_type": "name"}, {"api_name": "statsreader.read_config", "line_number": 36, "usage_type": "call"}, {"api_name": "statsreader.run_scheduled_refresh", "line_number": 39, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 45, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 46, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "11241123932", "text": "# 2020-05-09 Bokyong Seo\nimport sentry_sdk\nfrom db.query import get_query\nfrom db.connector import MssqlConnector\nimport messages\n\n\ndef update_item_parameter(df):\n \"\"\"\n 문항 난이도, 변별도 DB 저장\n\n Parameters\n ----------\n df: pandas.DataFrame\n 문항 난이도, 변별도 결과 데이터프레임\n\n Returns\n -------\n None\n \"\"\"\n param_list = []\n for _, row in df.iterrows():\n param_list.append((\n row['item_id'],\n row['item_difficulty'],\n row['item_difficulty_code'],\n row['item_discrimination'],\n row['item_discrimination_code'],\n ))\n\n num_of_items = len(param_list)\n sentry_sdk.capture_message(\n messages.MSG_NUM_OF_ITEM.format(str(num_of_items)))\n\n if num_of_items > 0:\n _set_list('item', param_list)\n\n\ndef update_student_parameter(df):\n \"\"\"\n 학생 능력 DB 저장\n\n Parameters\n ----------\n df: pandas.DataFrame\n 학생 능력 결과 데이터프레임\n\n Returns\n -------\n None\n \"\"\"\n param_list = []\n for _, row in df.iterrows():\n param_list.append((\n row['student_id'],\n row['student_ability'],\n ))\n\n num_of_students = len(param_list)\n sentry_sdk.capture_message(\n messages.MSG_NUM_OF_STUDENT.format(str(num_of_students)))\n\n if num_of_students > 0:\n _set_list('student', param_list)\n\n\ndef _set_list(query_name, data_list):\n \"\"\"\n 데이터 리스트를 DB에 저장\n\n Parameters\n ----------\n query_name: str\n 학생 능력 결과 데이터프레임\n data_list: list\n 저장할 데이터 튜플 리스트\n\n Returns\n -------\n None\n \"\"\"\n query = get_query(query_name, 'write')\n\n with MssqlConnector().connect() as conn:\n cursor = conn.cursor()\n cursor.executemany(query, data_list)\n conn.commit()\n", "repo_name": "kendricklee91/portfolio", "sub_path": "dbdk/ai_based_learner_knowledge_state_prediction_model/irt/irt-batch/model/irt/dataupdater.py", "file_name": "dataupdater.py", "file_ext": "py", "file_size_in_byte": 1933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sentry_sdk.capture_message", "line_number": 32, "usage_type": "call"}, {"api_name": "messages.MSG_NUM_OF_ITEM.format", "line_number": 33, "usage_type": "call"}, {"api_name": "messages.MSG_NUM_OF_ITEM", "line_number": 33, "usage_type": "attribute"}, {"api_name": "sentry_sdk.capture_message", 
"line_number": 60, "usage_type": "call"}, {"api_name": "messages.MSG_NUM_OF_STUDENT.format", "line_number": 61, "usage_type": "call"}, {"api_name": "messages.MSG_NUM_OF_STUDENT", "line_number": 61, "usage_type": "attribute"}, {"api_name": "db.query.get_query", "line_number": 82, "usage_type": "call"}, {"api_name": "db.connector.MssqlConnector", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "15490533072", "text": "\"\"\"\nEspecificar os tipos de objetos a serem criados\nusando uma instância-protótipo e criar novos objetos\npela cópia desse protótipo\n\"\"\"\n#import for correction anotations future in order creation \nfrom __future__ import annotations\n\nfrom typing import List\nfrom copy import deepcopy\n\nname1 = 'Luiz'\nname2 = name1 \n\nname1 = 'Outra Coisa'\nprint(name1)\nprint(name2)\n\nclass StringReprMixin:\n def __str__(self):\n params = ','.join([f'{k}={v}' for k,v in self.__dict__.items()])\n return f'{self.__class__.__name__}({params})'\n\n def __repr__(self):\n return self.__str__()\n\nclass Tv(StringReprMixin):\n def __init__(self,name: str,polegadas: int)->None:\n self.name = name\n self.polegadas = polegadas\n self.especifications: List[Specification] = []\n\n def add_especifications(self,specification:Specification)-> None:\n self.especifications.append(specification)\n\n def clone(self) -> Tv:\n return deepcopy(self)\n\nclass Specification(StringReprMixin):\n def __init__(self,modelo,resolucao):\n self.modelo = modelo\n self.resolucao = resolucao\n\nif __name__ == \"__main__\":\n tv_samsung = Tv('Tv Qled',60)\n specification_tu7000 = Specification('TU8000','4k')\n tv_samsung.add_especifications(specification_tu7000)\n\n tv_samsung_55 = tv_samsung.clone()\n tv_samsung_55.polegadas = 55\n\n print(tv_samsung)\n print(tv_samsung_55)\n\n\n", "repo_name": "Diego07101985/pytreino", "sub_path": "design_patterns/criacionais/prototype/prototype.py", "file_name": "prototype.py", "file_ext": "py", "file_size_in_byte": 1415, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "11626942451", "text": "from typing import Callable, Dict, Type, TypeVar, List\n\nfrom brt.runtime import log\n\nlogger = log.get_logger(__file__)\n\nT = TypeVar(\"T\")\n\n\nclass Registry:\n sub_cls_registries: Dict[T, Dict[str, T]] = {}\n func_registries: Dict[str, Callable] = {}\n cls_registries: Dict[str, List[T]] = {}\n\n @classmethod\n def register_sub_cls(cls, sub_cls_type: str, base_cls: T) -> Callable:\n def register_func(sub_cls) -> Type[T]:\n if base_cls not in cls.sub_cls_registries:\n cls.sub_cls_registries[base_cls] = {}\n\n if sub_cls_type in cls.sub_cls_registries[base_cls]:\n logger.warning(f\"{sub_cls_type} is already registered, overwrite it.\")\n\n if not issubclass(sub_cls, base_cls):\n raise ValueError(f\"{sub_cls} is not a subclass of {base_cls}.\")\n sub_cls.brt_abs_type = sub_cls_type\n cls.sub_cls_registries[base_cls][sub_cls_type] = sub_cls\n return sub_cls\n\n return register_func\n\n @classmethod\n def get_sub_cls(cls, sub_cls_type, base_cls) -> Type[T]:\n if base_cls not in cls.sub_cls_registries:\n raise ValueError(f\"{base_cls} is not registered.\")\n\n if sub_cls_type not in cls.sub_cls_registries[base_cls]:\n return None\n\n sub_cls = cls.sub_cls_registries[base_cls][sub_cls_type]\n\n return sub_cls\n\n @classmethod\n def 
sub_cls_exists_and_registered(cls, sub_cls, base_cls) -> bool:\n if base_cls not in cls.sub_cls_registries:\n raise ValueError(\n f\"No base class of {base_cls} exists in the registry, register the base class first.\"\n )\n if (\n issubclass(sub_cls, base_cls)\n and sub_cls in cls.sub_cls_registries[base_cls].values()\n ):\n return True\n\n return False\n\n @classmethod\n def register_cls(cls, cls_type: str) -> Callable:\n def register_func(registered_cls) -> Type[T]:\n if cls_type not in cls.cls_registries:\n cls.cls_registries[cls_type] = []\n\n if registered_cls in cls.cls_registries[cls_type]:\n logger.warning(f\"{cls_type} is already registered, overwrite it.\")\n\n cls.cls_registries[cls_type].append(registered_cls)\n\n return registered_cls\n\n return register_func\n\n @classmethod\n def register_cls_type(cls, cls_type: str):\n if cls_type not in cls.cls_registries:\n cls.cls_registries[cls_type] = []\n\n @classmethod\n def cls_exists_and_registered(cls, registered_cls, cls_type: str) -> bool:\n\n if cls_type not in cls.cls_registries:\n cls.register_cls_type(cls_type)\n\n if registered_cls in cls.cls_registries[cls_type]:\n return True\n\n return False\n", "repo_name": "Raphael-Hao/brainstorm", "sub_path": "python/brt/runtime/registry.py", "file_name": "registry.py", "file_ext": "py", "file_size_in_byte": 2800, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "50", "api": [{"api_name": "brt.runtime.log.get_logger", "line_number": 5, "usage_type": "call"}, {"api_name": "brt.runtime.log", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.TypeVar", "line_number": 7, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "2511436372", "text": "#!/usr/bin/env python\n\n############################################\n# Module to prepare the folder structure for processing with gamma\n############################################\n\nimport os\nimport re\nimport argparse\nfrom functions import *\nfrom datetime import date\n\n##################\n# parse option to work locally or on server\n##################\n\n# parse some arguments\nparser = argparse.ArgumentParser(description=\"Decide whether you want to print the folder structur or create it\")\n# get positional arguments\nparser.add_argument(\"-p\", \"--print\", dest=\"print\", help=\"only print cmd call\", action=\"store_const\", const=True)\nargs =parser.parse_args()\n\n\n# parse the arguments in a list\nargs = parser.parse_args()\n\n\ndef get_directories():\n pass\n\ndef make_folder_struc(data_dir = \"../data\",\n slc_dir = \"../data/SLC\",\n dem_dir = \"../data/DEM\",\n dir_tuples = \"../data/tuples\"):\n\n \"\"\"\n Function to create the Folder Structure\n\n :param data_dir: Directory with all the Zip-files\n :param slc_dir: Directory where all \"single-date\" data will be stored\n 
:param dem_dir: Directory where all DEM-related data will be stored\n :param tuples_dir: Directory where all \"dual-date\"-data will be stored\n :return:\n \"\"\"\n\n zips = [x for x in os.listdir(data_dir) if x.endswith(\".zip\")]\n # get all the dates\n dates = []\n for d in zips:\n # match the first date in the zip files\n m = re.match(\".*__1SDV_(\\d{4})(\\d{2})(\\d{2})\", d)\n da = m.groups()\n d_str = ''.join(da)\n dt = date(year=int(d_str[0:4]), month=int(d_str[4:6]), day=int(d_str[6:8]))\n dates.append(dt)\n\n\n date_combs = []\n for i in dates:\n for j in dates:\n if i != j:\n if abs((i-j).days) < 13:\n if i < j:\n\n main = \"\".join(str(i).split(\"-\"))\n second = \"\".join(str(j).split(\"-\"))\n tuple = main + \"_\" + second\n date_combs.append(tuple)\n\n # this modifies the actual element date_combs\n date_combs.sort()\n\n # # make a list for summer and one for winter ??\n # summer = []\n # winter = []\n # for d in dates:\n # if int(d[4:6]) in [12,1,2]:\n # winter.append(d)\n # else:\n # summer.append(d)\n #\n # # for every date in the summer\n # date_combs_summer = []\n # for d in summer:\n # # get the year\n # year = int(d[0:4])\n # for o in summer:\n # # for every other date check if the year is the same\n # if o != d:\n # year_o = int(o[0:4])\n # if year_o == year:\n # mas = min(int(d), int(o))\n # sl = max(int(d), int(o))\n # dir = str(mas) + \"_\" + str(sl)\n #\n # # will produce the double amount\n # if not dir in date_combs_summer:\n # date_combs_summer.append(dir)\n #\n # # print(\"Summer Combinations\")\n # sorted_date_combs_summer = sorted(date_combs_summer)\n # # print(sorted_date_combs_summer)\n #\n #\n # # for every date in the winter\n # date_combs_winter = []\n # for d in winter:\n # # get the year\n # year = int(d[0:4])\n # for o in winter:\n # # for every other date check if the year is the same\n # if o != d:\n # year_o = int(o[0:4])\n # if year_o == year:\n # mas = min(int(d), int(o))\n # sl = max(int(d), int(o))\n # dir = str(mas) + \"_\" + str(sl)\n #\n # # will produce the double amount\n # if not dir in date_combs_winter:\n # date_combs_winter.append(dir)\n #\n #\n # # print(\"Winter Combinations\")\n # sorted_date_combs_winter = sorted(date_combs_winter)\n # # print(sorted_date_combs_winter)\n #\n # # add them both to one list\n # comb_dates_all = sorted_date_combs_summer + sorted_date_combs_winter\n # print(comb_dates_all)\n # print()\n # print(\"There are {} pairs\".format(len(comb_dates_all)))\n # print()\n\n\n modes = [\"intensity\", \"fringe\"]\n\n # create subdirectory for all files that come in tuples\n tuple_dir = dir_tuples; os.makedirs(tuple_dir) if not os.path.isdir(tuple_dir) else print(\".....\")\n\n # create both if not existent\n for d in date_combs:\n for m in modes:\n dirs = os.path.join(tuple_dir,d, m)\n if args.print:\n print(\"-----------------------------\")\n print(\"Creating Tuple Directory: {}\".format(dirs),end=\"\")\n print(\" \" + TRED + \"<-- Already Exists\" + ENDC) if os.path.isdir(dirs) else print()\n print(\"-----------------------------\")\n else:\n print(\"-----------------------------\")\n print(\"Creating Tuple Directory: {}\".format(dirs))\n print(\" \" + TRED + \"<-- Already Exists\" + ENDC) if os.path.isdir(dirs) else print()\n print(\"-----------------------------\")\n os.makedirs(dirs) if not os.path.isdir(dirs) else print()\n\n ###########################\n # Create directories for DEM and SLCs\n ###########################\n aux_dirs = [dem_dir, slc_dir]\n for dir in aux_dirs:\n if args.print:\n 
print(\"==========\")\n print(\"Creating Directory \\n {}\".format(dir), end=\"\")\n print(\" \" + TRED + \"<-- Already exists\" + ENDC) if os.path.isdir(dir) else print()\n print(\"==========\")\n else:\n print(\"==========\")\n print(\"Creating Directory \\n {}\".format(dir))\n print(\" \" + TRED + \"<-- Already exists (not creating)\" + ENDC) if os.path.isdir(dir) else print()\n print(\"==========\")\n os.makedirs(dir) if not os.path.isdir(dir) else print()\n\ndef main():\n #data_dir, slc_dir, dem_dir, tuples_dir = get_directories()\n make_folder_struc()\n\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "RobinKohrs/gammaGlacierOffset", "sub_path": "gamma_scripts/00_makeFolderStructure.py", "file_name": "00_makeFolderStructure.py", "file_ext": "py", "file_size_in_byte": 6111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 46, "usage_type": "call"}, {"api_name": "re.match", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 139, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "13586169003", "text": "import dbus\nimport dbus.glib\n\n# core interface stuff\nimport gtk\nimport gtk.glade\n\n# timer\nimport gobject\n\n# file loading\nimport os\n\nglobal win_position # store the window position on the screen\n\nglobal playing\nplaying = False\n\nglobal shuffle # playlist will play randomly\nglobal repeat # repeat the playlist\nglobal loop # loop the current element\n\n# mpris doesn't support getting the status of these (at the moment)\nshuffle = False\nrepeat = False\nloop = False\n\n# these are defined on the mpris detected unique name\nglobal root # / org.freedesktop.MediaPlayer\nglobal player # /Player org.freedesktop.MediaPlayer\nglobal tracklist # /Tracklist org.freedesktop.MediaPlayer\n\nglobal bus # Connection to the session bus\nglobal identity # MediaPlayer Identity\n\n\n# If a Media Player connects to the bus, we'll use it\n# Note that we forget the previous Media Player we were connected to\ndef 
NameOwnerChanged(name, new, old):\n if old != \"\" and \"org.mpris.\" in name:\n Connect(name)\n\n# Callback for when \"TrackChange\" signal is emitted\ndef TrackChange(Track):\n # the only mandatory metadata is \"location\"\n try:\n a = Track[\"artist\"]\n except:\n a = \"\"\n try:\n t = Track[\"title\"]\n except:\n t = Track[\"location\"]\n try:\n length = Track[\"length\"]\n except:\n length = 0\n if length > 0:\n time_s.set_range(0,Track[\"length\"])\n time_s.set_sensitive(True)\n else:\n # disable the position scale if length isn't available\n time_s.set_sensitive(False)\n # update the labels\n l_artist.set_text(a)\n l_title.set_text(t)\n\n# Connects to the Media Player we detected\ndef Connect(name):\n global root, player, tracklist\n global playing, identity\n\n # first we connect to the objects\n root_o = bus.get_object(name, \"/\")\n player_o = bus.get_object(name, \"/Player\")\n tracklist_o = bus.get_object(name, \"/TrackList\")\n\n # there is only 1 interface per object\n root = dbus.Interface(root_o, \"org.freedesktop.MediaPlayer\")\n tracklist = dbus.Interface(tracklist_o, \"org.freedesktop.MediaPlayer\")\n player = dbus.Interface(player_o, \"org.freedesktop.MediaPlayer\")\n\n # connect to the TrackChange signal\n player_o.connect_to_signal(\"TrackChange\", TrackChange, dbus_interface=\"org.freedesktop.MediaPlayer\")\n\n # determine if the Media Player is playing something\n if player.GetStatus() == 0:\n playing = True\n TrackChange(player.GetMetadata())\n\n # gets its identity (name and version)\n identity = root.Identity()\n window.set_title(identity)\n\n#plays an element\ndef AddTrack(widget):\n mrl = e_mrl.get_text()\n if mrl != None and mrl != \"\":\n tracklist.AddTrack(mrl, True)\n e_mrl.set_text('')\n else:\n mrl = bt_file.get_filename()\n if mrl != None and mrl != \"\":\n tracklist.AddTrack(\"directory://\" + mrl, True)\n update(0)\n\n# basic control\n\ndef Next(widget):\n player.Next(reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n update(0)\n\ndef Prev(widget):\n player.Prev(reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n update(0)\n\ndef Stop(widget):\n player.Stop(reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n update(0)\n\ndef Quit(widget):\n root.Quit(reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n l_title.set_text(\"\")\n\ndef Pause(widget):\n player.Pause()\n status = player.GetStatus()\n if status == 0:\n img_bt_toggle.set_from_stock(gtk.STOCK_MEDIA_PAUSE, gtk.ICON_SIZE_SMALL_TOOLBAR)\n else:\n img_bt_toggle.set_from_stock(gtk.STOCK_MEDIA_PLAY, gtk.ICON_SIZE_SMALL_TOOLBAR)\n update(0)\n\ndef Repeat(widget):\n global repeat\n repeat = not repeat\n player.Repeat(repeat)\n\ndef Shuffle(widget):\n global shuffle\n shuffle = not shuffle\n tracklist.SetRandom(shuffle)\n\ndef Loop(widget):\n global loop\n loop = not loop\n tracklist.SetLoop(loop)\n\n# update status display\ndef update(widget):\n Track = player.GetMetadata()\n vol.set_value(player.VolumeGet())\n try: \n a = Track[\"artist\"]\n except:\n a = \"\"\n try:\n t = Track[\"title\"]\n except: \n t = \"\"\n if t == \"\":\n try:\n t = Track[\"location\"]\n except:\n t = \"\"\n l_artist.set_text(a)\n l_title.set_text(t)\n try:\n length = Track[\"length\"]\n except:\n length = 0\n if length > 0:\n time_s.set_range(0,Track[\"length\"])\n time_s.set_sensitive(True)\n else:\n # disable the position scale if length isn't available\n time_s.set_sensitive(False)\n GetPlayStatus(0)\n\n# callback for volume change\ndef 
volchange(widget):\n player.VolumeSet(vol.get_value_as_int(), reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n\n# callback for position change\ndef timechange(widget, x=None, y=None):\n player.PositionSet(int(time_s.get_value()), reply_handler=(lambda *args: None), error_handler=(lambda *args: None))\n\n# refresh position change\ndef timeset():\n global playing\n if playing == True:\n try:\n time_s.set_value(player.PositionGet())\n except:\n playing = False\n return True\n\n# toggle simple/full display\ndef expander(widget):\n if exp.get_expanded() == False:\n exp.set_label(\"Less\")\n else:\n exp.set_label(\"More\")\n\n# close event : hide in the systray\ndef delete_event(self, widget):\n self.hide()\n return True\n\n# shouldn't happen\ndef destroy(widget):\n gtk.main_quit()\n\n# hide the controller when 'Esc' is pressed\ndef key_release(widget, event):\n if event.keyval == gtk.keysyms.Escape:\n global win_position\n win_position = window.get_position()\n widget.hide()\n\n# callback for click on the tray icon\ndef tray_button(widget):\n global win_position\n if window.get_property('visible'):\n # store position\n win_position = window.get_position()\n window.hide()\n else:\n # restore position\n window.move(win_position[0], win_position[1])\n window.show()\n\n# hack: update position, volume, and metadata\ndef icon_clicked(widget, event):\n update(0)\n\n# get playing status, modify the Play/Pause button accordingly\ndef GetPlayStatus(widget):\n global playing\n global shuffle\n global loop\n global repeat\n status = player.GetStatus()\n\n playing = status[0] == 0\n if playing:\n img_bt_toggle.set_from_stock(\"gtk-media-pause\", gtk.ICON_SIZE_SMALL_TOOLBAR)\n else:\n img_bt_toggle.set_from_stock(\"gtk-media-play\", gtk.ICON_SIZE_SMALL_TOOLBAR)\n shuffle = status[1] == 1\n bt_shuffle.set_active( shuffle )\n loop = status[2] == 1\n bt_loop.set_active( loop )\n repeat = status[3] == 1\n bt_repeat.set_active( repeat )\n# loads glade file from the directory where the script is,\n# so we can use /path/to/mpris.py to execute it.\nimport sys\nxml = gtk.glade.XML(os.path.join(os.path.dirname(sys.argv[0]) , 'mpris.glade'))\n\n# ui setup\nbt_close = xml.get_widget('close')\nbt_quit = xml.get_widget('quit')\nbt_file = xml.get_widget('ChooseFile')\nbt_next = xml.get_widget('next')\nbt_prev = xml.get_widget('prev')\nbt_stop = xml.get_widget('stop')\nbt_toggle = xml.get_widget('toggle')\nbt_mrl = xml.get_widget('AddMRL')\nbt_shuffle = xml.get_widget('shuffle')\nbt_repeat = xml.get_widget('repeat')\nbt_loop = xml.get_widget('loop')\nl_artist = xml.get_widget('l_artist')\nl_title = xml.get_widget('l_title')\ne_mrl = xml.get_widget('mrl')\nwindow = xml.get_widget('window1')\nimg_bt_toggle=xml.get_widget('image6')\nexp = xml.get_widget('expander2')\nexpvbox = xml.get_widget('expandvbox')\naudioicon = xml.get_widget('eventicon')\nvol = xml.get_widget('vol')\ntime_s = xml.get_widget('time_s')\ntime_l = xml.get_widget('time_l')\n\n# connect to the different callbacks\n\nwindow.connect('delete_event', delete_event)\nwindow.connect('destroy', destroy)\nwindow.connect('key_release_event', key_release)\n\ntray = gtk.status_icon_new_from_icon_name(\"audio-x-generic\")\ntray.connect('activate', tray_button)\n\nbt_close.connect('clicked', destroy)\nbt_quit.connect('clicked', Quit)\nbt_mrl.connect('clicked', AddTrack)\nbt_toggle.connect('clicked', Pause)\nbt_next.connect('clicked', Next)\nbt_prev.connect('clicked', Prev)\nbt_stop.connect('clicked', Stop)\nbt_loop.connect('clicked', 
Loop)\nbt_repeat.connect('clicked', Repeat)\nbt_shuffle.connect('clicked', Shuffle)\nexp.connect('activate', expander)\nvol.connect('changed', volchange)\ntime_s.connect('adjust-bounds', timechange)\naudioicon.set_events(gtk.gdk.BUTTON_PRESS_MASK) # hack for the bottom right icon\naudioicon.connect('button_press_event', icon_clicked) \ntime_s.set_update_policy(gtk.UPDATE_DISCONTINUOUS)\n\nlibrary = \"/media/mp3\" # editme\n\n# set the Directory chooser to a default location\ntry:\n os.chdir(library)\n bt_file.set_current_folder(library)\nexcept:\n bt_file.set_current_folder(os.path.expanduser(\"~\"))\n\n# connect to the bus\nbus = dbus.SessionBus()\ndbus_names = bus.get_object( \"org.freedesktop.DBus\", \"/org/freedesktop/DBus\" )\ndbus_names.connect_to_signal(\"NameOwnerChanged\", NameOwnerChanged, dbus_interface=\"org.freedesktop.DBus\") # to detect new Media Players\n\ndbus_o = bus.get_object(\"org.freedesktop.DBus\", \"/\")\ndbus_intf = dbus.Interface(dbus_o, \"org.freedesktop.DBus\")\nname_list = dbus_intf.ListNames()\n\n# connect to the first Media Player found\nfor n in name_list:\n if \"org.mpris.\" in n:\n Connect(n)\n window.set_title(identity)\n vol.set_value(player.VolumeGet())\n update(0)\n break\n\n# run a timer to update position\ngobject.timeout_add( 1000, timeset)\n\nwindow.set_icon_name('audio-x-generic')\nwindow.show()\n\nicon_theme = gtk.icon_theme_get_default()\ntry:\n pix = icon_theme.load_icon(\"audio-x-generic\",24,0)\n window.set_icon(pix)\nexcept:\n True\n\nwin_position = window.get_position()\n\ngtk.main() # execute the main loop\n", "repo_name": "shaobin0604/faplayer", "sub_path": "jni/vlc/extras/misc/mpris.py", "file_name": "mpris.py", "file_ext": "py", "file_size_in_byte": 10092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dbus.Interface", "line_number": 79, "usage_type": "call"}, {"api_name": "dbus.Interface", "line_number": 80, "usage_type": "call"}, {"api_name": "dbus.Interface", "line_number": 81, "usage_type": "call"}, {"api_name": "gtk.STOCK_MEDIA_PAUSE", "line_number": 129, "usage_type": "attribute"}, {"api_name": "gtk.ICON_SIZE_SMALL_TOOLBAR", "line_number": 129, "usage_type": "attribute"}, {"api_name": "gtk.STOCK_MEDIA_PLAY", "line_number": 131, "usage_type": "attribute"}, {"api_name": "gtk.ICON_SIZE_SMALL_TOOLBAR", "line_number": 131, "usage_type": "attribute"}, {"api_name": "gtk.main_quit", "line_number": 212, "usage_type": "call"}, {"api_name": "gtk.keysyms", "line_number": 216, "usage_type": "attribute"}, {"api_name": "gtk.ICON_SIZE_SMALL_TOOLBAR", "line_number": 247, "usage_type": "attribute"}, {"api_name": "gtk.ICON_SIZE_SMALL_TOOLBAR", "line_number": 249, "usage_type": "attribute"}, {"api_name": "gtk.glade.XML", "line_number": 259, "usage_type": "call"}, {"api_name": "gtk.glade", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 259, "usage_type": "call"}, {"api_name": "os.path", "line_number": 259, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 259, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 259, "usage_type": "attribute"}, {"api_name": "gtk.status_icon_new_from_icon_name", "line_number": 291, "usage_type": "call"}, {"api_name": "gtk.gdk", "line_number": 307, "usage_type": "attribute"}, {"api_name": "gtk.UPDATE_DISCONTINUOUS", "line_number": 309, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 315, "usage_type": "call"}, {"api_name": 
"os.path.expanduser", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "dbus.SessionBus", "line_number": 321, "usage_type": "call"}, {"api_name": "dbus.Interface", "line_number": 326, "usage_type": "call"}, {"api_name": "gobject.timeout_add", "line_number": 339, "usage_type": "call"}, {"api_name": "gtk.icon_theme_get_default", "line_number": 344, "usage_type": "call"}, {"api_name": "gtk.main", "line_number": 353, "usage_type": "call"}]} +{"seq_id": "13130127905", "text": "import torch\nimport torch.nn as nn\nfrom model.net.base import Base\nfrom model.net.convbnrelu import ConvBNRelu\nfrom model.net.gatedresidualblock import GatedResidualBlock\nclass SoftGatedSkipConnection(nn.Module):\n def __init__(self,stack,feature,num_ch,num_key):\n super(SoftGatedSkipConnection, self).__init__()\n self.out_ch= 2 * num_key + 1\n self.stack=stack\n self.prelayer=nn.Sequential(ConvBNRelu(num_ch,feature,7,2,3),nn.MaxPool2d(2),GatedResidualBlock(feature))\n self.bases=nn.ModuleList([Base(depth=4, feature=feature) for i in range(stack)])\n self.out=nn.ModuleList([nn.Conv2d(feature,num_key*2+1,1) for _ in range(stack)])\n def forward(self,x):\n x=self.prelayer(x)\n ret=[]\n for s in range(self.stack):\n x=self.bases[s](x)\n out=self.out[s](x)\n center,kps=torch.split(out, [1, self.out_ch - 1], 1)\n ret+=[(torch.sigmoid(center),torch.tanh(kps))]\n return ret\n\nif __name__=='__main__':\n from torchviz import make_dot\n model=SoftGatedSkipConnection(stack=4,feature=256,num_ch=3,num_key=12)\n optim=torch.optim.Adam(model.parameters())\n input=torch.randn(2,3,128,128)\n output=model(input)\n dot=make_dot(output[0][0],params=dict(model.named_parameters()))\n dot.format='png'\n dot.render('graph_image')\n [print(f'{input.shape} -> {output[i][0].shape},{output[i][1].shape}') for i in range(len(output))]\n loss=torch.stack([o[0] for o in output]).mean()\n optim.step()\n optim.zero_grad()", "repo_name": "sikidaten/single_stage_pose_estimation", "sub_path": "model/softgatedskipconnection.py", "file_name": "softgatedskipconnection.py", "file_ext": "py", "file_size_in_byte": 1538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "model.net.convbnrelu.ConvBNRelu", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 11, "usage_type": "call"}, {"api_name": "model.net.gatedresidualblock.GatedResidualBlock", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "model.net.base.Base", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.tanh", "line_number": 21, "usage_type": "call"}, {"api_name": "model.net.base", "line_number": 26, 
"usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 27, "usage_type": "attribute"}, {"api_name": "model.net.base.parameters", "line_number": 27, "usage_type": "call"}, {"api_name": "model.net.base", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 28, "usage_type": "call"}, {"api_name": "model.net.base", "line_number": 29, "usage_type": "call"}, {"api_name": "torchviz.make_dot", "line_number": 30, "usage_type": "call"}, {"api_name": "model.net.base.named_parameters", "line_number": 30, "usage_type": "call"}, {"api_name": "model.net.base", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "43578630642", "text": "import csv\nimport datetime\nimport random\n\nfrom typing import Any, Iterable\n\ncountries = ['RU', 'US', 'UK', 'ES']\ntotal_uids = 1000\ndays = 90\ntotal_days = datetime.timedelta(days=days)\nnow = datetime.datetime.today()\nfrom_date = now - total_days - datetime.timedelta(minutes=now.minute, hours=now.hour, seconds=now.second, microseconds=now.microsecond)\n\ndef gen_regs():\n yield [\"user_id\",\"email\",\"country\",\"ref\",\"created_at\"]\n reg_date = from_date\n total = 0\n print(\"Generating registrations...\")\n for uid in range(1, total_uids):\n ref = \"\"\n if uid%30 == 0:\n ref = \"http://bro.bro?bro=%d\" % (uid-15)\n if uid%200 == 0:\n reg_date += datetime.timedelta(days=1)\n total +=1\n yield [uid, 'foo%d@bar.com' % uid, countries[uid%len(countries)], ref, reg_date + datetime.timedelta(seconds=random.randint(0, total_days.total_seconds()))]\n print(\"Generated %d registrations events\" % total)\n\ndef gen_logins():\n yield [\"user_id\",\"created_at\"]\n total = 0\n print(\"Generating logins...\")\n for dt in [from_date - datetime.timedelta(days=x) for x in range(days)]:\n for _ in range(50):\n total +=1\n yield [random.randint(1, total_uids), dt + datetime.timedelta(seconds=random.randint(0, total_days.total_seconds()))]\n print(\"Generated %d logins events\" % total)\n\ndef gen_bro():\n yield [\"from_bro\",\"to_bro\",\"created_at\"]\n total = 0\n print(\"Generating bros...\")\n for dt in [from_date - datetime.timedelta(days=x) for x in range(days)]:\n for _ in range(250):\n total +=1\n yield [random.randint(1, total_uids), random.randint(1, total_uids), dt + datetime.timedelta(seconds=random.randint(0, total_days.total_seconds()))]\n print(\"Generated %d bros events\" % total)\n\ndef write_csv(filename: str, rows: Iterable[Iterable[Any]]):\n with open(filename, 'w+', newline='') as f:\n w = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n w.writerows(rows)\n\n\nif __name__ == \"__main__\":\n write_csv(\"./data/events_registration.csv\", gen_regs())\n write_csv(\"./data/events_login.csv\", gen_logins())\n write_csv(\"./data/events_bro.csv\", gen_bro())\n", "repo_name": "chapsuk/dbt-vertica-example", "sub_path": "scripts/generate_datasets.py", "file_name": "generate_datasets.py", "file_ext": "py", "file_size_in_byte": 2215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datetime.timedelta", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 12, "usage_type": 
"call"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 26, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 43, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Iterable", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 49, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 51, "usage_type": "call"}, {"api_name": "csv.QUOTE_MINIMAL", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "37589385993", "text": "#!/usr/bin/env python\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef label_histograms(Y, use_log_scale=False):\n\tH = plt.hist(Y, bins=20)\n\tplt.ylabel('Probability')\n\tplt.xlabel('value')\n\tplt.title('Label Histogram')\n\tif use_log_scale: plt.xscale('log')\n\n\tplt.savefig('label_histo.png')\n\tplt.show()\n\tplt.clf()\n\n\ndata_name = '../data/car'\nY = np.loadtxt(data_name + '_label.csv', delimiter=',', dtype=np.int32)\t\t\t\nlabel_histograms(Y)\n", "repo_name": "endsley/sklearn_example", "sub_path": "DataStats/label_functions.py", "file_name": "label_functions.py", "file_ext": "py", "file_size_in_byte": 441, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.hist", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xscale", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "30362846253", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport os\nimport numpy as np\nimport torch\n\n\n# In[5]:\n\n\nfrom torch.utils.data import DataLoader\n\nfrom utils.functions import load_model\nfrom vrp.problem_vrp import CVRPTW\n\n\n# In[1]:\n\n\n# get_ipython().run_line_magic('matplotlib', 'inline')\nfrom matplotlib import 
pyplot as plt\n\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.lines import Line2D\n\n# Code inspired by Google OR Tools plot:\n# https://github.com/google/or-tools/blob/fb12c5ded7423d524fc6c95656a9bdc290a81d4d/examples/python/cvrptw_plot.py\n\ndef discrete_cmap(N, base_cmap=None):\n \"\"\"\n Create an N-bin discrete colormap from the specified input map\n \"\"\"\n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n\n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\ndef plot_vehicle_routes(data, route, t, ax1, markersize=5, visualize_demands=False, demand_scale=1, round_demand=False):\n \"\"\"\n Plot the vehicle routes on matplotlib axis ax1.\n \"\"\"\n print(route)\n print(data['time_window'])\n print(t)\n # route is one sequence, separating different routes with 0 (depot)\n routes = [r[r!=0] for r in np.split(route.cpu().numpy(), np.where(route==0)[0]) if (r != 0).any()]\n depot = data['depot'].cpu().numpy()\n locs = data['loc'].cpu().numpy()\n demands = data['demand'].cpu().numpy() * demand_scale\n capacity = demand_scale # Capacity is always 1\n \n x_dep, y_dep = depot\n ax1.plot(x_dep, y_dep, 'sk', markersize=markersize*4)\n# ax1.set_xlim(10, 60)\n# ax1.set_ylim(40, 90)\n ax1.set_xlim(0, 1)\n ax1.set_ylim(0, 1)\n \n legend = ax1.legend(loc='upper center')\n \n cmap = discrete_cmap(len(routes) + 2, 'nipy_spectral')\n dem_rects = []\n used_rects = []\n cap_rects = []\n qvs = []\n total_dist = 0\n for veh_number, r in enumerate(routes):\n color = cmap(len(routes) - veh_number) # Invert to have in rainbow order\n \n route_demands = demands[r - 1]\n coords = locs[r - 1, :]\n xs, ys = coords.transpose()\n\n total_route_demand = sum(route_demands)\n print(total_route_demand,capacity)\n assert total_route_demand <= capacity+1\n if not visualize_demands:\n ax1.plot(xs, ys, 'o', mfc=color, markersize=markersize, markeredgewidth=0.0)\n \n dist = 0\n x_prev, y_prev = x_dep, y_dep\n cum_demand = 0\n for (x, y), d in zip(coords, route_demands):\n dist += np.sqrt((x - x_prev) ** 2 + (y - y_prev) ** 2)\n \n cap_rects.append(Rectangle((x, y), 0.01, 0.1))\n used_rects.append(Rectangle((x, y), 0.01, 0.1 * total_route_demand / capacity))\n dem_rects.append(Rectangle((x, y + 0.1 * cum_demand / capacity), 0.01, 0.1 * d / capacity))\n \n x_prev, y_prev = x, y\n cum_demand += d\n \n dist += np.sqrt((x_dep - x_prev) ** 2 + (y_dep - y_prev) ** 2)\n total_dist += dist\n qv = ax1.quiver(\n xs[:-1],\n ys[:-1],\n xs[1:] - xs[:-1],\n ys[1:] - ys[:-1],\n scale_units='xy',\n angles='xy',\n scale=1,\n color=color,\n label='R{}, # {}, c {} / {}, d {:.2f}'.format(\n veh_number, \n len(r), \n int(total_route_demand) if round_demand else total_route_demand, \n int(capacity) if round_demand else capacity,\n dist\n )\n )\n \n qvs.append(qv)\n \n ax1.set_title('{} routes, total distance {:.2f}'.format(len(routes), total_dist))\n ax1.legend(handles=qvs)\n \n pc_cap = PatchCollection(cap_rects, facecolor='whitesmoke', alpha=1.0, edgecolor='lightgray')\n pc_used = PatchCollection(used_rects, facecolor='lightgray', alpha=1.0, edgecolor='lightgray')\n pc_dem = PatchCollection(dem_rects, facecolor='black', alpha=1.0, edgecolor='black')\n \n if visualize_demands:\n ax1.add_collection(pc_cap)\n ax1.add_collection(pc_used)\n 
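# pc_cap, pc_used and pc_dem stack per-stop bars for total capacity, capacity used by the route, and each stop's demand\n        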
ax1.add_collection(pc_dem)\n\n\n# In[10]:\n\nif __name__ == \"__main__\":\n model, _ = load_model('outputs\\\\25\\\\cvrptw25_rollout_20211115T021240\\\\epoch-104.pt')\n torch.manual_seed(4321)\n # datapath = 'data/cvrptw/cvrptw25_solomon.pkl'\n # dataset = CVRPTW.make_dataset(filename=datapath, size=25, num_samples=5)\n dataset = CVRPTW.make_dataset(size=25, num_samples=1)\n # Need a dataloader to batch instances\n dataloader = DataLoader(dataset, batch_size=1)\n\n # Make var works for dicts\n batch = next(iter(dataloader))\n\n # Run the model\n model.eval()\n model.set_decode_type('greedy')\n with torch.no_grad():\n length, log_p, pi, time = model(batch, return_pi=True)\n tours = pi\n times = time\n for i in dataset:\n print(np.array(i['loc']))\n print(np.array(i['demand']))\n print(np.array(tours))\n print(np.array(times))\n\n\n\n\n\n", "repo_name": "bepolygonum/AttentionVRP", "sub_path": "plot_cvrptw_25.py", "file_name": "plot_cvrptw_25.py", "file_ext": "py", "file_size_in_byte": 5202, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.patches.Rectangle", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.collections.PatchCollection", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.collections.PatchCollection", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.collections.PatchCollection", "line_number": 129, "usage_type": "call"}, {"api_name": "utils.functions.load_model", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 141, "usage_type": "call"}, {"api_name": "vrp.problem_vrp.CVRPTW.make_dataset", "line_number": 144, "usage_type": "call"}, {"api_name": "vrp.problem_vrp.CVRPTW", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "11234780801", "text": "\"\"\"circinus URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.Home.as_view(), name='index'),\n    # product\n    path('product/create/', views.ProductCreate.as_view(), name='product-create'),\n    path('product/list/', views.ProductList.as_view(), name='product-list'),\n    path('product/detail/<int:pk>/', views.ProductDetail.as_view(), name='product-detail'),\n    path('product/update/<int:pk>/', views.ProductUpdate.as_view(), name='product-update'),\n    path('product/delete/<int:pk>/', views.ProductDelete.as_view(), name='product-delete'),\n    # provider\n    path('provider/create/', views.ProviderCreate.as_view(), name='provider-create'),\n    path('provider/list/', views.ProviderList.as_view(), name='provider-list'),\n    path('provider/detail/<int:pk>/', views.ProviderDetail.as_view(), name='provider-detail'),\n    path('provider/update/<int:pk>/', views.ProviderUpdate.as_view(), name='provider-update'),\n    path('provider/delete/<int:pk>/', views.ProviderDelete.as_view(), name='provider-delete'),\n    # entry\n    path('entry/create/', views.EntryCreate.as_view(), name='entry-create'),\n    path('entry/list/', views.EntryList.as_view(), name='entry-list'),\n    path('entry/detail/<int:pk>/', views.EntryDetail.as_view(), name='entry-detail'),\n    # path('entry/update/<int:pk>/', views.EntryUpdate.as_view(), name='entry-update'),\n    path('entry/delete/<int:pk>/', views.EntryDelete.as_view(), name='entry-delete'),\n\n    path('entry/csv/<int:pk>/', views.some_view, name='entry-csv')\n]\n", "repo_name": "dementedshaman/cepheus", "sub_path": "warehouse/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2147, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 39, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "30422241455", "text": "from kmeans import assign_data_to_closest_centroid\nfrom utils import load_centroids, read_data\n\n\ndef update_assignment(data, labels, centroids):\n    \"\"\"Assign all data points to the closest centroids and keep track of their\n    labels. 
The i-th point in \"data\" corresponds to the i-th label in \"label\".\n\n Arguments:\n data: a list of lists representing all data points\n labels: a list of ints representing all data labels\n centroids: the centroid dictionary\n\n Returns: a new dictionary whose keys are the centroids' key names and\n values are a list of labels of the data points that are assigned\n to that centroid.\n \"\"\"\n point_assignment = {}\n for index in range(len(data)):\n closest_centroid = \\\n assign_data_to_closest_centroid(data[index], centroids)\n if closest_centroid not in point_assignment.keys():\n point_assignment[closest_centroid] = []\n point_assignment[closest_centroid].append(labels[index])\n return point_assignment\n\n\ndef majority_count(labels):\n \"\"\"Return the count of the majority labels in the label list\n\n Arguments:\n labels: a list of labels\n\n Returns: the count of the majority labels in the list\n \"\"\"\n counter = 0\n for num in labels:\n frequency = labels.count(num)\n if frequency > counter:\n counter = frequency\n return counter\n\n\ndef accuracy(data, labels, centroids):\n \"\"\"Calculate the accuracy of the algorithm. You should use\n update_assignment and majority_count (that you previously implemented)\n\n Arguments:\n data: a list of lists representing all data points\n labels: a list of ints representing all data labels\n centroids: the centroid dictionary\n\n Returns: a float representing the accuracy of the algorithm\n \"\"\"\n label_dict = update_assignment(data, labels, centroids)\n sum = 0\n for label_list in label_dict.values():\n sum += majority_count(label_list)\n accuracy = sum / len(labels)\n return accuracy\n\n\nif __name__ == '__main__':\n centroids = load_centroids(\"mnist_final_centroids.csv\")\n # print(len(centroids))\n data, label = read_data(\"data/mnist.csv\")\n print(accuracy(data, label, centroids))\n", "repo_name": "micozmei/Python-Data-Analysis", "sub_path": "analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 2258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "kmeans.assign_data_to_closest_centroid", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.load_centroids", "line_number": 64, "usage_type": "call"}, {"api_name": "utils.read_data", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "10238679295", "text": "from utils.aesutils import *\nfrom utils.aes import AES\nfrom utils.binutils import FileHelper\nfrom vulnexamples.aesvulns import SimpleInputECB, ExampleECBCBCOracle, ECBOrCBC, CBCBitFlip\n\n\nclass TestAES:\n def test_encrypt(self):\n assert True\n\n def test_decrypt(self):\n with open('../challenge_files/challenge7_decrypted.txt', 'rb') as f:\n plaintext = f.read()\n ciphertext = FileHelper.readb64filetohex('../challenge_files/challenge7.txt')\n cipher = AES(b'YELLOW SUBMARINE', 'ECB')\n assert cipher.decrypt(ciphertext) == plaintext\n\n def test_cbcdecrypt(self):\n with open('../challenge_files/challenge10_decrypted.txt', 'rb') as f:\n plaintext = f.read()\n ciphertext = FileHelper.readb64filetohex('../challenge_files/challenge10.txt')\n cipher = AES(b'YELLOW SUBMARINE', 'CBC')\n assert cipher.decrypt('00' * 16 + ciphertext) == plaintext\n\n\nclass Test_ECBOrCBC:\n def test_test(self):\n oracle = ExampleECBCBCOracle()\n isECBORCBC = ECBOrCBC(oracle.oracle)\n assert isECBORCBC.test() == oracle.isECB\n\n\nclass TestByteAtATimeDecryptECB:\n\n # def __init__(self):\n # ecboracle = 
SimpleInputECB(FileHelper.readb64filetobytes('../challenge_files/challenge12.txt'))\n # self.baatecb = ByteAtATimeDecryptionECB(ecboracle.oracle)\n def test_detect_blocklength(self):\n ecboracle = SimpleInputECB(FileHelper.readb64filetobytes('../challenge_files/challenge12.txt'))\n baatecb = ByteAtATimeDecryptionECB(ecboracle.oracle)\n assert baatecb.detect_blocklength() == 16\n\n def test_detect_ecb(self):\n ecboracle = SimpleInputECB(FileHelper.readb64filetobytes('../challenge_files/challenge12.txt'))\n baatecb = ByteAtATimeDecryptionECB(ecboracle.oracle)\n baatecb.detect_blocklength()\n assert baatecb.detect_ECB()\n\n def test_crack(self):\n assert True\n\n\nclass TestCBCbitflipblocks:\n def test_cbcbitflipblocks(self):\n cbcbitflip = CBCBitFlip()\n ciphertext = cbcbitflip.create_ciphertext('A'*16)\n newctext = CBCbitflipblocks(ciphertext,3,b'A'*16,b'A;admin=true;A=A')\n assert cbcbitflip.parse_ciphertext(newctext)\n", "repo_name": "nullpsifer/cryptopals_python", "sub_path": "tests/test_aesutils.py", "file_name": "test_aesutils.py", "file_ext": "py", "file_size_in_byte": 2212, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "utils.binutils.FileHelper.readb64filetohex", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.aes.AES", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper.readb64filetohex", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.aes.AES", "line_number": 22, "usage_type": "call"}, {"api_name": "vulnexamples.aesvulns.ExampleECBCBCOracle", "line_number": 28, "usage_type": "call"}, {"api_name": "vulnexamples.aesvulns.ECBOrCBC", "line_number": 29, "usage_type": "call"}, {"api_name": "vulnexamples.aesvulns.SimpleInputECB", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper.readb64filetobytes", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper", "line_number": 39, "usage_type": "name"}, {"api_name": "vulnexamples.aesvulns.SimpleInputECB", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper.readb64filetobytes", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.binutils.FileHelper", "line_number": 44, "usage_type": "name"}, {"api_name": "vulnexamples.aesvulns.CBCBitFlip", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "3495704600", "text": "import pyglet\nimport mathlib\nfrom mathlib import Vector\nimport physicalobject, resources\nimport math, time, inspect, sys\n\ngroup2 = pyglet.graphics.OrderedGroup(2)\n\nclass Component(object):\n\tcategory = \"\" \t\t#Broad, used by shop tab, like \"weapon\", \"engine\", \"sensor\"\n\ttype = \"\" \t\t\t#Narrow, describes how the component works, like \"lasercannon\", \"laserturret\", \"ionthruster\"\n\tsubType = \"\" \t\t#Unique to a particular combination of attributes, like \"lasercannonsmall\", \"gravgun100\"\n\tname = \"\"\t\t\t#Player viewable pretty name, like \"Pulse Laser Turret - 10W\", \"Singularity Launcher\"\n\timg = \"items/base.png\" #Used by the shop\n\tlicenseCost = 0\t\t#In credits\n\tgoodsCost = {}\t\t#Keys are materials, values are # of tons of that material required for crafting\n\tdef __init__(self, ship=None):\n\t\tself.ship = ship\n\t\t\n\tdef addToShip(self, ship):\t\t#add a component to a 
ship\n\t\tself.ship = ship\n\t\tslot = ship.slots[self.category]\n\t\tfor i in range(len(slot)):\n\t\t\tif slot[i] == 0:\n\t\t\t\tslot[i] = self\n\t\t\t\tbreak\n\nclass Gun(Component):\n\tcategory = \"mainGuns\"\n\t\n\tdelay = 0.25\n\t\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Gun, self).__init__(*args, **kwargs)\n\t\tself.window = pyglet.window.get_platform().get_default_display().get_windows()[0]\t\n\t\tself.shootTime = time.time()\n\nclass Cannon(Gun):\n\ttype = \"cannon\"\n\tsubType = \"laserGun1\"\n\tname = \"Pulse Laser Cannon - 10W\"\n\timg = \"items/laserGun1.png\"\n\tlicenseCost = 3000\n\tgoodsCost = {\"steel\": 5, \"lithium\": 5}\n\t\n\tdef fire(self):\t\n\t\tif time.time() > self.shootTime:\t\t\n\t\t\tbulletImg = resources.loadImage(\"bullet.png\", center=True)\n\t\t\tbullet = physicalobject.Bullet(ship=self.ship, x=self.ship.x, y=self.ship.y, img=bulletImg, batch=self.window.currentSystem.batch, group=group2)\n\t\t\tbullet.rotation = self.ship.rotation\n\t\t\tangleRadians = -math.radians(self.ship.rotation)\n\t\t\tbullet.vel.x = (self.ship.vel.x + math.cos(angleRadians) * bullet.maxSpeed)\n\t\t\tbullet.vel.y = (self.ship.vel.y + math.sin(angleRadians) * bullet.maxSpeed)\t\n\t\t\tself.window.currentSystem.tempObjs.append(bullet)\n\t\t\tself.shootTime = time.time() + self.delay\n\t\t\nclass Turret(Gun):\n\ttype = \"turret\"\n\tsubType = \"laserTurret1\"\n\tname = \"Pulse Laser Turret - 10W\"\n\timg = \"items/laserGun1.png\"\t\n\tlicenseCost = 5000\n\tgoodsCost = {\"steel\": 7, \"lithium\": 5}\n\t\n\tdef fire(self):\t\n\t\tif time.time() > self.shootTime:\t\t\n\t\t\t#direction = tar.normalized()\n\t\t\t#direction.x *= 100\n\t\t\t#direction.y *= 100\n\t\t\t#target = Vector(direction.x - self.ship.x, direction.y - self.ship.y)\t\n\t\t\tbulletImg = resources.loadImage(\"bullet.png\", center=True)\n\t\t\tbullet = physicalobject.Bullet(ship=self.ship, x=self.ship.x, y=self.ship.y, img=bulletImg, batch=self.window.currentSystem.batch, group=group2)\n\t\t\tbullet.vel.x = ((self.ship.vel.x/2) + tar.x - self.ship.x) * bullet.turretSpeed\n\t\t\tbullet.vel.y = ((self.ship.vel.y/2) + tar.y - self.ship.y) * bullet.turretSpeed\n\t\t\tself.window.currentSystem.tempObjs.append(bullet)\n\t\t\tself.shootTime = time.time() + self.delay\t\n\t\t\t\t\nclass GravGun(Gun):\n\tcategory = \"secondaryGuns\"\n\ttype = \"gravgun\"\n\tsubType = \"gravgun\"\n\timg = \"items/gravGun.png\"\n\tname = \"Singularity Launcher\"\n\tlicenseCost = 7000 #TODO: This should be balanced to be way more expensive\n\tgoodsCost = {\"steel\": 1, \"lithium\": 1, \"medicine\": 1}\n\t\n\tdelay = 2\n\t\n\tdef fire(self):\t\n\t\tif time.time() > self.shootTime:\t\t\n\t\t\tbulletImg = resources.loadImage(\"bullet.png\", center=True)\n\t\t\tbullet = physicalobject.GravBullet(ship=self.ship, x=self.ship.x, y=self.ship.y, img=bulletImg, batch=self.window.mainBatch, group=group2, deathTime=0.5)\n\t\t\tbullet.rotation = self.ship.rotation\n\t\t\tangleRadians = -math.radians(self.ship.rotation)\n\t\t\tbullet.vel.x = (self.ship.vel.x + math.cos(angleRadians) * bullet.maxSpeed)\n\t\t\tbullet.vel.y = (self.ship.vel.y + math.sin(angleRadians) * bullet.maxSpeed)\n\t\t\tself.window.currentSystem.tempObjs.append(bullet)\n\t\t\tself.shootTime = time.time() + self.delay\t\n\t\t\t\nclass MissileGun(Gun):\n\tcategory = \"secondaryGuns\"\n\ttype = \"missilegun\"\n\tsupType = \"missilegun\"\n\timg = \"items/laserGun1.png\"\n\tname = \"Missile Launcher\"\n\tlicenseCost = 4500\n\tgoodsCost = {\"steel\": 3, \"lithium\": 1}\n\t\n\tdelay = 
2\n\tmissileImg = resources.loadImage(\"missile.png\", center=True)\n\t\n\tdef fire(self):\n\t\tif time.time() > self.shootTime:\t\t\n\t\t\tbullet = physicalobject.Missile(ship=self.ship, x=self.ship.x, y=self.ship.y, img=self.missileImg, batch=self.window.mainBatch, group=group2, deathTime=5)\n\t\t\tbullet.scale = 0.15\n\t\t\tbullet.rotation = self.ship.rotation\n\t\t\tangleRadians = -math.radians(self.ship.rotation)\n\t\t\tbullet.vel.x = (self.ship.vel.x + math.cos(angleRadians) * bullet.maxSpeed)/2\n\t\t\tbullet.vel.y = (self.ship.vel.y + math.sin(angleRadians) * bullet.maxSpeed)/2\n\t\t\tself.window.currentSystem.tempObjs.append(bullet)\n\t\t\tself.shootTime = time.time() + self.delay\t\n\t\t\t\nclass Engine(Component):\n\tcategory = \"engines\"\n\ttype = \"engine\"\n\tsubType = \"engine\"\n\tname = \"Engine\"\n\t\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Engine, self).__init__(*args, **kwargs)\n\t\tself.strength = 200\nclass Engine2(Component):\n\tcategory = \"engines\"\n\ttype = \"engine2\"\n\tsubType = \"engine2\"\n\tname = \"Engine2\"\n\t\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Engine2, self).__init__(*args, **kwargs)\n\t\tself.strength = 300\n\nclass Battery(Component):\n\tcategory = \"batteries\"\n\timg = \"items/battery.png\"\n\ttype = \"battery\"\n\tsubType = \"battery\"\n\tname = \"Battery\"\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(Battery, self).__init__(*args, **kwargs)\n\t\tself.capacity = 100\n\nComponents = [] #To populate the shop\ndef init():\n\tfor name, Cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n\t\tif issubclass(Cls, Component):\n\t\t\t#Create a new instance of each component\n\t\t\tinstance = Cls()\n\t\t\tif instance.name: \n\t\t\t\t#Don't put it in the shop unless it has a name (to block parent classes)\n\t\t\t\tComponents.append(instance)\n", "repo_name": "Nebual/spad", "sub_path": "components.py", "file_name": "components.py", "file_ext": "py", "file_size_in_byte": 5760, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pyglet.graphics.OrderedGroup", "line_number": 7, "usage_type": "call"}, {"api_name": "pyglet.graphics", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pyglet.window.get_platform", "line_number": 35, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 35, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "resources.loadImage", "line_number": 48, "usage_type": "call"}, {"api_name": "physicalobject.Bullet", "line_number": 49, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 51, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 52, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 66, "usage_type": "call"}, {"api_name": "resources.loadImage", "line_number": 71, "usage_type": "call"}, {"api_name": "physicalobject.Bullet", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "resources.loadImage", "line_number": 91, "usage_type": "call"}, {"api_name": "physicalobject.GravBullet", "line_number": 92, "usage_type": "call"}, {"api_name": "math.radians", 
"line_number": 94, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 95, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "resources.loadImage", "line_number": 110, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "physicalobject.Missile", "line_number": 114, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 117, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 118, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 119, "usage_type": "call"}, {"api_name": "time.time", "line_number": 121, "usage_type": "call"}, {"api_name": "inspect.getmembers", "line_number": 154, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 154, "usage_type": "attribute"}, {"api_name": "inspect.isclass", "line_number": 154, "usage_type": "attribute"}]} +{"seq_id": "4113328681", "text": "import string\n\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup, KeyboardButton\nfrom emoji import emojize\n\nfrom config import available_groups\nfrom defs import transform_into_user_groups\n\n\nasync def groups_markup(self_id):\n markup = InlineKeyboardMarkup()\n user_groups = await transform_into_user_groups()\n for button in await available_groups():\n if button in user_groups[self_id]:\n display_button = button + f' {emojize(\":check_mark_button:\")}'\n else:\n display_button = button\n markup.row(InlineKeyboardButton(string.capwords(display_button), callback_data=button))\n return markup\n\n\nasync def menu_markup():\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n group_button = KeyboardButton(emojize('Выбор группы:family_man_man_girl_boy:'))\n ping_button = KeyboardButton(emojize('Пинг | Созыв:telephone:'))\n nickname_button = KeyboardButton(emojize('Изменить ник:radioactive:'))\n markup.row(ping_button).row(group_button).row(nickname_button)\n return markup", "repo_name": "DarkWood312/ping_group_of_players_bot", "sub_path": "keyboards.py", "file_name": "keyboards.py", "file_ext": "py", "file_size_in_byte": 1104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "aiogram.types.InlineKeyboardMarkup", "line_number": 11, "usage_type": "call"}, {"api_name": "defs.transform_into_user_groups", "line_number": 12, "usage_type": "call"}, {"api_name": "config.available_groups", "line_number": 13, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 15, "usage_type": "call"}, {"api_name": "aiogram.types.InlineKeyboardButton", "line_number": 18, "usage_type": "call"}, {"api_name": "string.capwords", "line_number": 18, "usage_type": "call"}, {"api_name": "aiogram.types.ReplyKeyboardMarkup", "line_number": 23, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 24, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 24, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 25, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 25, "usage_type": "call"}, {"api_name": "aiogram.types.KeyboardButton", "line_number": 26, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "70864324325", "text": "# coding: utf-8\n\nimport re\nimport download\nimport urlparse\n\ndef link_crawler(seed_url, link_regex, 
max_depth=2):\n    \"\"\"Crawl from the given seed URL following links matched by link_regex\"\"\"\n    # track the crawl depth of each seen URL; the seed starts at depth 0\n    seen = {seed_url: 0}\n    crawl_queue = [seed_url]\n\n\n\n    while crawl_queue:\n        url = crawl_queue.pop()\n        html = download.download(url)\n\n        depth = seen[url]\n        if depth != max_depth:\n            for link in get_links(html):\n                if re.match(link_regex, link):\n                    link = urlparse.urljoin(seed_url, link)\n                    if link not in seen:\n                        seen[link] = depth + 1\n                        crawl_queue.append(link)\n\ndef get_links(html):\n    \"\"\"Return a list of links from html\"\"\"\n    webpage_regex = re.compile('<a[^>]+href=[\"\\'](.*?)[\"\\']', re.IGNORECASE)\n    return webpage_regex.findall(html)\n\nlink_crawler('http://example.webscraping.com', '/(index|view)')", "repo_name": "BoyuZhou/learn_python_crawler", "sub_path": "link_crwaler.py", "file_name": "link_crwaler.py", "file_ext": "py", "file_size_in_byte": 955, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "download.download", "line_number": 18, "usage_type": "call"}, {"api_name": "re.match", "line_number": 23, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 24, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 31, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "32527389798", "text": "import os\r\nimport sys\r\nimport subprocess\r\nimport time\r\nimport ruamel.yaml\r\nimport matplotlib.pyplot as plt\r\nimport csv\r\n\r\nyaml = ruamel.yaml.YAML()\r\nyaml.preserve_quotes = True\r\n\r\ndef get_pod_yaml(name: str, file = None):\r\n    return subprocess.run(\r\n        [\"kubectl\", \"get\", \"pod\", name, \"--output\", \"yaml\"],\r\n        stdout=(subprocess.PIPE if file is None else file)\r\n    ).stdout\r\n\r\n\r\ndef get_pod_resources(name: str):\r\n    resources = subprocess.run(\r\n        [\"grep\", \"limits:\", \"-A\", \"5\"],\r\n        input=get_pod_yaml(name),\r\n        stdout=subprocess.PIPE,\r\n    )\r\n    resources_obj = resources.stdout.decode(\"utf-8\").split()\r\n    return {\r\n        \"limits\": {\r\n            \"cpu\": int(resources_obj[2].replace(\"m\", \"\")),\r\n        },\r\n        \"requests\": {\r\n            \"cpu\": int(resources_obj[7].replace(\"m\", \"\")),\r\n        }\r\n    }\r\n\r\n\r\ndef get_pod_usage(name: str):\r\n    usage_output = subprocess.run(\r\n        [\"kubectl\", \"top\", \"pod\", name, \"--use-protocol-buffers\"],\r\n        stdout=subprocess.PIPE\r\n    ).stdout\r\n\r\n    usage = subprocess.run(\r\n        [\"grep\", name],\r\n        input=usage_output,\r\n        stdout=subprocess.PIPE,\r\n    )\r\n    usage_obj = usage.stdout.decode(\"utf-8\").split()\r\n    return {\r\n        \"cpu\": int(usage_obj[1].replace(\"m\", \"\")),\r\n    }\r\n\r\n\r\ndef get_vpa_pod_name():\r\n    name_output = subprocess.run(\r\n        [\"kubectl\", \"get\", \"pods\", \"-l\", \"app=vpa-container\"],\r\n        stdout=subprocess.PIPE\r\n    ).stdout\r\n\r\n    name_output_split = subprocess.run(\r\n        [\"grep\", \"vpa-deployment\"],\r\n        input=name_output,\r\n        stdout=subprocess.PIPE,\r\n    )\r\n\r\n    name_split = name_output_split.stdout.decode(\"utf-8\").split()\r\n    return name_split[0]\r\n\r\n\r\ndef get_vpa_recommendations(name: str):\r\n    usage_output = subprocess.run(\r\n        [\"kubectl\", \"get\", \"vpa\", \"vpa\", \"--output\", \"yaml\"],\r\n        stdout=subprocess.PIPE\r\n    ).stdout\r\n    data = yaml.load(usage_output.decode(\"utf-8\"))\r\n\r\n    upper_cpu = 0\r\n    lower_cpu = 0\r\n    target_cpu = 0\r\n    recommendation = data[\"status\"][\"recommendation\"][\"containerRecommendations\"]\r\n    for pod in recommendation:\r\n        if pod[\"containerName\"] == \"vpa-container\":\r\n            upper_cpu = pod[\"upperBound\"][\"cpu\"][:-1]\r\n            lower_cpu = pod[\"lowerBound\"][\"cpu\"][:-1]\r\n            target_cpu = pod[\"target\"][\"cpu\"][:-1]\r\n\r\n    return {\r\n        
\"upperBound\": {\n \"cpu\": upper_cpu\n },\n \"lowerBound\": {\n \"cpu\": lower_cpu\n },\n \"target\": {\n \"cpu\": target_cpu\n },\n }\n\n\ndef uniquify(path):\n filename, extension = os.path.splitext(path)\n counter = 1\n\n while os.path.exists(path):\n path = filename + \" (\" + str(counter) + \")\" + extension\n counter += 1\n\n return path\n\n\nname = sys.argv[1]\nvpa_type = sys.argv[2]\nif vpa_type == \"kube\":\n name = get_vpa_pod_name()\nusages = []\nlimits = []\nrequests = []\nvpa_upper_bound = []\nvpa_lower_bound = []\nvpa_target = []\ntime_list = []\ncurrent_time = 0\n\nstart = time.time()\n\nwhile True:\n try:\n resources = get_pod_resources(name)\n usage = get_pod_usage(name)\n if vpa_type == \"kube\":\n recommendation = get_vpa_recommendations(name)\n vpa_upper_bound.append(int(recommendation[\"upperBound\"][\"cpu\"]))\n vpa_lower_bound.append(int(recommendation[\"lowerBound\"][\"cpu\"]))\n vpa_target.append(int(recommendation[\"target\"][\"cpu\"]))\n usages.append(usage[\"cpu\"])\n time_list.append(current_time)\n requests.append(resources[\"requests\"][\"cpu\"])\n limits.append(resources[\"limits\"][\"cpu\"])\n time.sleep(5)\n current_time += 5\n except KeyboardInterrupt:\n print(\"Stopped Data collector\")\n break\n except:\n while True:\n is_created = subprocess.run(\n [\"kubectl\", \"get\", \"pod\", name],\n stdout=subprocess.PIPE\n ).stdout.decode(\"utf-8\")\n if vpa_type == \"kube\":\n name = get_vpa_pod_name()\n if \"NotFound\" not in is_created:\n break\n usages.append(0)\n vpa_upper_bound.append(0)\n vpa_lower_bound.append(0)\n vpa_target.append(0)\n time_list.append(current_time)\n requests.append(0)\n limits.append(0)\n time.sleep(5)\n current_time += 5\n\nwith open(uniquify(f\"results/{name}{vpa_type}graph.csv\"), \"w\") as f:\n writer = csv.writer(f)\n writer.writerows([\n limits, requests, usages, time_list, vpa_upper_bound, vpa_lower_bound, vpa_target\n ])\n\nplt.plot(time_list, limits, label = \"Limits\")\nplt.plot(time_list, requests, label = \"Requests\")\nif vpa_type == \"kube\":\n plt.plot(time_list, vpa_lower_bound, label = \"Lower Bound\")\n plt.plot(time_list, vpa_target, label = \"Target\")\nplt.plot(time_list, usages, label = \"Usage\")\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"CPU (m)\")\nplt.title(\"Pod - Usage, limits and requests\")\nplt.legend()\nplt.savefig(uniquify(f\"results/{name}{vpa_type}graph.png\"))\n\nplt.show()\n\n# Used to re-plot data if something went wrong\n# \n# with open(\"results/vpa-deployment-5b8fb9c68b-hpcjrkubegraph.csv\", \"r\") as f:\n# # reader = csv.reader(f, delimiter=',')\n# # rows = list(reader)\n# # for i in range(0, 7):\n# # print(len(rows[i]))\n# # sys.exit()\n# reader = csv.reader(f, delimiter=',')\n# rows = list(reader)\n# for i in range(0, 7):\n# rows[i] = [int(x) for x in rows[i][:500]]\n# plt.plot(rows[3], rows[0], label = \"Limits\", color='#1f77b4')\n# plt.plot(rows[3], rows[1], label = \"Requests\", color='#ff7f0e')\n# plt.plot(rows[3], rows[2], label = \"Usage\", color='#2ca02c')\n# if vpa_type == \"kube\":\n# plt.plot(rows[3], rows[5], label = \"Lower Bound\", color='#d62728')\n# plt.plot(rows[3], rows[6], label = \"Target\", color='#9467bd')\n# plt.xlabel(\"Time (s)\")\n# plt.ylabel(\"CPU (m)\")\n# plt.title(\"Pod - Usage, limits and requests\")\n# plt.legend()\n# plt.savefig(\"results/vpa-deployment-5b8fb9c68b-hpcjrkubegraph (1).png\")\n# \n# plt.show()\n", "repo_name": "SanderBuK/Kubernetes-VPA", "sub_path": "collect-vpa-data.py", "file_name": "collect-vpa-data.py", "file_ext": "py", 
"file_size_in_byte": 6009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "50", "api": [{"api_name": "ruamel.yaml.yaml.YAML", "line_number": 9, "usage_type": "call"}, {"api_name": "ruamel.yaml.yaml", "line_number": 9, "usage_type": "attribute"}, {"api_name": "ruamel.yaml", "line_number": 9, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 20, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 37, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 39, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 42, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 54, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 59, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 70, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 110, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 123, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 138, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 145, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 147, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}]} +{"seq_id": "24243814594", "text": "import requests\r\nimport os\r\nimport re\r\nimport pprint\r\n\r\n# appid = '0fb87e9c7207bb8b3d51cb647269f5c9'\r\n#s_city = \"Kiev, UA\" # name of city and country\r\n# city_id = 703448 # city id of Kiev\r\n\r\nappid = os.getenv('TOKEN')\r\n\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef help():\r\n print(\r\n '----------------------------------------------------------------\\n'\r\n 'Лузер, то есть Юзер!Следуй по инструкции, программа простая.\\n'\r\n 'Эта прога ищет для тебя прогноз погоды по городу \\n'\r\n 'и по дате с временем которую ты потом выберешь из\\n'\r\n 'предлагаемого списка.\\n'\r\n '----------------------------------------------------------------\\n'\r\n 'Мини пошаговая инструкция:\\n'\r\n '----------------------------------------------------------------\\n'\r\n '1. Введи название города в спец формате(Kiev, UA)\\n'\r\n 'или цифру из выпадающего списка предложеных городов.\\n'\r\n '2. Выбери и введи дату цифру-ответ из выпадающего списка\\n'\r\n 'предложенных дат.\\n'\r\n '3. Выбери и введи цифру ответ с предложенным временем.\\n'\r\n '4. Получи ответ, дальше или удовлетворись либо пробуй по-новой\\n'\r\n '5. Для выхода напиши в ответе exit'\r\n '----------------------------------------------------------------\\n '\r\n\r\n ' Если что то не понятно, то мне очень жаль.\\n '\r\n '!!Вдохни поглубже и давай еще разок!!\\n'\r\n )\r\n\r\n\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef returned_geolocation_id(s_city):\r\n '''\r\n '''\r\n try:\r\n\r\n res = requests.get(\"http://api.openweathermap.org/data/2.5/find\",\r\n params={'q': s_city, 'type': 'like', 'units': 'metric', 'APPID': appid})\r\n data = res.json()\r\n cities = [\"{} ({})\".format(d['name'], d['sys']['country'])\r\n for d in data['list']]\r\n city_id = data['list'][0]['id']\r\n\r\n return city_id\r\n\r\n except Exception as e:\r\n print(\"Exception (find):\", e)\r\n\r\n\r\n\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef question_location():\r\n '''Начало программы, приветсвие с выбором города для погоды\r\n '''\r\n print('Привет, я могу узнать прогноз погоды для тебя.\\n'\r\n 'Напиши ЦИФРУ-ОТВЕТ или ГОРОД СО СТРАНОЙ в формате: Kiev, UA\\n'\r\n 'P.S.Если ты будешь писать название г��рода то не забудь то что\\n'\r\n 'оно должно быть написано англ. буквами как в оригинале\\n'\r\n 'Если это украинский город, то не Lvov а Lviv.\\n\\n'\r\n '1.) Kiev, UA\\n'\r\n '2.) Lviv, UA\\n'\r\n '3.) Odessa, UA\\n'\r\n '4.) Kharkov, UA\\n'\r\n '5.) Yalta, UA\\n'\r\n '6.) Help\\n'\r\n '7.) 
Да ну его все!'\r\n )\r\n\r\n while True:\r\n answer = input('-------------------------input answer ----------------------------\\n')\r\n if not answer.isalpha() and not answer.isdigit():\r\n re_answer = re.findall(r'[A-Z][a-z]+,.[A-Z]{2}', answer)\r\n if re_answer:\r\n if str(returned_geolocation_id(re_answer)).isdigit():\r\n\r\n return re_answer[0]\r\n break\r\n else:\r\n print('Wrong name of city or format0!You need: Lviv, UA ')\r\n continue\r\n\r\n else:\r\n print('Wrong name of city or format#!You need: Lviv, UA ')\r\n continue\r\n\r\n elif answer.isdigit():\r\n if answer == '1':\r\n return 'Kiev, UA'\r\n break\r\n elif answer == '2':\r\n return 'Lviv, UA'\r\n break\r\n elif answer == '3':\r\n return 'Odessa, UA'\r\n break\r\n elif answer == '4':\r\n return 'Kharkiv, UA'\r\n break\r\n elif answer == '5':\r\n return 'Yalta, UA'\r\n break\r\n elif answer == '6':\r\n help()\r\n continue\r\n elif answer == '7':\r\n print('Ну и вали!!!')\r\n exit()\r\n else:\r\n print('Wrong name of city or format!!You need: Lviv, UA ')\r\n continue\r\n\r\n elif answer == 'exit':\r\n print('Ну и вали!!!')\r\n exit()\r\n else:\r\n print('input corrected name of city!?You need: Lviv, UA')\r\n continue\r\n\r\ns_city = question_location()\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef get_request():\r\n '''\r\n The function sends a request to API openweathermap\r\n '''\r\n\r\n try:\r\n # res = requests.get(\"http://api.openweathermap.org/data/2.5/weather\",\r\n # params={'id': question_location(), 'units': 'metric',\r\n # 'lang': 'ru', 'APPID': appid})\r\n res = requests.get(\"https://api.openweathermap.org/data/2.5/forecast?q={}\\\r\n &appid={}\".format(s_city, appid))\r\n data = {}\r\n data = res.json()\r\n\r\n\r\n # print(\"conditions:\", data['weather'][0]['description'])\r\n # print(\"temp:\", data['main']['temp'])\r\n # print(\"temp_min:\", data['main']['temp_min'])\r\n # print(\"temp_max:\", data['main']['temp_max'])\r\n # print(\"Weather:\", data['weather'][0]['description'])\r\n # five_days_weather = [pprint.pprint(data)]\r\n\r\n # pprint.pprint(data)\r\n\r\n\r\n\r\n\r\n except Exception as e:\r\n print(\"Exception (weather):\", e)\r\n pass\r\n return data\r\n\r\n\r\ndata = get_request()\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef new_dict():\r\n DT_txt = {}\r\n for index, value in enumerate(data['list']):\r\n for key, val in value.items():\r\n if key == 'dt_txt':\r\n DT_txt[val] = {}\r\n DT_txt[val].update({'pozition': index})\r\n\r\n for index, value in DT_txt.items():\r\n DT_txt[index].update({'clouds': data['list'][DT_txt.get(index).get('pozition'\r\n )].get('clouds').get('all')})\r\n DT_txt[index].update({'humidity': data['list'][DT_txt.get(index).get('pozition'\r\n )].get('main').get('humidity')})\r\n DT_txt[index].update({'temp': data['list'][DT_txt.get(index).get('pozition'\r\n )].get('main').get('temp')})\r\n\r\n DT_txt[index].update({'weather': data['list'][DT_txt.get(index).get('pozition'\r\n )].get('weather')[0].get('description')})\r\n DT_txt[index].update({'wind': data['list'][DT_txt.get(index).get('pozition'\r\n )].get('wind').get('speed')})\r\n DT_txt[index].update({'dt': index})\r\n\r\n return DT_txt\r\n\r\nnew_dict = new_dict()\r\n#----------------------------------------------------------------------------------------------------------------------\r\n\r\ndef questions_time():\r\n '''\r\n '''\r\n print('Я тебя понял, есть такой город как 
{0}.\\n\\n'\r\n 'Теперь давай выбери дату и время с прогнозом\\n'\r\n 'из этого огромного списка.\\n\\n'\r\n 'Если ты готов его увидить,\\n'\r\n 'то жми ENTER.\\n'.format(re.findall('([A-Za-z]+),',s_city)[0]))\r\n\r\n answer_to_contin = input('-------------------------input answer ----------------------------\\n')\r\n if answer_to_contin or not answer_to_contin:\r\n\r\n for index, value in enumerate(new_dict.items()):\r\n print(str(index + 1) + ').',value[0])\r\n print('------------------------------------------------------------------\\n\\n')\r\n while True:\r\n print('И снова я ожидаю от тебя ЦИФРУ-ОТВЕТ:\\n\\n'\r\n 'Которая в диапазоне данного списка.\\n')\r\n answer_the_time = input('-------------------------input answer ----------------------------\\n')\r\n time_dict = {}\r\n if answer_the_time.isdigit():\r\n for index, value in enumerate(new_dict.items()):\r\n time_dict.update({index + 1: value[0]})\r\n\r\n if time_dict.get(int(answer_the_time)):\r\n td = time_dict.get(int(answer_the_time))\r\n break\r\n elif answer_the_time == 'exit':\r\n exit()\r\n else:\r\n continue\r\n elif answer_to_contin == 'exit':\r\n exit()\r\n\r\n return td\r\n\r\nquestions_time = questions_time()\r\n\r\n\r\n#-----------------------------------------------------------------------------------------------------------\r\ndef weather_to_screen(arg):\r\n print('\\nИ так прогноз погоды на {0}!\\n'\r\n 'Небо будет на {1} % покрыто облаками.\\n'\r\n 'И как сказал бы англичанин:\\n'\r\n '\"And in general, at this time the weather will be {2}\"\\n'\r\n 'Влажность воздуха {3} %\\n'\r\n 'А на термометрах обесчают {4} C*\\n'\r\n 'Скорость ветра будет {5} м/с\\n'\r\n 'На этом все!\\n'.format(\r\n arg.get('dt'),\r\n arg.get('clouds'),\r\n arg.get('weather'),\r\n arg.get('humidity'),\r\n int(arg.get('temp')) - 273,\r\n arg.get('wind')\r\n ))\r\n exit()\r\n\r\n\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\n\r\nTOKEN=0fb87e9c7207bb8b3d51cb647269f5c9\r\n\"\"\"", "repo_name": "AleksanderYa/weather-forecast", "sub_path": "work.py", "file_name": "work.py", "file_ext": "py", "file_size_in_byte": 10749, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 140, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "32300036304", "text": "'''\nHangman\n'''\nimport time\nimport os\nimport json\nclear_screen = lambda: os.system('cls')\ndef menu():\n '''\n Hangman game in command line with python !!\n How to play\n 1. Choose category to play\n 2. 
Play it !!\n\n score calculator just your Life x 25\n have fun have fun\n '''\n #Setup variable\n directory_category = []\n category_list = []\n chosen_category = 0\n\n #Setup menu\n for x in os.listdir('category'):\n directory_category.append(\"category/\" + x)\n file = open(\"category/\" + x).read()\n json_file = json.loads(file)\n print(json_file[\"name\"])\n category_list.append(json_file[\"name\"])\n\n #Choose category\n category_list_len = len(category_list)\n print(\"Select Category:\")\n for i in range(category_list_len):\n print(str(i+1) + \".\", category_list[i])\n\n #Take input and heck selection\n while chosen_category == 0 and (chosen_category <= category_list_len):\n try:\n chosen_category = int(input(\"Choose : \"))\n except ValueError:\n print(\"Error!! Please enter number from 1 - \" + str(category_list_len))\n if chosen_category > category_list_len:\n print(\"It not have this menu please try again\")\n chosen_category = 0\n print(\"You choose\", category_list[chosen_category-1])\n print(\"Have fun with hangman, please wait\", end=\"\", flush=True)\n for _ in range(0,3):\n print(\".\", end=\"\", flush=True)\n time.sleep(1)\n print(\"\")\n clear_screen()\n #Play game\n play(directory_category[chosen_category-1])\n\ndef play(category):\n '''\n this is function to play this game \n '''\n #Setup game\n life = 10\n score = 0\n file = open(category).read()\n json_file = json.loads(file)\n item = 1\n questions = json_file[\"questions\"]\n\n #Start game\n for question in questions:\n print(\"Number\", item)\n print(\"Hint: \" '\"' + questions[question] + '\"')\n user_answer = []\n user_wrong = []\n\n #Setup word\n correct_answer = question\n for i in correct_answer:\n if i == \" \":\n user_answer += \" \"\n else:\n user_answer += \"_\"\n for i in user_answer:\n print(i, end=\" \")\n print(\"score %d, remaining wrong guess %d\" % (score, life), end=\"\")\n if len(user_wrong) != 0:\n print(\", wrong guessed: \", end=\"\")\n for wrong_character in user_wrong: print(wrong_character, end=\" \")\n print(\"\")\n\n #Play and Check answer\n while True:\n #Recieve user Input\n user_char = input(\"Enter your Character : \")\n while(len(user_char) != 1 or user_char == \" \"):\n print(\"Please enter only one Character or not have spacebar\")\n user_char = input(\"Enter your Character : \")\n if user_char.lower() in correct_answer.lower():\n char_count = 0\n for char in correct_answer:\n if(user_char.lower() == char.lower()):\n #get score protect repeat\n if(user_answer[char_count] == \"_\"):\n score += (life*25)\n user_answer[char_count] = correct_answer[char_count]\n char_count += 1\n else:\n user_wrong.append(user_char)\n life -= 1\n for i in user_answer:\n print(i, end=\" \")\n print(\"score %d, remaining wrong guess %d\" % (score, life), end=\"\")\n if len(user_wrong) != 0:\n print(\", wrong guessed: \", end=\"\")\n for wrong_character in user_wrong: print(wrong_character, end=\" \")\n print(\"\")\n\n #Check state\n if life <= 0:\n return print(\"Your lose with %d score\" %(score))\n try:\n user_answer.index(\"_\")\n except:\n print(\"Your pass !! please wait to next level\", end=\"\", flush=True)\n for _ in range(0,3):\n print(\".\", end=\"\", flush=True)\n time.sleep(1)\n item += 1\n print(\"\")\n clear_screen()\n break\n\n print(\"You win %d score in %s category. 
Thank you for play this I hope you will play another category.\" % (score, json_file[\"name\"]))\nmenu()\n", "repo_name": "startmt/theinternship-developer", "sub_path": "hangman/hangman.py", "file_name": "hangman.py", "file_ext": "py", "file_size_in_byte": 4396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.system", "line_number": 7, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 64, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "3808543646", "text": "import cv2\r\nimport numpy as np\r\n\r\n\r\ndef find_color(frame, color):\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n lower_color = np.array([color[0] - 10, 100, 100])\r\n upper_color = np.array([color[0] + 10, 255, 255])\r\n\r\n mask = cv2.inRange(hsv, lower_color, upper_color)\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for contour in contours:\r\n area = cv2.contourArea(contour)\r\n if area > 1000:\r\n cv2.drawContours(frame, contour, -1, (0, 255, 0), 2)\r\n\r\n\r\ndef main():\r\n color = (0, 255, 0) # Зеленый цвет\r\n cap = cv2.VideoCapture(0)\r\n\r\n while True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n\r\n find_color(frame, color)\r\n\r\n cv2.imshow('frame', frame)\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n\r\n if key == ord('q'):\r\n break\r\n elif key == ord('r'):\r\n color = (0, 0, 255) # Красный\r\n elif key == ord('g'):\r\n color = (60, 255, 0) # Зеленый\r\n elif key == ord('b'):\r\n color = (120, 255, 255) # Синий\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()", "repo_name": "GavriilFursov/RGBFindButton", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1230, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "cv2.cvtColor", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 6, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.contourArea", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.drawContours", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "74722826075", "text": "\"\"\"\nFetching supplimentary model input from the Planetary Computer for STAC project\nThis notebook produces additional input layers for the training data used in the\nsentinel 1 flood detection competition run by DrivenData.\n\nIf fetches JRC Global Surface Water and 
NASADEM elevation data from the Planetary\nComputer (PC) STAC API and creates pixel-aligned chips that match what will be\nused in the evaluation process for the competition.\n\nThe notebook will iterate through chip paths and query the PC STAC API for the\nnasadem and jrc-gsw Collections. It then creates a set of GeoTiffs by\n\"coregistering\" the raster data with the chip GeoTIFF, so that all of the\nadditional input layers have the same CRS, bounds, and resolution as the chip.\nThese additional layers are then saved alongside the training chip.\nhttps://github.com/wonderdong11/PlanetaryComputerExamples/blob/main/competitions/s1floods/generate_auxiliary_input.ipynb\n\n\"\"\"\n\nfrom dataclasses import dataclass\nimport os\nfrom tempfile import TemporaryDirectory\nfrom typing import List, Any, Dict\n\nfrom shapely.geometry import box, mapping\nimport rasterio\nfrom rasterio.warp import reproject, Resampling\nimport pyproj\nfrom osgeo import gdal\n\nfrom pystac_client import Client\nimport planetary_computer as pc\n\n\n# Define functions and classes\n# Define a ChipInfo dataclass to encapsulate the required data for the target chip.\n# This includes geospatial information that will be used to coregister the\n# incoming jrc-gsw and nasadem data.\n@dataclass\nclass ChipInfo:\n    \"\"\"\n    Holds information about a training chip, including geospatial info for coregistration\n    \"\"\"\n\n    path: str\n    prefix: str\n    crs: Any\n    shape: List[int]\n    transform: List[float]\n    bounds: rasterio.coords.BoundingBox\n    footprint: Dict[str, Any]\n\n\ndef get_footprint(bounds, crs):\n    \"\"\"Gets a GeoJSON footprint (in epsg:4326) from rasterio bounds and CRS\"\"\"\n    transformer = pyproj.Transformer.from_crs(crs, \"epsg:4326\", always_xy=True)\n    minx, miny = transformer.transform(bounds.left, bounds.bottom)\n    maxx, maxy = transformer.transform(bounds.right, bounds.top)\n    return mapping(box(minx, miny, maxx, maxy))\n\ndef get_chip_info(chip_path):\n    \"\"\"Gets chip info from a GeoTIFF file\"\"\"\n    with rasterio.open(chip_path) as ds:\n        chip_crs = ds.crs\n        chip_shape = ds.shape\n        chip_transform = ds.transform\n        chip_bounds = ds.bounds\n\n    # Use the first part of the chip filename as a prefix\n    prefix = os.path.basename(chip_path).split(\"_\")[0]\n\n    return ChipInfo(\n        path=chip_path,\n        prefix=prefix,\n        crs=chip_crs,\n        shape=chip_shape,\n        transform=chip_transform,\n        bounds=chip_bounds,\n        footprint=get_footprint(chip_bounds, chip_crs),\n    )\n\n\ndef reproject_to_chip(\n    chip_info, input_path, output_path, resampling=Resampling.nearest\n):\n    \"\"\"\n    Reprojects and coregisters raster data to the bounds, CRS and resolution\n    described by the ChipInfo.\n\n    Reproject a raster at input_path to chip_info, saving to output_path.\n\n    Use Resampling.nearest for classification rasters. Otherwise use something\n    like Resampling.bilinear for continuous data.\n    \"\"\"\n    with rasterio.open(input_path) as src:\n        kwargs = src.meta.copy()\n        kwargs.update(\n            {\n                \"crs\": chip_info.crs,\n                \"transform\": chip_info.transform,\n                \"width\": chip_info.shape[1],\n                \"height\": chip_info.shape[0],\n                \"driver\": \"GTiff\",\n            }\n        )\n\n        with rasterio.open(output_path, \"w\", **kwargs) as dst:\n            for i in range(1, src.count + 1):\n                reproject(\n                    source=rasterio.band(src, i),\n                    destination=rasterio.band(dst, i),\n                    src_transform=src.transform,\n                    src_crs=src.crs,\n                    dst_transform=chip_info.transform,\n                    dst_crs=chip_info.crs,\n                    resampling=resampling,\n                )\n\n# This method will take in a set of items and an asset key and write a VRT using\n# signed HREFs. 
This is useful when there are multiple results from the query, so we\n# can treat the resulting rasters as a single set of raster data. It uses the\n# planetary_computer.sign method to sign the HREFs with a SAS token generated by\n# the PC Data Auth API.\ndef write_vrt(items, asset_key, dest_path):\n    \"\"\"Write a VRT with hrefs extracted from a list of items for a specific asset.\"\"\"\n    hrefs = [pc.sign(item.assets[asset_key].href) for item in items]\n    vsi_hrefs = [f\"/vsicurl/{href}\" for href in hrefs]\n    gdal.BuildVRT(dest_path, vsi_hrefs).FlushCache()\n\n# This method ties it all together - for a given ChipInfo, Collection, and Asset,\n# write an auxiliary input chip with the given file name.\ndef create_chip_aux_file(\n    chip_info, collection_id, asset_key, file_name, resampling=Resampling.nearest\n):\n    \"\"\"\n    Write an auxiliary chip file.\n\n    The auxiliary chip file includes chip_info for the Collection and Asset, and is\n    saved in the same directory as the original chip with the given file_name.\n    \"\"\"\n\n    # Create the STAC API client\n    # This will be used in the methods below to query the PC STAC API.\n    STAC_API = \"https://planetarycomputer.microsoft.com/api/stac/v1\"\n    catalog = Client.open(STAC_API)\n\n    output_path = os.path.join(\n        os.path.dirname(chip_info.path), f\"{chip_info.prefix}_{file_name}\"\n    )\n    search = catalog.search(collections=[collection_id], intersects=chip_info.footprint)\n    items = list(search.get_items())\n    with TemporaryDirectory() as tmp_dir:\n        vrt_path = os.path.join(tmp_dir, \"source.vrt\")\n        write_vrt(items, asset_key, vrt_path)\n        reproject_to_chip(chip_info, vrt_path, output_path, resampling=resampling)\n    return output_path\n\n\n# Download the flood-train-images.tgz file from the competition Data Download page and\n# upload it to the Hub in the same directory as this notebook.\n\n# Then run the following code to uncompress this. Afterwards you should see a\n# train_features directory containing all of the training chips ending in .tif.\n# !tar -xvf flood-train-images.tgz\n\n# Use this directory to define the location of the chips, or if you have\n# already uncompressed the chips elsewhere set the location here:\nTRAINING_DATA_DIR = \"training_data/train_features\"\n\n# Gather chip paths\n# These chip paths will be used later in the notebook to process the chips.\n# These paths should be to only one GeoTIFF per chip; for example, if both\n# VV.tif and VH.tif are available for a chip, use only one of these paths.\n# The GeoTIFFs at these paths will be read to get the bounds, CRS and resolution\n# that will be used to fetch auxiliary input data. 
These can be relative paths.\n# The auxiliary input data will be saved in the same directory as the GeoTIFF\n# files at these paths.\nchip_paths = []\nfor file_name in os.listdir(TRAINING_DATA_DIR):\n    if file_name.endswith(\"_vv.tif\"):\n        chip_paths.append(os.path.join(TRAINING_DATA_DIR, file_name))\nprint(f\"{len(chip_paths)} chips found.\")\n\n# Configure the auxiliary input files that we will generate.\n# Define a set of parameters to pass into create_chip_aux_file\naux_file_params = [\n    (\"nasadem\", \"elevation\", \"nasadem.tif\", Resampling.bilinear),\n    (\"jrc-gsw\", \"extent\", \"jrc-gsw-extent.tif\", Resampling.nearest),\n    (\"jrc-gsw\", \"occurrence\", \"jrc-gsw-occurrence.tif\", Resampling.nearest),\n    (\"jrc-gsw\", \"recurrence\", \"jrc-gsw-recurrence.tif\", Resampling.nearest),\n    (\"jrc-gsw\", \"seasonality\", \"jrc-gsw-seasonality.tif\", Resampling.nearest),\n    (\"jrc-gsw\", \"transitions\", \"jrc-gsw-transitions.tif\", Resampling.nearest),\n    (\"jrc-gsw\", \"change\", \"jrc-gsw-change.tif\", Resampling.nearest),\n]\n\n# Generate auxiliary input chips for NASADEM and JRC\n# Iterate over the chips and generate all aux input files.\ncount = len(chip_paths)\nfor i, chip_path in enumerate(chip_paths):\n    if i < 446:  # resume point: skip chips that were already processed\n        continue\n    print(f\"({i+1} of {count}) {chip_path}\")\n    chip_info = get_chip_info(chip_path)\n    for collection_id, asset_key, file_name, resampling_method in aux_file_params:\n        print(f\"  ... Creating chip data for {collection_id} {asset_key}\")\n        create_chip_aux_file(\n            chip_info, collection_id, asset_key, file_name, resampling=resampling_method )\n", "repo_name": "dahaorendrm/STAC_overflow", "sub_path": "fetch_additional_data.py", "file_name": "fetch_additional_data.py", "file_ext": "py", "file_size_in_byte": 8356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "typing.Any", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 48, "usage_type": "name"}, {"api_name": "rasterio.coords", "line_number": 49, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 38, "usage_type": "name"}, {"api_name": "pyproj.Transformer.from_crs", "line_number": 55, "usage_type": "call"}, {"api_name": "pyproj.Transformer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "shapely.geometry.mapping", "line_number": 58, "usage_type": "call"}, {"api_name": "shapely.geometry.box", "line_number": 58, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 83, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 83, "usage_type": "name"}, {"api_name": "rasterio.open", "line_number": 94, "usage_type": "call"}, {"api_name": "rasterio.open", "line_number": 106, "usage_type": "call"}, {"api_name": "rasterio.warp.reproject", "line_number": 108, "usage_type": "call"}, {"api_name": "rasterio.band", "line_number": 109, "usage_type": "call"}, {"api_name": "rasterio.band", "line_number": 110, "usage_type": "call"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 
115, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 115, "usage_type": "name"}, {"api_name": "planetary_computer.sign", "line_number": 125, "usage_type": "call"}, {"api_name": "osgeo.gdal.BuildVRT", "line_number": 127, "usage_type": "call"}, {"api_name": "osgeo.gdal", "line_number": 127, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 132, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 132, "usage_type": "name"}, {"api_name": "pystac_client.Client.open", "line_number": 144, "usage_type": "call"}, {"api_name": "pystac_client.Client", "line_number": 144, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling.bilinear", "line_number": 186, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 186, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 187, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 187, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 188, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 188, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 189, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 189, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 190, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 190, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 191, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 191, "usage_type": "name"}, {"api_name": "rasterio.warp.Resampling.nearest", "line_number": 192, "usage_type": "attribute"}, {"api_name": "rasterio.warp.Resampling", "line_number": 192, "usage_type": "name"}]} +{"seq_id": "1081897736", "text": "import streamlit as st\nfrom PIL import Image\nimport stylize_image\n\nst.title(\"Fast Neural Style Transfer Web-app.\")\nst.write(\"Choose an existing image from the sidebar or upload your own....\")\n\nimg = st.sidebar.selectbox(\n 'Select Image',\n ('Captain America',\n 'Batman',\n 'Deadpool',\n 'Dragon',\n 'Godzilla',\n 'Iron Man',\n 'One Punch Man',\n 'Spider Man',\n 'Superman',\n 'Thor',\n 'Wonder Woman')\n)\nimg = img+'.jpg'\n\nstyle_name = st.sidebar.selectbox(\n 'Select Style',\n ('Candy', 'Mosaic', 'Rain Princess', 'Udnie')\n)\n\nuploaded_file = st.file_uploader(\"Choose an image...\", type=\"jpg\")\n\nif uploaded_file:\n input_image = uploaded_file\nelse:\n input_image = \"content_images/\" + img\n\nmodel = \"models/\" + style_name + \".pth\"\noutput_image = \"output_images/\" + style_name + \"-\" 
+ img\n\nst.write(\"### Source image:\")\nimage = Image.open(input_image)\nst.image(image, width=400)\n\nclicked = st.button('Stylize')\n\nif clicked:\n model = stylize_image.load_model(model)\n stylize_image.stylize(model, input_image, output_image)\n\n st.write('#### Output image:')\n image = Image.open(output_image)\n st.image(image, width=400)\n", "repo_name": "mrinmoy2000/End-to-End-Neural-Style-Transferring-Web-App", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1173, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "streamlit.title", "line_number": 5, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 6, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 8, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 8, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 24, "usage_type": "attribute"}, {"api_name": "streamlit.file_uploader", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 43, "usage_type": "call"}, {"api_name": "stylize_image.load_model", "line_number": 46, "usage_type": "call"}, {"api_name": "stylize_image.stylize", "line_number": 47, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 50, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "18526071606", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport openpyxl\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate',\n 'Connection': 'keep-alive'}\n\ndef parser_city():\n html=open('./city.html','r').read()\n table=BeautifulSoup(html,'lxml').find_all('a')\n f=open('city.txt','w')\n for item in table:\n city=item.get_text()\n url=item.get('href')\n f.write(city+'--'+url+'\\n')\n f.close()\n\ndef parser(city,url):\n html=requests.get(url,headers=headers).text\n table=BeautifulSoup(html,'lxml').find('div',{'class':'api_month_list'}).find('table').find_all('tr')\n result=[]\n for item in table:\n line=[city]\n for td in item.find_all('td'):\n try:\n line.append(td.get_text().replace('\\r\\n','').replace(' ',''))\n except:\n line.append('')\n result.append(line)\n return result\n\ndef get_air_report(city,city_url):\n year=2013\n month=11\n result=[]\n while(year<2018):\n while(month%13!=0):\n url='http://www.tianqihoubao.com'+city_url.replace('.html','-%s%02d.html'%(year,month))\n try:\n result+=parser(city,url)\n except:\n print(city,year,month,'failed')\n continue\n print(city,year,month,'ok')\n if year==2017 and month==3:\n break\n month+=1\n month=1\n year+=1\n return result\n\ndef main():\n for line in open('city.txt','r'):\n line=line.replace('\\n','').split('--')\n 
result=get_air_report(line[0],line[1])\n f=open('result.txt','a')\n for line in result:\n f.write(str(line)+'\\n')\n f.close()\n\nmain()\n", "repo_name": "19js/Nyspider", "sub_path": "www.tianqihoubao.com/air_report_history.py", "file_name": "air_report_history.py", "file_ext": "py", "file_size_in_byte": 1940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "50", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "34081725625", "text": "# downloads stock data for specified date range\n# mind that this script outputs nothing for bank holidays\n\nfrom datetime import date, timedelta\nimport csv\nimport os\nimport pandas_datareader.data as web\nfrom time import sleep\n#############\n### SETUP ###\n\n# start and end date of required stock data (YYYY, M, D)\nstart = date(2017, 9, 27) # including\nend = date(2017, 9, 29) # including\n\n# input file (relative path)\ninput_file = 'companies.csv'\n\n# output file (relative path)\noutput_dir = 'stock_data'\noutput_file = output_dir + '/' + 'stock_' + str(start) + '_' + str(end) + '.csv'\n\n##############\n#### CODE ####\n\n# create output dir\nos.makedirs(output_dir, exist_ok=True)\n\n# create output file with header\nwith open(output_file, 'w') as f:\n f.write('Date,Open,High,Low,Close,Adj Close,Volume,Company\\n')\n\n# open input file\nwith open(input_file, 'r') as comp:\n reader = csv.reader(comp)\n\n # for each company\n for row in reader:\n name = str(row[0])\n ticker = str(row[1]).strip()\n print('\\n' + name)\n\n try:\n # download stock data\n df = web.DataReader(ticker, 'yahoo', start, end)\n sleep(5)\n # add company column\n df['Company'] = name\n # append to output file\n with open(output_file, 'a') as f:\n df.to_csv(f, header=False)\n except Exception as e:\n print(e)\n", "repo_name": "bromjiri/StockLoad", "sub_path": "download_stock_yahoo/download_stock_range.py", "file_name": "download_stock_range.py", "file_ext": "py", "file_size_in_byte": 1418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.date", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 35, "usage_type": "call"}, {"api_name": "pandas_datareader.data.DataReader", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas_datareader.data", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "12560281101", "text": "import cv2\nimport numpy as np\n\ndef draw_circle(event,x,y,flags, params):\n if event == cv2.EVENT_RBUTTONDOWN:\n cv2.circle(img, (x,y), 70, (0,0,255), 3)\n\ncv2.namedWindow(winname='my_dog')\ncv2.setMouseCallback('my_dog', draw_circle)\n\nimg = cv2.imread('data/dog_backpack.jpg')\n\nwhile True:\n cv2.imshow('my_dog', img)\n \n if cv2.waitKey(1) & 0xFF == 27:\n break\n\ncv2.destroyAllWindows() \n \n ", "repo_name": "Manjeete/ImageProccessing", "sub_path": ".ipynb_checkpoints/Assessmentpart-checkpoint.py", "file_name": "Assessmentpart-checkpoint.py", "file_ext": "py", "file_size_in_byte": 445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "cv2.EVENT_RBUTTONDOWN", "line_number": 5, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "31564299858", "text": "import pandas as pd\nimport re\nimport pickle\nimport os\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom wordcloud import WordCloud\nfrom sklearn.decomposition import LatentDirichletAllocation, NMF\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier\n\n\ndef print_topic(model, feature_names, n_top_word):\n topics = []\n for topic_idx, topic in enumerate(model.components_):\n topic_dict = [{\"keyword\": feature_names[i], \"score\": topic[i]}\n for i in topic.argsort()[:-n_top_word - 1:-1]]\n topic_dict.sort(key=lambda x: x.get(\"score\"), reverse=True)\n topics.append(topic_dict)\n\n file_name = 'Topic.json'\n json_output_dir = os.path.join(os.getcwd(), file_name)\n with open(json_output_dir, 'w') as f:\n json.dump(topics, f)\n return topics\n\n\ndef print_top_words(data, n_top_words=20):\n for topic_idx, topic in enumerate(data):\n plt.figure()\n plt.imshow(WordCloud().fit_words(\n [(i[\"keyword\"], i[\"score\"])\n for i in topic[:-n_top_words - 1:-1]]\n ))\n plt.axis(\"off\")\n plt.title(\"Topic #%d\" % (topic_idx + 1))\n plt.savefig(\"Topic_\" + str(topic_idx + 1) + \".png\")\n print()\n\nstops = stop_words = [line.strip() for line in open(\"indonesia.txt\")]\ntrain_anies = pd.read_csv(\n \"DATA_ANIES_FIX.csv\"\n)\n\ntrain_anies = train_anies.replace(\"1\", \"anies+\")\ntrain_anies = train_anies.replace(\"0\", \"anies-\")\n\ntrain_ahok = pd.read_csv(\n \"data_training_ahok.csv\"\n)\n\ntrain_ahok = train_ahok.replace(\"1\", \"ahok+\")\ntrain_ahok = train_ahok.replace(\"0\", \"ahok-\")\n\ntrain_ahy = pd.read_csv(\n \"data_training_AHY.csv\"\n)\n\ntrain_ahy = train_ahy.replace(\"1\", \"ahy+\")\ntrain_ahy = train_ahy.replace(\"0\", \"ahy-\")\n\nframes = [train_anies, train_ahok, train_ahy]\n\ndata_total = pd.concat(frames)\n\ndata_train = data_total[(data_total[\"Sentimen\"] == \"ahok+\") | (data_total[\"Sentimen\"] == \"ahok-\") | (data_total[\"Sentimen\"] == \"ahy+\") | (data_total[\"Sentimen\"] == \"ahy-\") | (data_total[\"Sentimen\"] == \"anies+\") | (data_total[\"Sentimen\"] == \"anies-\")]\ndata_train = data_train.reset_index(drop=True)\n\nclean_train_tweet = []\nsentiment_all = []\n\nfor i in range(len(data_train[\"_id\"])):\n clean_train_tweet.append(' '.join(\n re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|RT\", \" \",\n data_train[\"_id\"][i]).split()))\n\nfor i in range(len(data_train[\"Sentimen\"])):\n sentiment_all.append(data_train[\"Sentimen\"][i])\n\nvectorizer = TfidfVectorizer(analyzer=\"word\",\n stop_words=stops, min_df=0.001, max_df=0.85)\n\ntrain_data_features = vectorizer.fit_transform(clean_train_tweet)\nx_train, x_test, y_train, y_test = 
train_test_split(\n    train_data_features, sentiment_all, test_size=0.3, random_state=42\n)\ntrain_data_features = x_train.toarray()\n\nmodel = OneVsRestClassifier(LinearSVC(random_state=0))\nmodel_fit = model.fit(train_data_features, y_train)\nresult = model.predict(x_test)\nprint(accuracy_score(result, y_test))\n\nfilename = 'model.sav'\npickle.dump(model, open(filename, 'wb'))\n\nfilename = 'vectorizer.sav'\npickle.dump(vectorizer, open(filename, 'wb'))\n\n#load\n#loaded_model = pickle.load(open(filename, 'rb'))\n\nprint(\"Fitting NMF topic model with tf-idf features\")\nlda_array = nmf = NMF(n_components=10)\nlda_array.fit(train_data_features)\n\ntfidf_feature_names = vectorizer.get_feature_names()\ntopics = print_topic(lda_array, tfidf_feature_names, 200)\nprint_top_words(topics, 200)\n\ntrain_unique = pd.read_csv(\n    \"data_unique_users.csv\"\n)\ntrain_unique = train_unique.replace(np.nan, \" \")\nclean_train_tweet_unique = []\n\nfor i in range(len(train_unique[\"_id\"])):\n    clean_train_tweet_unique.append(' '.join(\n        re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)|RT\", \" \",\n               train_unique[\"text\"][i]).split()))\n\ntest_data_features = vectorizer.transform(clean_train_tweet_unique)\ntest_data_features = test_data_features.toarray()\n\nresult_unique = model.predict(test_data_features)\n\nres_unique = result_unique.tolist()\npro_ahok = res_unique.count(\"ahok+\")\npro_anies = res_unique.count(\"anies+\")\npro_ahy = res_unique.count(\"ahy+\")\n\nadd_ahok = (res_unique.count(\"ahy-\")*res_unique.count(\"ahok+\")/(res_unique.count(\"ahok+\") + res_unique.count(\"anies+\"))) + (res_unique.count(\"anies-\")*res_unique.count(\"ahok+\")/(res_unique.count(\"ahok+\") + res_unique.count(\"ahy+\")))\nadd_anies = (res_unique.count(\"ahy-\")*res_unique.count(\"anies+\")/(res_unique.count(\"anies+\") + res_unique.count(\"ahok+\"))) + (res_unique.count(\"ahok-\")*res_unique.count(\"anies+\")/(res_unique.count(\"anies+\") + res_unique.count(\"ahy+\")))\nadd_ahy = (res_unique.count(\"anies-\")*res_unique.count(\"ahy+\")/(res_unique.count(\"ahy+\") + res_unique.count(\"ahok+\"))) + (res_unique.count(\"ahok-\")*res_unique.count(\"ahy+\")/(res_unique.count(\"ahy+\") + res_unique.count(\"anies+\")))\n\ntotal = (pro_ahok + add_ahok) + (pro_anies + add_anies) + (pro_ahy + add_ahy)\n\npres_ahok = (pro_ahok + add_ahok)/total*100\npres_anies = (pro_anies + add_anies)/total*100\npres_ahy = (pro_ahy + add_ahy)/total*100\n\nprint(\"elektabilitas ahy-sylvi \" + str(pres_ahy))\nprint(\"elektabilitas ahok-djarot \" + str(pres_ahok))\nprint(\"elektabilitas anies-sandi \" + str(pres_anies))\n", "repo_name": "okids/tweetserpent", "sub_path": "classify.py", "file_name": "classify.py", "file_ext": "py", "file_size_in_byte": 5399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 36, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.axis", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 69, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.multiclass.OneVsRestClassifier", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 94, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 97, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 100, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 103, "usage_type": "call"}, {"api_name": "sklearn.decomposition.NMF", "line_number": 109, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 119, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "30019384436", "text": "from util import (Config, JsonHandler, DynamicDataclass, TerminalQuiz as tq, \n ApplicationFlow as af)\nimport typing as type \nfrom dataclasses import dataclass\n\n@dataclass(init=False)\nclass ManageDatabaseArgs(DynamicDataclass):\n choice: type.Literal[\"Add database\", \"Remove database\"]\n filepath_to_add: str | None\n filepaths_to_remove: list\n \n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\nclass ManageDatabase:\n def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated\")\n \n @staticmethod\n def run(config: Config):\n try:\n arguments = ManageDatabase.__show_terminal_quiz(config)\n if arguments.choice == \"Add database\":\n config.db_filepath_list.append(arguments.filepath_to_add)\n\n elif arguments.choice == \"Remove database\":\n for filepath in arguments.filepaths_to_remove:\n config.db_filepath_list.remove(filepath)\n \n JsonHandler.save_json_file(config.config_filepath, config.dict())\n \n except KeyboardInterrupt:\n af.print_returning_to_menu()\n \n\n\n @staticmethod\n def __show_terminal_quiz(config: Config):\n choice = tq.ask_for_list(\"Choose an option\", [\"Add database\", \"Remove database\"])\n\n filepath_to_add = None\n filepaths_to_remove = []\n\n if choice == \"Add database\":\n filepath_to_add = tq.ask_for_string(\"Enter the filepath of the database you want to add: \")\n elif choice == \"Remove database\":\n filepaths_to_remove = tq.ask_for_checkbox(\"Choose the database you want to remove\", config.db_filepath_list)\n return ManageDatabaseArgs(choice = choice, filepath_to_add = filepath_to_add, filepaths_to_remove = filepaths_to_remove)", "repo_name": "Guisilcol/milho-sqlite3", "sub_path": "src/commands/manage_databases.py", "file_name": "manage_databases.py", "file_ext": "py", 
"file_size_in_byte": 1845, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "util.DynamicDataclass", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 8, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 6, "usage_type": "call"}, {"api_name": "util.Config", "line_number": 20, "usage_type": "name"}, {"api_name": "util.JsonHandler.save_json_file", "line_number": 30, "usage_type": "call"}, {"api_name": "util.JsonHandler", "line_number": 30, "usage_type": "name"}, {"api_name": "util.ApplicationFlow.print_returning_to_menu", "line_number": 33, "usage_type": "call"}, {"api_name": "util.ApplicationFlow", "line_number": 33, "usage_type": "name"}, {"api_name": "util.Config", "line_number": 38, "usage_type": "name"}, {"api_name": "util.TerminalQuiz.ask_for_list", "line_number": 39, "usage_type": "call"}, {"api_name": "util.TerminalQuiz", "line_number": 39, "usage_type": "name"}, {"api_name": "util.TerminalQuiz.ask_for_string", "line_number": 45, "usage_type": "call"}, {"api_name": "util.TerminalQuiz", "line_number": 45, "usage_type": "name"}, {"api_name": "util.TerminalQuiz.ask_for_checkbox", "line_number": 47, "usage_type": "call"}, {"api_name": "util.TerminalQuiz", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "39710499493", "text": "\"\"\"\r\nBernoulli Likelihood with Hierarchical Prior!\r\n\"\"\"\r\nimport numpy as np\r\nimport pymc3 as pm\r\nimport sys\r\nfrom scipy.stats import beta, binom\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('seaborn-darkgrid')\r\n\r\n\r\n# Data for figure 9.11\r\nN = [10, 10, 10] # Number of flips per coin\r\nz = [5, 5, 5] # Number of heads per coin\r\n## Data for figure 9.12\r\n#N = [10, 10, 10] # Number of flips per coin\r\n#z = [1, 5, 9] # Number of heads per coin\r\n\r\n## Data for exercise 9.1\r\n#ncoins = 50\r\n#nflipspercoin = 5\r\n#mu_act = .7\r\n#kappa_act = 20\r\n#theta_act = beta.rvs(mu_act*kappa_act+1, (1-mu_act)*kappa_act+1, size=ncoins)\r\n#z = binom.rvs(n=nflipspercoin, p=theta_act, size=ncoins)\r\n#N = [nflipspercoin] * ncoins\r\n\r\n\r\n# Arrange the data into a more convenient way to feed the PyMC model.\r\ncoin = [] # list/vector index for each coins (from 0 to number of coins)\r\ny = [] # list/vector with head (1) or tails (0) for each flip.\r\nfor i, flips in enumerate(N):\r\n heads = z[i]\r\n if heads > flips:\r\n sys.exit(\"The number of heads can't be greater than the number of flips\")\r\n else:\r\n y = y + [1] * heads + [0] * (flips-heads)\r\n coin = coin + [i] * flips\r\n\r\n\r\n# Specify the model in PyMC\r\nwith pm.Model() as model:\r\n# define the hyperparameters\r\n mu = pm.Beta('mu', 2, 2)\r\n kappa = pm.Gamma('kappa', 1, 0.1)\r\n # define the prior\r\n theta = pm.Beta('theta', mu * kappa, (1 - mu) * kappa, shape=len(N))\r\n # define the likelihood\r\n y = pm.Bernoulli('y', p=theta[coin], observed=y)\r\n\r\n# Generate a MCMC chain\r\n\r\n trace = pm.sample(1000, progressbar=False)\r\n\r\n\r\n## Check the results.\r\n\r\n## Print summary for each trace\r\n#pm.df_summary(trace)\r\n#pm.df_summary(trace)\r\n\r\n## Check for mixing and autocorrelation\r\npm.autocorrplot(trace, varnames=['mu', 'kappa'])\r\n#pm.autocorrplot(trace, varnames =[mu, kappa])\r\n\r\n## Plot KDE and sampled values for each parameter.\r\npm.traceplot(trace)\r\n#pm.traceplot(trace)\r\n\r\n# Create arrays with the posterior sample\r\ntheta1_sample = trace['theta'][:,0]\r\ntheta2_sample = 
trace['theta'][:,1]\r\ntheta3_sample = trace['theta'][:,2]\r\nmu_sample = trace['mu']\r\nkappa_sample = trace['kappa']\r\n\r\n\r\n# Scatter plot hyper-parameters\r\nfig, ax = plt.subplots(4, 3, figsize=(12,12))\r\nax[0, 0].scatter(mu_sample, kappa_sample, marker='o', color='skyblue')\r\nax[0, 0].set_xlim(0,1)\r\nax[0, 0].set_xlabel(r'$\\mu$')\r\nax[0, 0].set_ylabel(r'$\\kappa$')\r\n\r\n# Plot mu histogram\r\n#plot_post(mu_sample, xlab=r'$\\mu$', show_mode=False, labelsize=9, framealpha=0.5)\r\n\r\npm.plot_posterior(mu_sample, ax=ax[0, 1], color='skyblue')\r\nax[0, 1].set_xlabel(r'$\\mu$')\r\nax[0, 1].set_xlim(0,1)\r\n\r\n# Plot kappa histogram\r\n#plot_post(kappa_sample, xlab=r'$\\kappa$', show_mode=False, labelsize=9, framealpha=0.5)\r\npm.plot_posterior(kappa_sample, ax=ax[0, 2], color='skyblue')\r\nax[0, 2].set_xlabel(r'$\\kappa$')\r\n\r\n# Plot theta 1\r\n\r\n#plot_post(theta1_sample, xlab=r'$\\theta1$', show_mode=False, labelsize=9, framealpha=0.5)\r\npm.plot_posterior(theta1_sample, ax=ax[1, 0], color='skyblue')\r\nax[1, 0].set_xlabel(r'$\\theta1$')\r\nax[1, 0].set_xlim(0,1)\r\n\r\n# Scatter theta 1 vs mu\r\nax[1, 1].scatter(theta1_sample, mu_sample, marker='o', color='skyblue')\r\nax[1, 1].set_xlim(0,1)\r\nax[1, 1].set_ylim(0,1)\r\nax[1, 1].set_xlabel(r'$\\theta1$')\r\nax[1, 1].set_ylabel(r'$\\mu$')\r\n\r\n# Scatter theta 1 vs kappa\r\nax[1, 2].scatter(theta1_sample, kappa_sample, marker='o', color='skyblue')\r\nax[1, 2].set_xlim(0,1)\r\nax[1, 2].set_xlabel(r'$\\theta1$')\r\nax[1, 2].set_ylabel(r'$\\kappa$')\r\n\r\n# Plot theta 2\r\n#plot_post(theta2_sample, xlab=r'$\\theta2$', show_mode=False, labelsize=9, framealpha=0.5)\r\npm.plot_posterior(theta2_sample, ax=ax[2, 0], color='skyblue')\r\nax[2, 0].set_xlabel(r'$\\theta2$')\r\nax[2, 0].set_xlim(0,1)\r\n\r\n# Scatter theta 2 vs mu\r\nax[2, 1].scatter(theta2_sample, mu_sample, marker='o', color='skyblue')\r\nax[2, 1].set_xlim(0,1)\r\nax[2, 1].set_ylim(0,1)\r\nax[2, 1].set_xlabel(r'$\\theta2$')\r\nax[2, 1].set_ylabel(r'$\\mu$')\r\n\r\n# Scatter theta 2 vs kappa\r\nax[2, 2].scatter(theta2_sample, kappa_sample, marker='o', color='skyblue')\r\nax[2, 2].set_xlim(0,1)\r\nax[2, 2].set_xlabel(r'$\\theta2$')\r\nax[2, 2].set_ylabel(r'$\\kappa$')\r\n\r\n# Plot theta 3\r\n\r\n#plot_post(theta3_sample, xlab=r'$\\theta3$', show_mode=False, labelsize=9, framealpha=0.5)\r\npm.plot_posterior(theta3_sample, ax=ax[3, 0], color='skyblue')\r\nax[3, 0].set_xlabel(r'$\\theta3$')\r\nax[3, 0].set_xlim(0,1)\r\n\r\n# Scatter theta 3 vs mu\r\nax[3, 1].scatter(theta3_sample, mu_sample, marker='o', color='skyblue')\r\nax[3, 1].set_xlim(0,1)\r\nax[3, 1].set_ylim(0,1)\r\nax[3, 1].set_xlabel(r'$\\theta3$')\r\nax[3, 1].set_ylabel(r'$\\mu$')\r\n\r\n# Scatter theta 3 vs kappa\r\nax[3, 2].scatter(theta3_sample, kappa_sample, marker='o', color='skyblue')\r\nax[3, 2].set_xlim(0,1)\r\nax[3, 2].set_xlabel(r'$\\theta3$')\r\nax[3, 2].set_ylabel(r'$\\kappa$')\r\n\r\nplt.tight_layout()\r\nplt.savefig('Figure_9.11.png')\r\nplt.show()\r\n\r\n", "repo_name": "aloctavodia/Doing_bayesian_data_analysis", "sub_path": "09_BernBetaMuKappaPyMC.py", "file_name": "09_BernBetaMuKappaPyMC.py", "file_ext": "py", "file_size_in_byte": 4851, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 878, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": 
"name"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "pymc3.Model", "line_number": 42, "usage_type": "call"}, {"api_name": "pymc3.Beta", "line_number": 44, "usage_type": "call"}, {"api_name": "pymc3.Gamma", "line_number": 45, "usage_type": "call"}, {"api_name": "pymc3.Beta", "line_number": 47, "usage_type": "call"}, {"api_name": "pymc3.Bernoulli", "line_number": 49, "usage_type": "call"}, {"api_name": "pymc3.sample", "line_number": 53, "usage_type": "call"}, {"api_name": "pymc3.autocorrplot", "line_number": 63, "usage_type": "call"}, {"api_name": "pymc3.traceplot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "pymc3.plot_posterior", "line_number": 88, "usage_type": "call"}, {"api_name": "pymc3.plot_posterior", "line_number": 94, "usage_type": "call"}, {"api_name": "pymc3.plot_posterior", "line_number": 100, "usage_type": "call"}, {"api_name": "pymc3.plot_posterior", "line_number": 119, "usage_type": "call"}, {"api_name": "pymc3.plot_posterior", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}]} +{"seq_id": "26871756379", "text": "import requests\nfrom typing import Optional, List, Type, Union\nimport json\nfrom fastapi.testclient import TestClient\n\nfrom src.main import API_VERSION_PREFIX\n\n\ndef header(uid, role=None):\n headers = {\"uid\": uid, \"api_key\": \"key\"}\n if role is not None:\n headers[\"role\"] = role\n return headers\n\n\ndef post_user(\n client,\n uid,\n user_name=\"generic_user_name\",\n location=\"location\",\n interests=\"interests\",\n include_pfp=False,\n):\n data = {\n \"name\": user_name,\n \"location\": location,\n \"interests\": interests,\n }\n if include_pfp:\n with open(\"./pfp.img\", \"wb\") as f:\n f.write(b\"test\")\n with open(\"./pfp.img\", \"rb\") as f:\n files = {\"img\": (\"pfp.img\", f, \"plain/text\")}\n response_post = client.post(\n API_VERSION_PREFIX + \"/users/\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n data=data,\n files=files,\n )\n assert response_post.status_code == 200\n else:\n response_post = client.post(\n API_VERSION_PREFIX + \"/users/\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n data=data,\n )\n\n return response_post\n\n\ndef post_song(\n client,\n uid: Optional[str] = None,\n name: Optional[str] = \"song_name\",\n description: Optional[str] = \"song_desc\",\n artists: Optional[List[str]] = None,\n genre: Optional[str] = \"song_genre\",\n sub_level: Optional[int] = 0,\n file: Optional[str] = \"./tests/test.song\",\n blocked: Optional[bool] = False,\n headers: Optional[dict] = None,\n role: Optional[str] = \"artist\",\n album_id: Optional[int] = None,\n unwrap_id: bool = True,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n if headers is None:\n headers = header(uid, role=role)\n if artists is None:\n artists = [\"song_artist_name\"]\n\n with open(file, \"wb\") as f:\n f.write(b\"test\")\n with open(file, \"rb\") as f:\n 
response_post = client.post(\n API_VERSION_PREFIX + \"/songs/\",\n data={\n \"name\": name,\n \"description\": description,\n \"artists\": json.dumps(artists),\n \"genre\": genre,\n \"sub_level\": sub_level,\n \"album_id\": album_id,\n },\n files={\"file\": (\"song.txt\", f, \"plain/text\")},\n headers=headers,\n )\n\n if blocked:\n client.put(\n f\"{API_VERSION_PREFIX}/songs/{response_post.json()['id']}\",\n data={\"blocked\": True},\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": \"admin\"},\n )\n\n if unwrap_id:\n return response_post.json()[\"id\"]\n return response_post\n\n\ndef post_album(\n client,\n uid: Optional[str] = None,\n name: Optional[str] = \"album_name\",\n description: Optional[str] = \"album_desc\",\n genre: Optional[str] = \"album_genre\",\n songs_ids: Optional[List[str]] = None,\n cover: Optional[str] = \"./tests/test.cover\",\n blocked: bool = False,\n headers: Optional[dict] = None,\n role: Optional[str] = \"artist\",\n unwrap_id: bool = True,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n if headers is None:\n headers = header(uid, role=role)\n if songs_ids is None:\n songs_ids = []\n\n with open(cover, \"wb\") as f:\n f.write(b\"test\")\n with open(cover, \"rb\") as f:\n response_post = client.post(\n API_VERSION_PREFIX + \"/albums/\",\n data={\n \"name\": name,\n \"description\": description,\n \"songs_ids\": json.dumps(songs_ids),\n \"genre\": genre,\n },\n files={\"cover\": (\"cover.txt\", f, \"plain/text\")},\n headers=headers,\n )\n\n if blocked:\n response_put = client.put(\n f\"{API_VERSION_PREFIX}/albums/{response_post.json()['id']}\",\n data={\"blocked\": True},\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": \"admin\"},\n )\n assert response_put.status_code == 200\n if unwrap_id:\n return response_post.json()[\"id\"]\n return response_post\n\n\ndef post_album_with_song(\n client,\n uid: Optional[str] = None,\n album_name=\"album_name\",\n album_genre=\"album_genre\",\n song_name=\"song_name\",\n song_genre=\"song_genre\",\n song_sub_level=0,\n):\n song_id = post_song(\n client, uid=uid, name=song_name, genre=song_genre, sub_level=song_sub_level\n )\n return post_album(\n client,\n uid=uid,\n name=album_name,\n genre=album_genre,\n songs_ids=[song_id],\n )\n\n\ndef post_playlist(\n client,\n uid: Optional[str] = None,\n playlist_name: Optional[str] = \"playlist_name\",\n description: Optional[str] = \"playlist_desc\",\n songs_ids: Optional[List[str]] = None,\n colabs_ids: Optional[List[str]] = None,\n blocked: Optional[bool] = False,\n headers: Optional[dict] = None,\n unwrap_id: bool = True,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n if headers is None:\n headers = {\n \"api_key\": \"key\",\n \"uid\": uid,\n }\n if songs_ids is None:\n songs_ids = []\n if colabs_ids is None:\n colabs_ids = []\n\n response_post = client.post(\n f\"{API_VERSION_PREFIX}/playlists/\",\n data={\n \"name\": playlist_name,\n \"description\": description,\n \"songs_ids\": json.dumps(songs_ids),\n \"colabs_ids\": json.dumps(colabs_ids),\n },\n headers=headers,\n )\n\n if blocked:\n response_put = client.put(\n f\"{API_VERSION_PREFIX}/playlists/{response_post.json()['id']}\",\n data={\"blocked\": True},\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": \"admin\"},\n )\n assert response_put.status_code == 200\n\n if unwrap_id:\n return response_post.json()[\"id\"]\n return response_post\n\n\ndef wrap_post_playlist(client, unwrap_id: bool = True):\n post_user(client, \"user_playlist_owner\", user_name=\"Ricardito\")\n 
post_user(client, \"user_playlist_colab\", user_name=\"Fernandito\")\n song_id_1 = post_song(client, uid=\"user_playlist_owner\", name=\"song_for_playlist1\")\n song_id_2 = post_song(client, uid=\"user_playlist_owner\", name=\"song_for_playlist2\")\n colabs_id = [\"user_playlist_colab\"]\n songs_id = [song_id_1, song_id_2]\n return post_playlist(\n client,\n uid=\"user_playlist_owner\",\n playlist_name=\"playlist_name\",\n description=\"playlist_description\",\n colabs_ids=colabs_id,\n songs_ids=songs_id,\n unwrap_id=unwrap_id,\n )\n\n\ndef block_song(client, song_id: int):\n post_user(client, \"__blocker__id__\", user_name=\"__blocker__name__\")\n response_put = client.put(\n f\"{API_VERSION_PREFIX}/songs/{song_id}\",\n data={\"blocked\": True},\n headers={\"api_key\": \"key\", \"uid\": \"__blocker__id__\", \"role\": \"admin\"},\n )\n assert response_put.status_code == 200\n return response_put\n\n\ndef post_review(\n client,\n album_id: int,\n uid: Optional[str] = None,\n text: Optional[str] = \"review text\",\n score: Optional[int] = 5,\n role: Optional[str] = \"listener\",\n):\n return post_json(\n client,\n f\"/albums/{album_id}/reviews/\",\n json={\"text\": text, \"score\": score},\n uid=uid,\n role=role,\n )\n\n\ndef add_song_to_favorites(client, uid, song_id):\n return post(\n client, f\"/users/{uid}/favorites/songs/?song_id={song_id}\", uid=uid, data={}\n )\n\n\ndef get_favorite_songs(client, uid, role=\"listener\"):\n return get(client, f\"/users/{uid}/favorites/songs/\", uid=uid, role=role)\n\n\ndef delete_song_from_favorites(client, uid, song_id):\n response_delete = client.delete(\n f\"{API_VERSION_PREFIX}/users/{uid}/favorites/songs/?song_id={song_id}\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n )\n return response_delete\n\n\ndef get_favorite_albums(client, uid, role: Optional[str] = \"listener\"):\n return get(client, f\"/users/{uid}/favorites/albums/\", uid=uid, role=role)\n\n\ndef add_album_to_favorites(client, uid, album_id):\n response_post = client.post(\n f\"{API_VERSION_PREFIX}/users/{uid}/favorites/albums/?album_id={album_id}\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n )\n return response_post\n\n\ndef add_song_to_album(client, uid: str, song_id: int, album_id: int):\n response_get = client.get(\n f\"{API_VERSION_PREFIX}/albums/{album_id}/songs/\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n )\n songs_ids = [song[\"id\"] for song in response_get.json()]\n songs_ids.append(song_id)\n response_put = client.put(\n f\"{API_VERSION_PREFIX}/albums/{album_id}/songs/\",\n data={\"songs_ids\": json.dumps(songs_ids)},\n headers={\"api_key\": \"key\", \"uid\": uid},\n )\n return response_put\n\n\ndef block_album(client, id: int):\n post_user(client, \"__blocker__id__\", user_name=\"__blocker__name__\")\n return put(\n client,\n f\"/albums/{id}\",\n data={\"blocked\": True},\n uid=\"__blocker__id__\",\n role=\"admin\",\n )\n\n\ndef remove_album_from_favorites(client, uid, album_id):\n return delete(\n client, f\"/users/{uid}/favorites/albums/?album_id={album_id}\", uid=uid\n )\n\n\ndef get_favorite_playlists(client, uid, role=\"listener\"):\n return get(client, f\"/users/{uid}/favorites/playlists/\", uid=uid, role=role)\n\n\ndef add_playlist_to_favorites(client, uid, playlist_id, role=\"listener\"):\n return post(\n client,\n f\"/users/{uid}/favorites/playlists/?playlist_id={playlist_id}\",\n uid=uid,\n role=role,\n data={},\n )\n\n\ndef block_playlist(client, playlist_id: int):\n post_user(client, \"__blocker__id__\", user_name=\"__blocker__name__\")\n 
response_put = client.put(\n f\"{API_VERSION_PREFIX}/playlists/{playlist_id}\",\n data={\"blocked\": True},\n headers={\"api_key\": \"key\", \"uid\": \"__blocker__id__\", \"role\": \"admin\"},\n )\n assert response_put.status_code == 200\n return response_put\n\n\ndef remove_playlist_from_favorites(client, uid, playlist_id):\n response_delete = client.delete(\n f\"{API_VERSION_PREFIX}/users/{uid}/favorites/playlists/?playlist_id={playlist_id}\",\n headers={\"api_key\": \"key\", \"uid\": uid},\n )\n return response_delete\n\n\ndef post_streaming(client, uid: str, name=\"streaming_name\", include_img=False):\n data = {\"name\": name}\n\n if include_img:\n with open(\"./streaming.img\", \"wb\") as f:\n f.write(b\"test\")\n with open(\"./streaming.img\", \"rb\") as f:\n files = {\"img\": (\"streaming.img\", f, \"plain/text\")}\n response_post = client.post(\n API_VERSION_PREFIX + \"/streamings/\",\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": \"artist\"},\n data=data,\n files=files,\n )\n else:\n response_post = client.post(\n API_VERSION_PREFIX + \"/streamings/\",\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": \"artist\"},\n data=data,\n )\n\n return response_post\n\n\ndef post_user_with_sub_level(client, user_id: str, user_name: str, sub_level: int):\n response = post_user(client, user_id, user_name)\n assert response.status_code == 200\n\n response = client.post(\n f\"{API_VERSION_PREFIX}/subscriptions/\",\n headers={\"api_key\": \"key\", \"uid\": user_id},\n json={\"sub_level\": sub_level},\n )\n assert response.status_code == 200\n\n response = client.post(\n f\"{API_VERSION_PREFIX}/subscriptions/\",\n headers={\"api_key\": \"key\", \"uid\": user_id},\n json={\"sub_level\": sub_level},\n )\n return response\n\n\ndef delete_user(client, user_id: str):\n return delete(client, f\"/users/{user_id}\", uid=user_id)\n\n\ndef get(\n client,\n endpoint: str,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[Union[int, str]] = None,\n limit: Optional[Union[int, str]] = None,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n headers = {\n \"api_key\": \"key\",\n \"uid\": uid,\n \"role\": role,\n }\n params = {}\n if limit is not None:\n params[\"offset\"] = offset\n params[\"limit\"] = limit\n\n response = client.get(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n headers=headers,\n params=params,\n )\n if limit is None:\n if response.status_code < 299 and \"items\" in response.json():\n items = response.json()[\"items\"]\n resp = requests.models.Response()\n resp.status_code = response.status_code\n resp._content = json.dumps(items, indent=2).encode(\"utf-8\")\n response = resp\n if unwrap:\n return response.json()\n return response\n\n\ndef post(\n client,\n endpoint: str,\n data: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n files: Optional[dict] = None,\n unwrap: bool = False,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n response = client.post(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": role},\n data=data,\n files=files,\n )\n if unwrap:\n return response.json()\n return response\n\n\ndef post_json(\n client,\n endpoint: str,\n json: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n files: Optional[dict] = None,\n unwrap: bool = False,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n response = client.post(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": role},\n 
json=json,\n files=files,\n )\n if unwrap:\n return response.json()\n return response\n\n\ndef delete(\n client,\n endpoint: str,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n response = client.delete(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": role},\n )\n if unwrap:\n return response.json()\n return response\n\n\ndef put(\n client,\n endpoint: str,\n data: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n response = client.put(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n data=data,\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": role},\n )\n if unwrap:\n return response.json()\n return response\n\n\ndef put_json(\n client,\n endpoint: str,\n json: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n if uid is None:\n uid = get_uid_or_create(client)\n\n response = client.put(\n f\"{API_VERSION_PREFIX}{endpoint}\",\n json=json,\n headers={\"api_key\": \"key\", \"uid\": uid, \"role\": role},\n )\n if unwrap:\n return response.json()\n return response\n\n\ndef get_album(\n client,\n album_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, f\"/albums/{album_id}\", uid, role, unwrap)\n\n\ndef add_query(endpoint: str, query: str):\n if \"?\" in endpoint:\n return f\"{endpoint}&{query}\"\n return f\"{endpoint}?{query}\"\n\n\ndef search_albums(\n client,\n artist: Optional[str] = None,\n genre: Optional[str] = None,\n name: Optional[str] = None,\n creator: Optional[str] = None,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n endpoint = \"/albums/\"\n for search_term, value in (\n (\"artist\", artist),\n (\"genre\", genre),\n (\"name\", name),\n (\"creator\", creator),\n ):\n if value is not None:\n endpoint = add_query(endpoint, f\"{search_term}={value}\")\n\n return get(\n client, endpoint, uid=uid, role=role, unwrap=unwrap, offset=offset, limit=limit\n )\n\n\ndef get_my_albums(client, uid: str, role: str = \"listener\", unwrap=False):\n return get(client, \"/my_albums/\", uid, role, unwrap)\n\n\ndef put_album(\n client,\n album_id: int,\n data: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return put(client, f\"/albums/{album_id}\", data, uid, role, unwrap)\n\n\ndef delete_album(\n client,\n album_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(client, f\"/albums/{album_id}\", uid, role, unwrap)\n\n\ndef get_song(\n client,\n song_id: int,\n uid: Optional[str] = None,\n role: Optional[str] = \"listener\",\n unwrap=False,\n):\n return get(client, f\"/songs/{song_id}\", uid, role, unwrap)\n\n\ndef get_my_songs(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n return get(client, \"/my_songs/\", uid, role, unwrap, offset=offset, limit=limit)\n\n\ndef search_songs(\n client,\n artist: Optional[str] = None,\n genre: Optional[str] = None,\n name: Optional[str] = None,\n creator: Optional[str] = None,\n sub_level: Optional[int] = None,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n endpoint = \"/songs/\"\n 
for search_term, value in (\n (\"artist\", artist),\n (\"genre\", genre),\n (\"name\", name),\n (\"creator\", creator),\n (\"sub_level\", sub_level),\n ):\n if value is not None:\n endpoint = add_query(endpoint, f\"{search_term}={value}\")\n\n return get(\n client, endpoint, uid=uid, role=role, unwrap=unwrap, offset=offset, limit=limit\n )\n\n\ndef put_song(\n client,\n song_id: int,\n data: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return put(client, f\"/songs/{song_id}\", data, uid, role, unwrap)\n\n\ndef delete_song(\n client,\n song_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(client, f\"/songs/{song_id}\", uid, role, unwrap)\n\n\ndef get_songs_by_creator(\n client,\n creator_id: str,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, f\"/songs/?creator={creator_id}\", uid, role, unwrap)\n\n\ndef get_playlist(\n client,\n playlist_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, f\"/playlists/{playlist_id}\", uid, role, unwrap)\n\n\ndef get_my_playlists(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, \"/my_playlists/\", uid, role, unwrap)\n\n\ndef search_playlists(\n client,\n uid: Optional[str] = None,\n colab: Optional[str] = None,\n creator: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n endpoint = \"/playlists/\"\n for search_term, value in (\n (\"colab\", colab),\n (\"creator\", creator),\n ):\n if value is not None:\n endpoint = add_query(endpoint, f\"{search_term}={value}\")\n return get(\n client, endpoint, uid=uid, role=role, unwrap=unwrap, offset=offset, limit=limit\n )\n\n\ndef put_playlist(\n client,\n playlist_id: int,\n data: dict,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return put(client, f\"/playlists/{playlist_id}\", data, uid, role, unwrap)\n\n\ndef delete_playlist(\n client,\n playlist_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(client, f\"/playlists/{playlist_id}\", uid, role, unwrap)\n\n\ndef add_playlist_song(\n client,\n playlist_id: int,\n song_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return post(\n client,\n f\"/playlists/{playlist_id}/songs/\",\n {\"song_id\": song_id},\n uid,\n role,\n unwrap=unwrap,\n )\n\n\ndef remove_playlist_song(\n client,\n playlist_id: int,\n song_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(\n client,\n f\"/playlists/{playlist_id}/songs/{song_id}/\",\n uid,\n role,\n unwrap,\n )\n\n\ndef add_playlist_colab(\n client,\n playlist_id: int,\n colab_id: str,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return post(\n client,\n f\"/playlists/{playlist_id}/colabs/\",\n {\"colab_id\": colab_id},\n uid,\n role,\n unwrap=unwrap,\n )\n\n\ndef post_users(client: Type[TestClient], *users_ids):\n for user_id in users_ids:\n post_user(client, user_id)\n\n\ndef get_uid_or_create(client):\n users = client.get(\n f\"{API_VERSION_PREFIX}/users\", headers={\"api_key\": \"key\"}\n ).json()[\"items\"]\n if len(users) == 0:\n uid = \"__user_id__\"\n post_user(client, uid)\n else:\n uid = users[0][\"id\"]\n return uid\n\n\ndef get_album_comments(\n client,\n album_id: int,\n uid: 
Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n return get(\n client, f\"/albums/{album_id}/comments/\", uid, role, unwrap, offset, limit\n )\n\n\ndef post_comment(\n client,\n album_id: int,\n message: str = \"comment_text\",\n parent_id: Optional[int] = None,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap_id=True,\n):\n response = post_json(\n client,\n f\"/albums/{album_id}/comments/\",\n {\"text\": message, \"parent_id\": parent_id},\n uid,\n role,\n unwrap=unwrap_id,\n )\n if unwrap_id:\n return response[\"id\"]\n return response\n\n\ndef get_user_comments(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, \"/users/comments/\", uid, role, unwrap)\n\n\ndef put_comment(\n client,\n comment_id: int,\n message: str,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return put_json(\n client,\n f\"/albums/comments/{comment_id}/\",\n {\"text\": message},\n uid,\n role,\n unwrap,\n )\n\n\ndef delete_comment(\n client,\n comment_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(client, f\"/albums/comments/{comment_id}/\", uid, role, unwrap)\n\n\ndef get_reviews_of_album(\n client,\n album_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[str] = None,\n limit: Optional[int] = None,\n):\n return get(client, f\"/albums/{album_id}/reviews/\", uid, role, unwrap, offset, limit)\n\n\ndef delete_review_of_album(\n client,\n album_id: int,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return delete(client, f\"/albums/{album_id}/reviews/\", uid, role, unwrap)\n\n\ndef put_review_of_album(\n client,\n album_id: int,\n text: Optional[str],\n score: Optional[int],\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return put_json(\n client,\n f\"/albums/{album_id}/reviews/\",\n {\"text\": text, \"score\": score},\n uid,\n role,\n unwrap,\n )\n\n\ndef get_users(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n return get(client, \"/users/\", uid, role, unwrap, offset, limit)\n\n\ndef get_user_reviews(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n return get(client, f\"/users/{uid}/reviews/\", uid, role, unwrap, offset, limit)\n\n\ndef get_user(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n):\n return get(client, f\"/users/{uid}/\", uid, role, unwrap)\n\n\ndef get_streamings(\n client,\n uid: Optional[str] = None,\n role: str = \"listener\",\n unwrap=False,\n offset: Optional[int] = None,\n limit: Optional[int] = None,\n):\n return get(client, \"/streamings/\", uid, role, unwrap, offset, limit)\n", "repo_name": "taller2-grupo5-rostov-1c2022/songs-server", "sub_path": "tests/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 24458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "src.main.API_VERSION_PREFIX", "line_number": 35, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 53, "usage_type": "name"}, 
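[Editor's note] The search_songs and search_playlists helpers in this record build their query strings through add_query, whose definition falls outside this excerpt. Below is a minimal sketch of how such a helper could work; the implementation is assumed, not taken from tests/utils.py.

def add_query(endpoint: str, query: str) -> str:
    # Use '?' before the first parameter and '&' for every one after it.
    separator = '&' if '?' in endpoint else '?'
    return endpoint + separator + query

# Building /songs/?artist=foo the same way search_songs does:
endpoint = '/songs/'
for search_term, value in (('artist', 'foo'), ('genre', None)):
    if value is not None:
        endpoint = add_query(endpoint, f'{search_term}={value}')
assert endpoint == '/songs/?artist=foo'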
{"api_name": "typing.Optional", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 63, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 78, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 82, "usage_type": "call"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 107, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 110, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 113, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 128, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 132, "usage_type": "call"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 141, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 153, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 174, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 175, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 177, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 178, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 180, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 196, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 200, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 208, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 240, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 251, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 252, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 253, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 254, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 283, 
"usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 289, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 297, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 303, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 304, "usage_type": "call"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 344, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 354, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 369, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 376, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 389, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 396, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 410, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 413, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 413, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 414, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 414, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 429, "usage_type": "name"}, {"api_name": "requests.models.Response", "line_number": 436, "usage_type": "call"}, {"api_name": "requests.models", "line_number": 436, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 438, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 449, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 451, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 458, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 472, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 474, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 481, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 494, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 502, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 514, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 522, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 535, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 543, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 555, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 570, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 571, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 572, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 573, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 574, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 577, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 578, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 603, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 613, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 623, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 624, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 632, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 635, 
"usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 636, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 643, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 644, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 645, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 646, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 647, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 648, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 651, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 652, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 674, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 684, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 694, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 704, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 713, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 722, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 723, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 724, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 727, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 728, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 746, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 756, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 767, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 785, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 802, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 816, "usage_type": "name"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 816, "usage_type": "name"}, {"api_name": "src.main.API_VERSION_PREFIX", "line_number": 823, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 836, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 839, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 840, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 851, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 852, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 871, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 882, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 899, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 909, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 912, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 913, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 921, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 931, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 932, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 933, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 949, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 952, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 953, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 960, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 963, 
"usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 964, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 971, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 980, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 983, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 984, "usage_type": "name"}]} +{"seq_id": "73613833446", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\n\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nfrom sklearn import svm\n\nboston = datasets.load_boston()\n\n# Cross Validation (CV)\n'''\n- Hold out Cross Validation\n- k-fold Cross Validation\n\nA test set should still be held out for final evaluation, but the validation set is no longer needed when doing CV.\n\nIn the basic approach, called k-fold CV, the training set is split into k smaller sets. \nThe following procedure is followed for each of the k “folds”:\n- A model is trained using k-1 of the folds as training data;\n- the resulting model is validated on the remaining part of the data (i.e., it is used as a test set to compute a \nperformance measure such as accuracy).\n\nThe performance measure reported by k-fold cross-validation is then the average of the values computed in the loop.\n'''\n\n# Holdout Method\n'''\n- Split initial dataset into a separate training and test dataset\n- Training dataset - model training\n- Test dataset - estimate its generalisation performance\n\nA variation is to split the training set to two :- training set and validation set\n\nTraining set:- For fitting different models\nValidation set :- For tuning and comparing different parameter settings to further improve the \nperformance for making predictions on unseen data. And finally for model selection.\n\nThis process is called model selection. We want to select the optimal values of tuning parameters (also called hyperparameters).\n'''\n\n# K-fold Cross-validation\n'''\n- Randomly split the training dataset into k folds without replacement.\n- k — 1 folds are used for the model training.\n- The one fold is used for performance evaluation.\n\nThis procedure is repeated k times.\n\nFinal outcomes:- k models and performance estimates.\n\n- calculate the average performance of the models based on the different, \nindependent folds to obtain a performance estimate that is less sensitive to the sub-partitioning of the training data \ncompared to the holdout method.\n\n- k-fold cross-validation is used for model tuning. Finding the optimal hyperparameter values that yields a satisfying \ngeneralization performance.\n\n- Once we have found satisfactory hyperparameter values, we can retrain the model on the complete training set and \nobtain a final performance estimate using the independent test set. 
The rationale behind fitting a model to the whole \ntraining dataset after k-fold cross-validation is that providing more training samples to a learning algorithm usually \nresults in a more accurate and robust model.\n\n- Common k is 10\n\n- For relatively small training sets, increase the number of folds.\n'''\n\n# Stratified k-fold cross-validation\n'''\n- variation of k-fold\n- Can yield better bias and variance estimates, especially in cases of unequal class proportions\n'''\n\n\n# Cross-validation: evaluating estimator performance\nX_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, test_size=0.4, random_state=0)\n\nprint(X_train.shape, y_train.shape)\n\nprint(X_test.shape, y_test.shape)\n\nregression = svm.SVR(kernel='linear', C=1).fit(X_train, y_train)\nregression.score(X_test, y_test)\n\n'''\nWhen evaluating different settings (“hyperparameters”) for estimators, such as the C setting that must be manually set \nfor an SVM, there is still a risk of overfitting on the test set because the parameters can be tweaked until \nthe estimator performs optimally.\n\nThis way, knowledge about the test set can “leak” into the model and evaluation metrics no longer report on generalization performance.\n\nTo solve this problem, yet another part of the dataset can be held out as a so-called “validation set”: \ntraining proceeds on the training set, after which evaluation is done on the validation set, \nand when the experiment seems to be successful, final evaluation can be done on the test set.\n\nHowever, by partitioning the available data into three sets, we drastically reduce the number of samples which can be \nused for learning the model, and the results can depend on a particular random choice for the pair of (train, validation) sets.\n\nA solution to this problem, as discussed earlier, is a procedure called cross-validation (CV for short). \nA test set should still be held out for final evaluation, but the validation set is no longer needed when doing CV. \nIn the basic approach, called k-fold CV, the training set is split into k smaller sets \n(other approaches are described below, but generally follow the same principles). \nThe following procedure is followed for each of the k “folds”:\n\n- A model is trained using k-1 of the folds as training data;\n- the resulting model is validated on the remaining part of the data \n (i.e., it is used as a test set to compute a performance measure such as accuracy).\n\nThe performance measure reported by k-fold cross-validation is then the average of the values computed in the loop. 
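[Editor's note] A minimal sketch making the k-fold procedure above concrete: the number cross_val_score reports is the mean of the per-fold scores a manual KFold loop would compute. This uses only standard scikit-learn API; an integer cv on a regression target defaults to an unshuffled KFold, which is why the two results agree.

import numpy as np
from sklearn.model_selection import KFold, cross_val_score
from sklearn import datasets, svm

X, y = datasets.load_boston(return_X_y=True)

manual_scores = []
for train_idx, test_idx in KFold(n_splits=5).split(X):
    # Train on k-1 folds, score on the remaining fold.
    model = svm.SVR(kernel='linear', C=1).fit(X[train_idx], y[train_idx])
    manual_scores.append(model.score(X[test_idx], y[test_idx]))

auto_scores = cross_val_score(svm.SVR(kernel='linear', C=1), X, y, cv=5)
print(np.mean(manual_scores), auto_scores.mean())  # the two averages agree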
\nThis approach can be computationally expensive, but does not waste too much data \n(as it is the case when fixing an arbitrary test set), \nwhich is a major advantage in problem such as inverse inference where the number of samples is very small.\n'''\n\n# Computing cross-validated metrics\nfrom sklearn.model_selection import cross_val_score\nregression = svm.SVR(kernel='linear', C=1)\nscores = cross_val_score(regression, boston.data, boston.target, cv=5)\nscores\n\n# The mean score and the 95% confidence interval of the score estimate are hence given by:\nprint(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std()))\n\n# By default, the score computed at each CV iteration is the score method of the estimator.\n# It is possible to change this by using the scoring parameter:\n\nscores = cross_val_score(regression, boston.data, boston.target, cv=5, scoring='neg_mean_squared_error')\nscores\n\n\n# K-fold\n'''\nKFold divides all the samples in k groups of samples, called folds \n(if k = n, this is equivalent to the Leave One Out strategy), of equal sizes (if possible). \nThe prediction function is learned using k - 1 folds, and the fold left out is used for test.\n\nExample of 2-fold cross-validation on a dataset with 4 samples:\n'''\n\nfrom sklearn.model_selection import KFold\n\nX = [\"a\", \"b\", \"c\", \"d\"]\nkf = KFold(n_splits=2)\nfor train, test in kf.split(X):\n print(\"%s %s\" % (train, test))\n\n\n# Stratified k-fold\n'''\nStratifiedKFold is a variation of k-fold which returns stratified folds: each set contains approximately \nthe same percentage of samples of each target class as the complete set.\n\nExample of stratified 3-fold cross-validation on a dataset with 10 samples from two slightly unbalanced classes\n'''\nfrom sklearn.model_selection import StratifiedKFold\n\nX = np.ones(10)\ny = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]\nskf = StratifiedKFold(n_splits=3)\nfor train, test in skf.split(X, y):\n print(\"%s %s\" % (train, test))\n\nprint(X)\nprint(y)\n\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import make_pipeline\npipe_svm = make_pipeline(StandardScaler(),\n PCA(n_components=2),\n svm.SVR(kernel='linear', C=1)) # LogisticRegression(random_state=1)\npipe_svm.fit(X_train, y_train)\ny_pred = pipe_svm.predict(X_test)\nprint('Test Accuracy: %.3f' % pipe_svm.score(X_test, y_test))\n\n\nscores = cross_val_score(estimator=pipe_svm,\n X=X_train,\n y=y_train,\n cv=10,\n n_jobs=1)\nprint('CV accuracy scores: %s' % scores)\n\nprint('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "FISHWONGY/udemy_repo", "sub_path": "The_Complete_Machine_Learning_Course_with_Python/3_Regression/08_Cross Validation.py", "file_name": "08_Cross Validation.py", "file_ext": "py", "file_size_in_byte": 7706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "seaborn.set_style", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 12, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 86, "usage_type": "name"}, {"api_name": "sklearn.svm.SVR", "line_number": 121, "usage_type": "call"}, {"api_name": 
"sklearn.svm", "line_number": 121, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 161, "usage_type": "call"}, {"api_name": "sklearn.model_selection.StratifiedKFold", "line_number": 163, "usage_type": "call"}, {"api_name": "sklearn.pipeline.make_pipeline", "line_number": 174, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 174, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 175, "usage_type": "call"}, {"api_name": "sklearn.svm.SVR", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 176, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "35797442570", "text": "from google.appengine.ext import db\nfrom sns.api.facade import iapi\nfrom sns.api import consts as api_const\nfrom django import forms\nfrom sns.deal import consts as deal_const\nfrom sns.deal import api as deal_api\nfrom sns.dm import consts as dm_const\nfrom sns.view import consts as view_const\nfrom sns.view.baseform import BaseForm, NameMultipleChoiceField\nfrom common.utils import string as str_util\n\n\nclass DMCampaignForm(BaseForm):\n \n def __init__( self, *args, **kwargs ):\n super(BaseForm, self ).__init__( *args, **kwargs )\n if self.__class__.__name__.find('CreateForm')!=-1:\n channelChoice=[]\n channels = iapi(api_const.API_M_CHANNEL).query_base().fetch(limit=1000)\n for channel in channels:\n channelChoice.append((channel.key(),channel.name))\n self.fields['sourceChannel'] = forms.ChoiceField(choices=channelChoice,widget=forms.Select(\n attrs={\"name\":\"channel\",\"id\":\"channel\"}),label=\"Define DM Source Twitter account\", required=False)\n \n articleChoice=[]\n articles=iapi(api_const.API_M_ARTICLE).query_base().fetch(limit=100) \n for article in articles:\n articleChoice.append((article.id,article.msgShort80))\n self.fields['contents'] = NameMultipleChoiceField(choices=articleChoice, label_attr='msg', required=False)\n \n choices = []\n cities = deal_api.GrouponApi.get_city_2_division_map().keys()\n cities.sort()\n for location in cities:\n choices.append((location,location))\n self.fields['locations'] = forms.MultipleChoiceField(choices=choices,required=False)\n \n choices = []\n cats = deal_const.TOPIC_2_GROUPON_CATEGORY_MAP.keys()\n cats.sort()\n for cat in cats:\n choices.append((str_util.name_2_key(cat),cat))\n self.fields['nationalTopics'] = forms.MultipleChoiceField(choices=choices,required=False)\n \n choices = []\n cats = deal_const.TOPIC_2_GROUPON_CATEGORY_MAP.keys()\n cats.sort()\n for cat in cats:\n choices.append((str_util.name_2_key(cat),cat))\n self.fields['topics'] = forms.MultipleChoiceField(choices=choices,required=False)\n \n sendOrder = forms.ChoiceField(choices=[(dm_const.DM_LATEST_TO_OLDEST,'Latest to Oldest'),(dm_const.DM_OLDEST_TO_LATEST,'Oldest to Latest')],required=True)\n interval_candidates=[]\n for i in dm_const.DM_INTERVALS:\n interval_candidates.append((i,dm_const.INTERVAL_MAP[i])) \n 
scheduleInterval=forms.ChoiceField(choices=interval_candidates,widget=forms.Select(\n attrs={\"id\":\"schedule_interval\"}),required=False)\n dailyTarget = forms.IntegerField(required=False)\n totalTarget = forms.IntegerField(required=False)\n gaOn=forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'retro analyticsCheck','onclick':'toggleAnalytics(this)'}),\n required=False)\n gaUseCampaignName=forms.BooleanField(widget=forms.CheckboxInput(attrs={'class':'retro campaignCheck','onclick':'toggleCampaign(this)'}),\n required=False)\n gaCampaign=forms.CharField(widget=forms.TextInput(attrs={'name':\"utm_campaign\",\n 'size':'25'}),required=False)\n gaSource=forms.CharField(widget=forms.TextInput(attrs={'name':\"utm_source\",\n 'size':'25'}),required=False)\n gaMedium=forms.CharField(widget=forms.TextInput(attrs={'name':\"utm_medium\",\n 'size':'25'}),required=False)\n gaTerm=forms.CharField(widget=forms.TextInput(attrs={'name':\"utm_term\",\n 'size':'25'}),required=False)\n gaContent=forms.CharField(widget=forms.TextInput(attrs={'name':\"utm_content\",\n 'size':'25'}),required=False)\n \n \n def api_module(self):\n return api_const.API_M_DM_RULE\n \n\nclass DMCampaignCreateForm(DMCampaignForm): \n pass\n\nclass DMCampaignUpdateForm(DMCampaignForm):\n id=forms.CharField(widget=forms.HiddenInput)\n current_page=forms.CharField(widget=forms.HiddenInput,required=False)\n sChannel = forms.CharField(widget=forms.HiddenInput(attrs={'size':'50'}),label=\"DM source account\")\n \nclass AdvancedDMCampaignForm(DMCampaignForm):\n promoteType=forms.ChoiceField(choices=[(dm_const.PROMOTE_TYPE_ACCOUNT,'Promote Accounts'),(dm_const.PROMOTE_TYPE_DEAL,'Promote Deals')],widget=forms.Select(\n attrs={'class':'retro promoteCheck','onchange':'togglePromote(this)'}),required=False)\n accountPromoteType=forms.ChoiceField(choices=[(dm_const.ACCOUNT_PROMOTE_TYPE_CITY,'City Accounts'),\n (dm_const.ACCOUNT_PROMOTE_TYPE_CITY_CATEGORY,'City Category Accounts')],widget=forms.Select(\n attrs={'class':'retro promoteCityCat','onchange':'toggleCityCat(this)'}),required=False)\n categoryType = forms.ChoiceField(choices=[(dm_const.PROMOTE_CATEGORY_TYPE_NATION,'National Category'),(dm_const.PROMOTE_CATEGORY_TYPE_CITY,'City Category')], widget=forms.Select(\n attrs={'class':'retro promoteCategory','onchange':'toggleCategory(this)'}),required=False) \n \n def api_module(self):\n return api_const.API_M_ADVANCED_DM_RULE\n \n def clean_topics(self):\n locations = self.cleaned_data['locations']\n topics = self.cleaned_data['topics']\n nationalTopics = self.cleaned_data['nationalTopics']\n catType = self.cleaned_data['categoryType']\n if catType == dm_const.PROMOTE_CATEGORY_TYPE_NATION:\n if len(nationalTopics) == 0:\n raise forms.ValidationError('National category is required!')\n elif catType == dm_const.PROMOTE_CATEGORY_TYPE_CITY:\n if len(locations) == 0 and len(topics) == 0 :\n raise forms.ValidationError('Location or category is required!')\n return topics\n \n \nclass AdvancedDMCampaignCreateForm(AdvancedDMCampaignForm): \n pass\n\nclass AdvancedDMCampaignUpdateForm(AdvancedDMCampaignForm):\n id=forms.CharField(widget=forms.HiddenInput)\n current_page=forms.CharField(widget=forms.HiddenInput,required=False)\n\n \nclass DMCampaignChartForm(BaseForm):\n def __init__( self,id, *args, **kwargs ):\n super(BaseForm, self ).__init__( *args, **kwargs )\n choices = []\n choices.append((999,\"send count\"))\n rule = db.get(id)\n for t in range(0,len(rule.sendTurn)):\n choices.append((t,\"click count - \"+str(t)))\n 
self.fields['type'] = forms.ChoiceField(choices=choices,\n widget=forms.Select(\n attrs={\"id\":\"chartType\",\"onchange\":\"chooseDMChartType(this)\",\"class\":\"retro loadDMChart\"}),required=True)\n\nclass AdvancedDMCampaignChartForm(BaseForm):\n def __init__( self, *args, **kwargs ):\n super(BaseForm, self ).__init__( *args, **kwargs )\n choices = []\n choices.append((0,\"click count\"))\n self.fields['type'] = forms.ChoiceField(choices=choices,\n widget=forms.Select(\n attrs={\"id\":\"chartType\",\"onchange\":\"chooseDMChartType(this)\",\"class\":\"retro loadDMChart\"}),required=True)\n\n\nclass DMCampaignSortByForm(forms.Form): \n type = forms.ChoiceField(choices=[('nameLower','Name'),('state','Status')],\n widget=forms.Select(\n attrs={\"id\":\"id_sortBy_type\",\"name\":\"id_sortBy_type\",\"onchange\":\"sortByKeyWord()\"}),required=True)\n order = forms.ChoiceField(choices=view_const.LIST_ORDER,\n widget=forms.Select(\n attrs={\"id\":\"id_direct_type\",\"name\":\"id_direct_type\",\"onchange\":\"sortByKeyWord()\"}),required=True)\n paginate = forms.ChoiceField(choices=view_const.LIST_PAGINATE,\n widget=forms.Select(\n attrs={\"id\":\"id_paginate_num\",\"name\":\"id_paginate_num\",\"onchange\":\"sortByKeyWord()\"}),required=True)\n\n\nclass SystemSettingsForm(forms.Form): \n monitor = forms.BooleanField(widget=forms.CheckboxInput(attrs={'onclick':\"changeFollowMonitor('followmonitor')\"}),\n required=False)\n weekStop = forms.BooleanField(widget=forms.CheckboxInput(attrs={'onclick':\"changeFollowMonitor('followweekstop')\"}),\n required=False)\n ", "repo_name": "fantascy/snsanalytics", "sub_path": "src/sns/dm/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 8666, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sns.view.baseform.BaseForm", "line_number": 13, "usage_type": "name"}, {"api_name": "sns.view.baseform.BaseForm", "line_number": 16, "usage_type": "argument"}, {"api_name": "sns.api.facade.iapi", "line_number": 19, "usage_type": "call"}, {"api_name": "sns.api.consts.API_M_CHANNEL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sns.api.consts", "line_number": 19, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 22, "usage_type": "call"}, {"api_name": "sns.api.facade.iapi", "line_number": 26, "usage_type": "call"}, {"api_name": "sns.api.consts.API_M_ARTICLE", "line_number": 26, "usage_type": "attribute"}, {"api_name": "sns.api.consts", "line_number": 26, "usage_type": "name"}, {"api_name": "sns.view.baseform.NameMultipleChoiceField", "line_number": 29, "usage_type": "call"}, {"api_name": "sns.deal.api.GrouponApi.get_city_2_division_map", "line_number": 32, "usage_type": "call"}, {"api_name": "sns.deal.api.GrouponApi", "line_number": 32, "usage_type": "attribute"}, {"api_name": "sns.deal.api", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 36, "usage_type": "name"}, {"api_name": "sns.deal.consts.TOPIC_2_GROUPON_CATEGORY_MAP.keys", "line_number": 39, "usage_type": "call"}, {"api_name": "sns.deal.consts.TOPIC_2_GROUPON_CATEGORY_MAP", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sns.deal.consts", "line_number": 39, "usage_type": "name"}, 
{"api_name": "common.utils.string.name_2_key", "line_number": 42, "usage_type": "call"}, {"api_name": "common.utils.string", "line_number": 42, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 43, "usage_type": "name"}, {"api_name": "sns.deal.consts.TOPIC_2_GROUPON_CATEGORY_MAP.keys", "line_number": 46, "usage_type": "call"}, {"api_name": "sns.deal.consts.TOPIC_2_GROUPON_CATEGORY_MAP", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sns.deal.consts", "line_number": 46, "usage_type": "name"}, {"api_name": "common.utils.string.name_2_key", "line_number": 49, "usage_type": "call"}, {"api_name": "common.utils.string", "line_number": 49, "usage_type": "name"}, {"api_name": "django.forms.MultipleChoiceField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 50, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 52, "usage_type": "name"}, {"api_name": "sns.dm.consts.DM_LATEST_TO_OLDEST", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 52, "usage_type": "name"}, {"api_name": "sns.dm.consts.DM_OLDEST_TO_LATEST", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sns.dm.consts.DM_INTERVALS", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 54, "usage_type": "name"}, {"api_name": "sns.dm.consts.INTERVAL_MAP", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 55, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 56, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 56, "usage_type": "call"}, {"api_name": "django.forms.IntegerField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 58, "usage_type": "name"}, {"api_name": "django.forms.IntegerField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 59, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 60, "usage_type": "name"}, {"api_name": "django.forms.CheckboxInput", "line_number": 60, "usage_type": "call"}, {"api_name": "django.forms.BooleanField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 62, "usage_type": "name"}, {"api_name": "django.forms.CheckboxInput", "line_number": 62, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 64, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 64, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 66, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 66, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 68, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 70, 
"usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 70, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 72, "usage_type": "name"}, {"api_name": "django.forms.TextInput", "line_number": 72, "usage_type": "call"}, {"api_name": "sns.api.consts.API_M_DM_RULE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "sns.api.consts", "line_number": 77, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 84, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 84, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.forms.CharField", "line_number": 85, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 85, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.forms.CharField", "line_number": 86, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 86, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 86, "usage_type": "call"}, {"api_name": "django.forms.ChoiceField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 89, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_TYPE_ACCOUNT", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 89, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_TYPE_DEAL", "line_number": 89, "usage_type": "attribute"}, {"api_name": "django.forms.Select", "line_number": 89, "usage_type": "call"}, {"api_name": "django.forms.ChoiceField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 91, "usage_type": "name"}, {"api_name": "sns.dm.consts.ACCOUNT_PROMOTE_TYPE_CITY", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 91, "usage_type": "name"}, {"api_name": "sns.dm.consts.ACCOUNT_PROMOTE_TYPE_CITY_CATEGORY", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 92, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 92, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 92, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 94, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 94, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_CATEGORY_TYPE_NATION", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 94, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_CATEGORY_TYPE_CITY", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.forms.Select", "line_number": 94, "usage_type": "call"}, {"api_name": "sns.api.consts.API_M_ADVANCED_DM_RULE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "sns.api.consts", "line_number": 98, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_CATEGORY_TYPE_NATION", "line_number": 105, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 105, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 107, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 107, "usage_type": "name"}, {"api_name": "sns.dm.consts.PROMOTE_CATEGORY_TYPE_CITY", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sns.dm.consts", "line_number": 
108, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 110, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 110, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 118, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 118, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 118, "usage_type": "attribute"}, {"api_name": "django.forms.CharField", "line_number": 119, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 119, "usage_type": "name"}, {"api_name": "django.forms.HiddenInput", "line_number": 119, "usage_type": "attribute"}, {"api_name": "sns.view.baseform.BaseForm", "line_number": 122, "usage_type": "name"}, {"api_name": "sns.view.baseform.BaseForm", "line_number": 124, "usage_type": "argument"}, {"api_name": "google.appengine.ext.db.get", "line_number": 127, "usage_type": "call"}, {"api_name": "google.appengine.ext.db", "line_number": 127, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 130, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 130, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 131, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 131, "usage_type": "name"}, {"api_name": "sns.view.baseform.BaseForm", "line_number": 134, "usage_type": "name"}, {"api_name": "sns.view.baseform.BaseForm", "line_number": 136, "usage_type": "argument"}, {"api_name": "django.forms.ChoiceField", "line_number": 139, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 139, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 140, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 140, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 144, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 144, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 145, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 145, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 146, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 146, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 148, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 148, "usage_type": "name"}, {"api_name": "sns.view.consts.LIST_ORDER", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sns.view.consts", "line_number": 148, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 149, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 149, "usage_type": "name"}, {"api_name": "django.forms.ChoiceField", "line_number": 151, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 151, "usage_type": "name"}, {"api_name": "sns.view.consts.LIST_PAGINATE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sns.view.consts", "line_number": 151, "usage_type": "name"}, {"api_name": "django.forms.Select", "line_number": 152, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 152, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 156, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 156, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 157, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 157, "usage_type": "name"}, {"api_name": 
"django.forms.CheckboxInput", "line_number": 157, "usage_type": "call"}, {"api_name": "django.forms.BooleanField", "line_number": 159, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 159, "usage_type": "name"}, {"api_name": "django.forms.CheckboxInput", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "18707251220", "text": "'''Module for parsing directory structures.'''\n\nimport os, re, warnings\nfrom operator import attrgetter\nfrom os.path import normpath\nfrom collections import namedtuple\nimport scandir\n\n\ntry:\n basestring\nexcept NameError:\n basestring = str # Python3\n\n\nclass Singleton(type):\n _instances = {}\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass NoMatchType(object):\n '''Singleton that can be returned by rules to indicate they did not match\n the given input.'''\n __metaclass__ = Singleton\n\n def __nonzero__(self):\n return False\n\n __bool__ = __nonzero__\n\n\nNoMatch = NoMatchType()\n\n\ndef make_regex_rule(regex_str):\n '''Takes a string containing a regex pattern and returns a function\n that can be used as a PathMap rule.\n\n The returned 'match_info' will be a list containing the full regex match\n plus any matched subgroups.\n '''\n regex = re.compile(regex_str)\n def rule(path, dir_entry):\n result = regex.search(path)\n if result:\n return [result.group()] + list(result.groups())\n return NoMatch\n\n return rule\n\n\ndef default_match_rule(path, dir_entry):\n '''Matches anything and returns None as the 'match_info'.'''\n return None\n\n\ndef warn_on_error(oserror):\n '''The default callback function for scandir errors. Raises a\n warning.\n\n Can be overridden with any function that takes a single argument\n (the OSError exception).'''\n warnings.warn('Error on listdir: ' + str(oserror))\n\n\nMatchResult = namedtuple('MatchResult', 'path dir_entry match_info')\n'''The return type for `PathMap.matches`. Contains the path (relative or \nabsolute depending on the `root_path` supplied to `PathMap.matches`), the\n`scandir.DirEntry` object, and the return value from the match rule.'''\n\n\nclass PathMap(object):\n '''Object which contains a number of 'rules' that define how it will\n traverse a directory structure and what paths it will yield. The PathMap \n object can then be used to generate matches starting from one or more \n root paths.\n\n Each 'rule' is a callable that takes two arguments, the path and the\n corresponding scandir.DirEntry object. The path may be relative or \n absolute depending on the supplied root_path. Any rule can also be \n provided as a string, in which case it will be converted to a callable \n using `make_regex_rule`.\n\n Parameters\n ----------\n match_rule : callable\n Returns the 'match_info' result or `NoMatch` if the path should be\n ignored. If None the `default_match_rule` will be used.\n\n ignore_rules : list of callable\n If any of these callables return a value that evaluates to True the\n path will be ignored. The first rule that returns True will cause all\n subsequent `ignore_rules` and the `match_rule` to be skipped.\n\n prune_rules : list of callable\n If a path is a directory and any of these callables return a value\n that evaluates to True the directory will not be descended into. The\n directory path itself may still be matched.\n\n depth : tuple or int\n The minimum and maximum depth for recursion. 
If an single int is\n given only paths at that depth will be generated.\n\n sort : bool\n If true the paths in each directory will be processed and generated\n in sorted order.\n\n on_error : callable\n Callback for errors from scandir. The errors are typically due to a\n directory being deleted between being found and being recursed into.\n\n follow_symlinks : bool\n Follow symbolic links. If set to True it is possible to get stuck in\n an infinite loop.\n\n '''\n def __init__(self, match_rule=None, ignore_rules=None, prune_rules=None,\n depth=(0,None), sort=False, on_error=None,\n follow_symlinks=False):\n\n if match_rule is None:\n match_rule = default_match_rule\n self.match_rule = match_rule\n if ignore_rules:\n self.ignore_rules = ignore_rules[:]\n else:\n self.ignore_rules = []\n if prune_rules:\n self.prune_rules = prune_rules[:]\n else:\n self.prune_rules = []\n if not isinstance(depth, tuple):\n depth = (depth, depth)\n if depth[0] < 0:\n raise ValueError(\"The minimum depth must be positive\")\n if not depth[1] is None and depth[1] < depth[0]:\n raise ValueError(\"The maximum depth must be None or greater than \"\n \"the minimum\")\n self.depth = depth\n self.sort = sort\n self.on_error = on_error\n self.follow_symlinks = follow_symlinks\n\n def _convert_regex_rules(self):\n if isinstance(self.match_rule, basestring):\n self.match_rule = make_regex_rule(self.match_rule)\n for index, rule in enumerate(self.ignore_rules):\n if isinstance(rule, basestring):\n self.ignore_rules[index] = make_regex_rule(rule)\n for index, rule in enumerate(self.prune_rules):\n if isinstance(rule, basestring):\n self.prune_rules[index] = make_regex_rule(rule)\n\n def _test_target_path(self, path, dir_entry):\n for rule in self.ignore_rules:\n if bool(rule(path, dir_entry)) == True:\n return NoMatch\n result = self.match_rule(path, dir_entry)\n return result\n\n def matches(self, root_paths, dir_entries=None):\n '''Generate matches by recursively walking from the 'root_paths' down\n into the directory structure(s).\n\n The object's rules define which paths cause a result to be generated, \n and the `match_rule` provides the `match_info` attribute in the \n generated `MatchResult` object.\n\n Parameters\n ----------\n root_paths : iter\n Provides the paths to start our walk from. If you want these to\n be processed into sorted order you must sort them yourself.\n\n dir_entries : list or None\n If given, must provide a scandir.DirEntry for each root path. 
If\n not provided we must call stat for each root path.\n\n Returns\n -------\n result : MatchResult\n A `MatchResult` object is generated for each matched path.\n '''\n # Allow a single path or an iterable to be passed\n if isinstance(root_paths, basestring):\n root_paths = [root_paths]\n if dir_entries is not None:\n dir_entries = [dir_entries]\n\n # Make sure any regex rules have been converted to a callable\n self._convert_regex_rules()\n\n # Crawl through each root path\n for root_idx, root_path in enumerate(root_paths):\n # Get rid of any extra path seperators\n root_path = normpath(root_path)\n\n # Get the corresponding DirEntry\n if dir_entries is None:\n p, name = os.path.split(root_path)\n if p == '':\n p = '.'\n root_entry = scandir.GenericDirEntry(p, name)\n else:\n root_entry = dir_entries[root_idx]\n\n # Check if the root path itself is matched\n if self.depth[0] == 0:\n match_info = self._test_target_path(root_path, root_entry)\n if not match_info is NoMatch:\n yield MatchResult(root_path, root_entry, match_info)\n if not root_entry.is_dir():\n continue\n\n # Check if the root_path is pruned\n prune_root = False\n for rule in self.prune_rules:\n if rule(root_path, root_entry):\n prune_root = True\n break\n if prune_root:\n continue\n\n # Walk through directory structure checking paths against\n # rules\n curr_dir = (root_path, root_entry)\n next_dirs = []\n while True:\n # Determine the current depth from the root_path\n curr_depth = (curr_dir[0].count(os.sep) -\n root_path.count(os.sep)) + 1\n\n #Build a list of entries for this level so we can sort if\n #requested\n curr_entries = []\n\n # Try getting the contents of the current directory\n try:\n for e in scandir.scandir(curr_dir[0]):\n # Keep directories under the depth limit so we can\n # resurse into them\n if e.is_dir():\n if (self.depth[1] is not None and\n curr_depth > self.depth[1]\n ):\n continue\n else:\n # Plain files can be ignored if they violate\n # either depth limit\n if (curr_depth < self.depth[0] or\n (self.depth[1] is not None and\n curr_depth > self.depth[1])\n ):\n continue\n\n #Add to the list of entries for the curr_dir\n curr_entries.append(e)\n\n except OSError as error:\n #Handle errors from the scandir call\n if self.on_error is not None:\n self.on_error(error)\n else:\n raise\n else:\n # Sort the entries if requested\n if self.sort:\n curr_entries.sort(key=attrgetter('name'))\n\n # Iterate through the entries, yielding them if they are a\n # match\n for e in curr_entries:\n p = os.path.join(curr_dir[0], e.name)\n\n if e.is_dir(follow_symlinks=self.follow_symlinks):\n # If it is not pruned, add it to next_dirs. 
\n for rule in self.prune_rules:\n if rule(p, e):\n break\n else:\n next_dirs.append((p, e))\n\n # If we are below min depth we don't try matching\n # the dir\n if curr_depth < self.depth[0]:\n continue\n\n # Test the path against the match/ignore rules\n match_info = self._test_target_path(p, e)\n if not match_info is NoMatch:\n yield MatchResult(p, e, match_info)\n\n # Update curr_dir or break if we are done\n try:\n curr_dir = next_dirs.pop(0)\n except IndexError:\n break\n\n", "repo_name": "moloney/pathmap", "sub_path": "pathmap.py", "file_name": "pathmap.py", "file_ext": "py", "file_size_in_byte": 11160, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.compile", "line_number": 45, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 66, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "scandir.GenericDirEntry", "line_number": 206, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 234, "usage_type": "attribute"}, {"api_name": "scandir.scandir", "line_number": 242, "usage_type": "call"}, {"api_name": "operator.attrgetter", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 276, "usage_type": "call"}, {"api_name": "os.path", "line_number": 276, "usage_type": "attribute"}]} +{"seq_id": "42839962128", "text": "'''The implementation of the diffusion method.'''\nfrom __future__ import annotations\nfrom typing import Tuple, Any, Union, TypeVar, Callable\nfrom chex import Array\nimport jax\nimport jax.numpy as jnp\nfrom einops import rearrange\nimport haiku as hk\n\nfrom .nn import ForwardFn, Model\nfrom .utils import Params, get_logger\n\n\nlogger = get_logger('kigo.diffusion')\nNumT = TypeVar('NumT', bound=Union[float, Array])\n\n\ndef expand(x: Union[float, Array], ref: Array) -> Array:\n if isinstance(x, float):\n x = jnp.array([x] * ref.shape[0])\n if isinstance(x, Array) and len(x.shape) == 1:\n x = rearrange(x, 'b -> b 1 1 1')\n assert isinstance(x, Array)\n return x\n\n\ndef gt0(x: NumT, eps: float = 1e-8) -> NumT:\n '''Ensures that x is greater than zero, i.e. can be safely used as a\n divisor or for sqrts.'''\n return jnp.clip(x, eps)\n\n\ndef cosine_snr(t: Union[float, Array], s: float = 0.008) -> Array:\n '''Signal-to-noise ratio according to a cosine schedule.'''\n t = jnp.array(t)\n t = jnp.clip(t, 0., 1.)\n return jnp.cos((t + s) / (1. + s) * jnp.pi / 2) ** 2\n\n\ndef sample_q(x0: Array, noise: Array, snr: Array) -> Array:\n snr = rearrange(snr, 'b -> b 1 1 1')\n # Eq. 4 in DDIM\n return gt0(snr) ** 0.5 * x0 + gt0(1. - snr) ** 0.5 * noise\n\n\ndef sample_p_step(xt: Array,\n noise_pred: Array,\n snr: Union[float, Array],\n snr_next: Union[float, Array],\n eta: Union[float, Array],\n noise: Array,\n clip_percentile: Union[float, Array] = 0.995,\n ) -> Tuple[Array, Array]:\n snr = expand(snr, xt)\n snr_next = expand(snr_next, xt)\n eta = expand(eta, xt)\n # Eq. 16 in DDIM, we can interpolate between DDPM (when eta = 1) and DDIM\n # (when eta = 0).\n sigma = (eta\n * gt0((1 - snr_next) / gt0(1 - snr)) ** 0.5\n * gt0((1 - snr) / gt0(snr_next)) ** 0.5)\n # Eq. 
9 in DDIM\n x0_hat = (xt - gt0(1. - snr) ** 0.5 * noise_pred) / gt0(snr) ** 0.5\n # Dynamic thresholding from Imagen by the Google Brain Team.\n s = jnp.quantile(jnp.abs(x0_hat), clip_percentile, axis=(1, 2, 3),\n keepdims=True)\n x0_hat = jnp.where(s > 1.,\n jnp.clip(x0_hat, -s, s) / gt0(s),\n x0_hat)\n # Eq. 12 in DDIM\n xt = (x0_hat * gt0(snr_next) ** 0.5\n + noise_pred * gt0(1. - snr_next - sigma ** 2) ** 0.5\n + noise * sigma)\n return xt, x0_hat\n\n\ndef sample_p(xT: Array,\n forward_fn: ForwardFn,\n steps: int,\n rng: Any,\n eta: Union[float, Array] = 0.,\n clip_percentile: float = 0.995,\n ) -> Array:\n\n def body_fn(index: int, state: Tuple[Array, Any]) -> Tuple[Array, Any]:\n xt, rng = state\n rng, rng_split = jax.random.split(rng)\n snr = jnp.repeat(cosine_snr(1. - index / steps), len(xt))\n snr_next = jnp.repeat(cosine_snr(1. - (index + 1) / steps), len(xt))\n noise_pred = forward_fn(xt, snr, False)\n noise = jax.random.normal(rng_split, shape=xt.shape)\n xt_next, _ = sample_p_step(xt, noise_pred, snr, snr_next, eta, noise,\n clip_percentile)\n return xt_next, rng\n\n initial_state = xT, rng\n x0, _ = jax.lax.fori_loop(0, steps, body_fn, initial_state)\n return x0\n\n\nclass Sampler:\n\n def __init__(self, params: Params, model_fn: Callable[[], Model]) -> None:\n self.params = params\n\n def forward_fn(xt: Array, snr: Array) -> Array:\n return model_fn()(xt, snr, False)\n\n forward = hk.transform(forward_fn)\n forward = hk.without_apply_rng(forward)\n\n def body_fn(index: int,\n state: Tuple[Array, Params, Any, int, float, float],\n ) -> Tuple[Array, Params, Any, int, float, float]:\n xt, params, rng, steps, eta, clip_percentile = state\n rng, rng_split = jax.random.split(rng)\n snr = jnp.repeat(cosine_snr(1. - index / steps), len(xt))\n snr_next = jnp.repeat(cosine_snr(1. 
- (index + 1) / steps),\n                                  len(xt))\n            noise_pred = forward.apply(params, xt, snr)\n            noise = jax.random.normal(rng_split, shape=xt.shape)\n            xt_next, _ = sample_p_step(xt, noise_pred, snr, snr_next, eta,\n                                       noise, clip_percentile)\n            return xt_next, params, rng, steps, eta, clip_percentile\n\n        self.body_fn = jax.jit(body_fn)\n\n    def set_params(self, params: Params) -> Sampler:\n        self.params = params\n        return self\n\n    def sample_p(self,\n                 xT: Array,\n                 steps: int,\n                 rng: Any,\n                 eta: float = 0.,\n                 clip_percentile: float = 0.995,\n                 ) -> Array:\n        initial_state = xT, self.params, rng, steps, eta, clip_percentile\n        x0, *_ = jax.lax.fori_loop(0, steps, self.body_fn, initial_state)\n        logger.info(f'min={x0.min()}, max={x0.max()}, mean={x0.mean()}, '\n                    f'std={x0.std()}')\n        x0 = x0.clip(-1., 1.)\n        return x0\n", "repo_name": "nlsfnr/Kigo", "sub_path": "kigo/diffusion.py", "file_name": "diffusion.py", "file_ext": "py", "file_size_in_byte": 5186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.get_logger", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.TypeVar", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 15, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 18, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 18, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 20, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 21, "usage_type": "argument"}, {"api_name": "einops.rearrange", "line_number": 22, "usage_type": "call"}, {"api_name": "chex.Array", "line_number": 23, "usage_type": "argument"}, {"api_name": "jax.numpy.clip", "line_number": 30, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 33, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 33, "usage_type": "name"}, {"api_name": "jax.numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 35, "usage_type": "name"}, {"api_name": "jax.numpy.clip", "line_number": 36, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 36, "usage_type": "name"}, {"api_name": "jax.numpy.cos", "line_number": 37, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 37, "usage_type": "name"}, {"api_name": "jax.numpy.pi", "line_number": 37, "usage_type": "attribute"}, {"api_name": "chex.Array", "line_number": 40, "usage_type": "name"}, {"api_name": "einops.rearrange", "line_number": 41, "usage_type": "call"}, {"api_name": "chex.Array", "line_number": 46, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 48, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 49, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 50, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 50, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 52, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 52, "usage_type": "name"}, {"api_name": 
"jax.numpy.quantile", "line_number": 65, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 65, "usage_type": "name"}, {"api_name": "jax.numpy.abs", "line_number": 65, "usage_type": "call"}, {"api_name": "jax.numpy.where", "line_number": 67, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 67, "usage_type": "name"}, {"api_name": "jax.numpy.clip", "line_number": 68, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 68, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 53, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 53, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 77, "usage_type": "name"}, {"api_name": "nn.ForwardFn", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 81, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 85, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 85, "usage_type": "name"}, {"api_name": "jax.random.split", "line_number": 87, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 87, "usage_type": "attribute"}, {"api_name": "jax.numpy.repeat", "line_number": 88, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 88, "usage_type": "name"}, {"api_name": "jax.numpy.repeat", "line_number": 89, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 89, "usage_type": "name"}, {"api_name": "jax.random.normal", "line_number": 91, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 91, "usage_type": "attribute"}, {"api_name": "jax.lax.fori_loop", "line_number": 97, "usage_type": "call"}, {"api_name": "jax.lax", "line_number": 97, "usage_type": "attribute"}, {"api_name": "chex.Array", "line_number": 83, "usage_type": "name"}, {"api_name": "utils.Params", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 103, "usage_type": "name"}, {"api_name": "nn.Model", "line_number": 103, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 106, "usage_type": "name"}, {"api_name": "haiku.transform", "line_number": 109, "usage_type": "call"}, {"api_name": "haiku.without_apply_rng", "line_number": 110, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 113, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 113, "usage_type": "name"}, {"api_name": "utils.Params", "line_number": 113, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 113, "usage_type": "name"}, {"api_name": "jax.random.split", "line_number": 116, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 116, "usage_type": "attribute"}, {"api_name": "jax.numpy.repeat", "line_number": 117, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 117, "usage_type": "name"}, {"api_name": "jax.numpy.repeat", "line_number": 118, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 118, "usage_type": "name"}, {"api_name": "jax.random.normal", "line_number": 121, "usage_type": "call"}, {"api_name": "jax.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 114, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 114, "usage_type": "name"}, {"api_name": "utils.Params", "line_number": 114, "usage_type": "name"}, {"api_name": "typing.Any", 
"line_number": 114, "usage_type": "name"}, {"api_name": "jax.jit", "line_number": 126, "usage_type": "call"}, {"api_name": "utils.Params", "line_number": 128, "usage_type": "name"}, {"api_name": "chex.Array", "line_number": 133, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 135, "usage_type": "name"}, {"api_name": "jax.lax.fori_loop", "line_number": 140, "usage_type": "call"}, {"api_name": "jax.lax", "line_number": 140, "usage_type": "attribute"}, {"api_name": "chex.Array", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "41167866114", "text": "from pygame import image, PixelArray\nimport sys\n\nimg = sys.argv[1]\n\narr = PixelArray(image.load(img))\nmax_size = int(str(arr[0][0])[-1])\ndigits = int(str(arr[0][1])[-1])\n\nprint(digits)\n\nlength = ''\nfor i in range(2,digits+2):\n length += str(arr[0][i])[-1:]\n\nlength = int(length)\nprint('Length:', length)\n\nheight = len(arr[0])\n\nword = ''\nx = 0\ny = digits+2\nchar_buffer = ''\nfor i in range(length*max_size+1):\n if len(char_buffer) == max_size:\n print('Decoding character:', char_buffer)\n word += chr(\n int(\n char_buffer\n ))\n char_buffer = ''\n\n char_buffer += str(arr[x][y])[-1:]\n\n if y < height - 1:\n y += 1\n else:\n x += 1\n y = 0\n\nprint(word)\n", "repo_name": "JellyWX/ImageConceal", "sub_path": "reveal.py", "file_name": "reveal.py", "file_ext": "py", "file_size_in_byte": 681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 4, "usage_type": "attribute"}, {"api_name": "pygame.PixelArray", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "33735310032", "text": "from django.test import TestCase\nfrom django.db.models import Count\nfrom tests.models import Book\nfrom django_tabulate import tabulate_qs\n\nclass TabulateTests(TestCase):\n\n def setUp(self):\n Book.objects.create(name='Python')\n Book.objects.create(name='Django')\n Book.objects.create(name='Tabulate')\n \n def test_method_execution(self):\n qs = Book.objects.all()\n self.assertEqual(qs.tabulate(), '\\n'.join([\n ' id name',\n '---- --------',\n ' 1 Python',\n ' 2 Django',\n ' 3 Tabulate',\n ]))\n\n self.assertEqual(qs.tabulate(), tabulate_qs(qs))\n\n def test_kwargs_execution(self):\n qs = Book.objects.all()\n self.assertEqual(qs.tabulate(tablefmt='grid'), '\\n'.join([\n '+------+----------+',\n '| id | name |',\n '+======+==========+',\n '| 1 | Python |',\n '+------+----------+',\n '| 2 | Django |',\n '+------+----------+',\n '| 3 | Tabulate |',\n '+------+----------+',\n ]))\n\n def test_values_with_annotation(self):\n qs = Book.objects.values('name').annotate(Count('pk'))\n self.assertEqual(qs.tabulate(tablefmt='psql'), '\\n'.join([\n '+----------+-------------+',\n '| name | pk__count |',\n '|----------+-------------|',\n '| Django | 1 |',\n '| Python | 1 |',\n '| Tabulate | 1 |',\n '+----------+-------------+',\n ]))", "repo_name": "todorvelichkov/django-tabulate", "sub_path": "tests/test_mixin.py", "file_name": "test_mixin.py", "file_ext": "py", "file_size_in_byte": 1601, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "tests.models.Book.objects.create", "line_number": 9, "usage_type": "call"}, {"api_name": 
"tests.models.Book.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 9, "usage_type": "name"}, {"api_name": "tests.models.Book.objects.create", "line_number": 10, "usage_type": "call"}, {"api_name": "tests.models.Book.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 10, "usage_type": "name"}, {"api_name": "tests.models.Book.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "tests.models.Book.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 11, "usage_type": "name"}, {"api_name": "tests.models.Book.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "tests.models.Book.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 14, "usage_type": "name"}, {"api_name": "django_tabulate.tabulate_qs", "line_number": 23, "usage_type": "call"}, {"api_name": "tests.models.Book.objects.all", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.models.Book.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.models.Book.objects.values", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.models.Book.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tests.models.Book", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "36490074533", "text": "#-----------------------------------------------------------------------------\n# Main process for topography downscale for meteorological forcing \n#\n# Author: Lu Li, Sisi Chen, Zhongwang Wei\n#-----------------------------------------------------------------------------\nimport os\nimport argparse\nimport datetime\nfrom multiprocessing import Process\n\nimport numpy as np\nimport netCDF4 as nc\nimport matplotlib.pyplot as plt\n\nfrom topo import (downscale_air_pressure, \n downscale_air_temperature,\n downscale_dew_temperature,\n downscale_in_longwave_radiation,\n downscale_in_shortwave_radiation,\n downscale_precipitation, \n downscale_specific_humidity,\n downscale_wind_speed, \n downscale_precipitation)\nfrom utils import save2nc\n\n\n\ndef main(\n year, \n month, \n day_of_month, # Start from 0\n blat, ulat,\n llon, rlon,\n region_name,\n DATA_PATH='/tera06/lilu/ForDs/data/GD/'):\n \n print('Downscaling forcing in {year:04}-{month:02}-{day:02}'.format(\n year=year, month=month, day=day_of_month))\n print('The region is {region_name}'.format(region_name=region_name))\n print('The lat range is {blat}-{ulat}'.format(blat=blat,ulat=ulat))\n print('The lon range is {llon}-{rlon}'.format(llon=llon,rlon=rlon))\n print('The authors are Lu Li, Sisi Chen, Zhongwang Wei, SYSU')\n print('\\033[1;31m%s\\033[0m' % ' Read and Processing input data')\n print(\"read data begin at: \", datetime.datetime.today())\n \n # load coare DEM data (20x20)\n f = nc.Dataset(DATA_PATH+'DEM/SRTM/ERA5Land_height.nc', 'r')\n lat_coarse, lon_coarse = f['latitude'][:], f['longitude'][:]\n lat_coarse_index = np.where((lat_coarse>blat) & (lat_coarsellon) & (lon_coarselat_coarse[-1]) & (lat_finelon_coarse[0]) & (lon_fine rad\n f = nc.Dataset(DATA_PATH+'DEM/MERITDEM/MERITDEM_GD_curvature.nc', 'r')\n curvature_fine = f['slp'][lat_fine_index][:,lon_fine_index]\n f = 
nc.Dataset(DATA_PATH+'DEM/MERITDEM/MERITDEM_GD_SkyViewFactor.nc', 'r')\n svf_fine = f['svf'][lat_fine_index][:,lon_fine_index]\n shadow_mask_LUT = np.load(DATA_PATH+'DEM/MERITDEM/shadow_mask_LUT.npy')[lat_fine_index][:,lon_fine_index]\n \n\n # get bilinear interpolate weight by cdo\n os.system(\"cdo griddes DEM_coarse.nc > grid_coarse.txt\")\n os.system(\"sed -i 's/generic/lonlat/g' grid_coarse.txt\")\n os.system(\"cdo griddes DEM_fine.nc > grid_fine.txt\")\n os.system(\"sed -i 's/generic/lonlat/g' grid_fine.txt\")\n os.system(\"cdo setgrid,grid_coarse.txt DEM_coarse.nc DEM_coarse_1.nc\")\n os.system(\"cdo genbil,grid_fine.txt DEM_coarse_1.nc weight.nc\")\n os.system(\"rm -rf DEM_coarse.nc DEM_coarse_1.nc\")\n \n\n print('\\033[1;31m%s\\033[0m' % ' Downscaling')\n print('We downscale {shape_coarse} to {shape_fine}'.format(\n shape_coarse=elev_coarse.shape, shape_fine=elev_fine.shape))\n print(\"downscaling begin at: \",datetime.datetime.today())\n\n # downscale air temperature\n print('processing t2m')\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_t2m_interp.nc'.format(year=year, month=month), 'r')\n t2m_fine_interp = f['t2m'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index]\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_t2m.nc'.format(year=year, month=month), 'r')\n t2m_coarse = f['t2m'][day_of_month*24:day_of_month*24+24][:,lat_coarse_index][:,:,lon_coarse_index]\n t2m_fine = downscale_air_temperature(t2m_coarse,\n t2m_fine_interp,\n elev_coarse,\n elev_fine_interp,\n elev_fine,\n lat_coarse,\n lon_coarse,\n lat_fine,\n lon_fine,\n year,\n month,\n day_of_month)\n save2nc('t2m', year, month, day_of_month, t2m_fine, lat_fine, lon_fine)\n\n \n # downscale dew temperature\n print('processing d2m')\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_d2m_interp.nc'.format(year=year, month=month), 'r')\n d2m_fine_interp = f['d2m'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index]\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_d2m.nc'.format(year=year, month=month), 'r')\n d2m_coarse = f['d2m'][day_of_month*24:day_of_month*24+24][:,lat_coarse_index][:,:,lon_coarse_index]\n d2m_fine = downscale_dew_temperature(d2m_coarse,\n d2m_fine_interp,\n elev_coarse,\n elev_fine_interp,\n elev_fine,\n lat_coarse,\n lon_coarse,\n lat_fine,\n lon_fine,\n year,\n month,\n day_of_month)\n save2nc('d2m', year, month, day_of_month, d2m_fine, lat_fine, lon_fine)\n\n # downscale air pressure\n print('processing sp')\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_sp_interp.nc'.format(year=year, month=month), 'r')\n sp_fine_interp = f['sp'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index]\n sp_fine = downscale_air_pressure(sp_fine_interp,\n t2m_fine_interp,\n t2m_fine,\n elev_fine_interp,\n elev_fine)\n save2nc('sp', year, month, day_of_month, sp_fine, lat_fine, lon_fine)\n\n # downscale specific humidity\n print('processing Q')\n Q_fine = downscale_specific_humidity(sp_fine, d2m_fine)\n save2nc('Q', year, month, day_of_month, Q_fine, lat_fine, lon_fine)\n\n # downscale longwave radiation\n print('processing longwave radiation')\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_strd_interp.nc'.format(year=year, month=month), 'r') \n strd_fine_interp = f['strd'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index] \n f = 
nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_strd.nc'.format(year=year, month=month), 'r')\n strd_coarse = f['strd'][day_of_month*24:day_of_month*24+24][:,lat_coarse_index][:,:,lon_coarse_index] \n strd_fine = downscale_in_longwave_radiation(strd_coarse,\n strd_fine_interp,\n t2m_coarse,\n d2m_coarse,\n t2m_fine,\n d2m_fine,\n t2m_fine_interp,\n lat_coarse,\n lon_coarse, \n year, \n month, \n day_of_month)\n save2nc('strd', year, month, day_of_month, strd_fine, lat_fine, lon_fine)\n \n # downscale wind\n print('processing wind speed')\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_u10_interp.nc'.format(year=year, month=month), 'r')\n u10_fine_interp = f['u10'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index] \n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_v10_interp.nc'.format(year=year, month=month), 'r')\n v10_fine_interp = f['v10'][day_of_month*24:day_of_month*24+24][:,lat_fine_index][:,:,lon_fine_index] \n ws_fine = downscale_wind_speed(u10_fine_interp,\n v10_fine_interp,\n slope_fine,\n aspect_fine,\n curvature_fine) \n save2nc('ws', year, month, day_of_month, ws_fine, lat_fine, lon_fine)\n \n \n # downscale short radiation\n print('processing short radiation')\n f = nc.Dataset(DATA_PATH+'Albedo/{year:04}_{month:02}_GD_bsa_interp.nc'.format(year=year, month=month), 'r')\n black_sky_albedo_interp = np.array(f['bsa'][day_of_month,:,:][lat_fine_index][:,lon_fine_index])\n black_sky_albedo_interp[black_sky_albedo_interp<0] = 0\n black_sky_albedo_interp[black_sky_albedo_interp>1] = 0\n black_sky_albedo_interp = np.tile(black_sky_albedo_interp[np.newaxis], (24, 1, 1))\n f = nc.Dataset(DATA_PATH+'Albedo/{year:04}_{month:02}_GD_wsa_interp.nc'.format(year=year, month=month), 'r')\n white_sky_albedo_interp = np.array(f['wsa'][day_of_month,:,:][lat_fine_index][:,lon_fine_index])\n white_sky_albedo_interp[white_sky_albedo_interp<0] = 0\n white_sky_albedo_interp[white_sky_albedo_interp>1] = 0\n white_sky_albedo_interp = np.tile(white_sky_albedo_interp[np.newaxis], (24, 1, 1))\n\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_ssrd.nc'.format(year=year, month=month), 'r')\n ssrd_coarse = f['ssrd'][day_of_month*24:day_of_month*24+24,:,:][:,lat_coarse_index][:,:,lon_coarse_index]\n ssrd_coarse[ssrd_coarse<0] = 0\n f = nc.Dataset(DATA_PATH+'forcing/ERA5LAND_GD_{year:04}_{month:02}_sp.nc'.format(year=year, month=month), 'r')\n sp_coarse = f['sp'][day_of_month*24:day_of_month*24+24,:,:][:,lat_coarse_index][:,:,lon_coarse_index]\n ssrd_fine = downscale_in_shortwave_radiation(\n ssrd_coarse,\n sp_coarse,\n sp_fine,\n sp_fine_interp,\n black_sky_albedo_interp,\n white_sky_albedo_interp,\n slope_fine,\n aspect_fine,\n svf_fine,\n year,\n month,\n day_of_month,\n lat_coarse,\n lon_coarse,\n lat_fine,\n lon_fine,\n shadow_mask_LUT)\n save2nc('ssrd', year, month, day_of_month, ssrd_fine, lat_fine, lon_fine)\n \n # downscale precipitation\n \"\"\"\n # standardization\n norm_const = np.load('norm_params.npy')\n t2m_min, t2m_max = norm_const[0,0], norm_const[0,1]\n d2m_min, d2m_max = norm_const[1,0], norm_const[1,1]\n Q_min, Q_max = norm_const[2,0], norm_const[2,1]\n strd_min, strd_max = norm_const[3,0], norm_const[3,1]\n ssrd_min, ssrd_max = norm_const[4,0], norm_const[4,1]\n sp_min, sp_max = norm_const[5,0], norm_const[5,1]\n ws_min, ws_max = norm_const[6,0], norm_const[6,1]\n elev_max = norm_const[7,0]\n \n t2m_fine = (t2m_fine-t2m_min)/(t2m_max-t2m_min)\n d2m_fine = (d2m_fine-d2m_min)/(d2m_max-d2m_min)\n 
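# --- editor's sketch (not in the original source; illustration only): the\n    # per-variable scaling in this disabled block can also be written as one\n    # vectorized step; the stack order mirrors the rows of 'norm_params.npy'\n    # read above (t2m, d2m, Q, strd, ssrd, sp, ws), and all names are assumed\n    # to be in scope as in the surrounding lines.\n    # fields = np.stack([t2m_fine, d2m_fine, Q_fine, strd_fine, ssrd_fine, sp_fine, ws_fine])\n    # mins = norm_const[:7, 0][:, None, None, None]\n    # maxs = norm_const[:7, 1][:, None, None, None]\n    # fields = (fields - mins) / (maxs - mins)\n    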
Q_fine = (Q_fine-Q_min)/(Q_max-Q_min)\n    strd_fine = (strd_fine-strd_min)/(strd_max-strd_min)\n    ssrd_fine = (ssrd_fine-ssrd_min)/(ssrd_max-ssrd_min)\n    sp_fine = (sp_fine-sp_min)/(sp_max-sp_min)\n    ws_fine = (ws_fine-ws_min)/(ws_max-ws_min)\n    elev_fine = elev_fine/elev_max\n    lat_fine = lat_fine/360\n    lon_fine = lon_fine/360\n    \"\"\"\n    \n    downscale_precipitation(\n        t2m_fine,\n        d2m_fine,\n        sp_fine,\n        Q_fine,\n        strd_fine,\n        ssrd_fine,\n        ws_fine,\n        lat_fine,\n        lon_fine,\n        elev_fine,\n        year, \n        month, \n        day_of_month)\n    print(\"Please use predict_tp.py to downscale precipitation, \\\n           this function only saves test data\")\n    print(\"downscaling end at: \",datetime.datetime.today())\n    \n\ndef par_main(args):\n    # days per month for the given year (handles leap years); currently unused\n    if ((args.year%4==0) and (args.year%100!=0)) or args.year%400==0:\n        month_day = [31,29,31,30,31,30,31,31,30,31,30,31]\n    else:\n        month_day = [31,28,31,30,31,30,31,31,30,31,30,31]\n    \n    # spawn one process per day; each call covers a 24 hour interval\n    for i in range(args.begin_day, args.end_day):\n        job = Process(target=main, args=( \n            args.year,\n            args.month,\n            i,\n            args.blat,\n            args.ulat,\n            args.llon,\n            args.rlon,\n            args.region_name))\n        job.start()\n        job.join()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--year', type=int, default=2018)\n    parser.add_argument('--month', type=int, default=1)\n    parser.add_argument('--begin_day', type=int, default=1)\n    parser.add_argument('--end_day', type=int, default=2)\n    parser.add_argument('--blat', type=float, default=23.5)\n    parser.add_argument('--ulat', type=float, default=24.5)\n    parser.add_argument('--llon', type=float, default=112.5)\n    parser.add_argument('--rlon', type=float, default=113.5)\n    parser.add_argument('--region_name', type=str, default='random')\n    args = parser.parse_args()\n    \n    par_main(args)\n\n", "repo_name": "leelew/ForDs", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 16399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datetime.datetime.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 50, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 55, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 68, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 73, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 83, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 88, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 91, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 92, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 96, "usage_type": "call"}, {"api_name": "os.system", "line_number": 100, "usage_type": "call"}, {"api_name": "os.system", "line_number": 101, "usage_type": "call"}, {"api_name": "os.system", "line_number": 102, 
"usage_type": "call"}, {"api_name": "os.system", "line_number": 103, "usage_type": "call"}, {"api_name": "os.system", "line_number": 104, "usage_type": "call"}, {"api_name": "os.system", "line_number": 105, "usage_type": "call"}, {"api_name": "os.system", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 116, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 118, "usage_type": "call"}, {"api_name": "topo.downscale_air_temperature", "line_number": 120, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 132, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 137, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 139, "usage_type": "call"}, {"api_name": "topo.downscale_dew_temperature", "line_number": 141, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 153, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 157, "usage_type": "call"}, {"api_name": "topo.downscale_air_pressure", "line_number": 159, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 164, "usage_type": "call"}, {"api_name": "topo.downscale_specific_humidity", "line_number": 168, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 169, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 173, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 175, "usage_type": "call"}, {"api_name": "topo.downscale_in_longwave_radiation", "line_number": 177, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 189, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 193, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 195, "usage_type": "call"}, {"api_name": "topo.downscale_wind_speed", "line_number": 197, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 202, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 211, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 216, "usage_type": "attribute"}, {"api_name": "netCDF4.Dataset", "line_number": 218, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 221, "usage_type": "call"}, {"api_name": "topo.downscale_in_shortwave_radiation", "line_number": 223, "usage_type": "call"}, {"api_name": "utils.save2nc", "line_number": 241, "usage_type": "call"}, {"api_name": "topo.downscale_precipitation", "line_number": 268, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 284, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 296, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 310, "usage_type": "call"}]} +{"seq_id": "72238644635", "text": "#\n# ABOUT\n# Artisan Profile Transposer\n\n# LICENSE\n# This program or 
module is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as published\n# by the Free Software Foundation, either version 2 of the License, or\n# version 3 of the License, or (at your option) any later version. It is\n# provided for educational purposes and is distributed in the hope that\n# it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See\n# the GNU General Public License for more details.\n\n# AUTHOR\n# Marko Luther, 2023\n\nimport time as libtime\nimport warnings\nimport copy\nimport numpy\nfrom typing import List, Tuple, Callable, Optional, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from artisanlib.main import ApplicationWindow # noqa: F401 # pylint: disable=unused-import\n from PyQt6.QtWidgets import QWidget # noqa: F401 # pylint: disable=unused-import\n import numpy.typing as npt # pylint: disable=unused-import\n\nfrom artisanlib.dialogs import ArtisanDialog\nfrom artisanlib.util import stringfromseconds, stringtoseconds\n\n\ntry:\n from PyQt6.QtCore import Qt, pyqtSlot, QSettings, QRegularExpression, QDateTime # @UnusedImport @Reimport @UnresolvedImport\n from PyQt6.QtGui import QRegularExpressionValidator # @UnusedImport @Reimport @UnresolvedImport\n from PyQt6.QtWidgets import (QApplication, QHeaderView, QAbstractItemView, QWidget, QLabel, QLineEdit, QComboBox, QDialogButtonBox, # @UnusedImport @Reimport @UnresolvedImport\n QTableWidget, QTableWidgetItem, QGroupBox, QLayout, QHBoxLayout, QVBoxLayout, QFrame) # @UnusedImport @Reimport @UnresolvedImport\nexcept ImportError:\n from PyQt5.QtCore import Qt, pyqtSlot, QSettings, QRegularExpression, QDateTime # type: ignore # @UnusedImport @Reimport @UnresolvedImport\n from PyQt5.QtGui import QRegularExpressionValidator # type: ignore # @UnusedImport @Reimport @UnresolvedImport\n from PyQt5.QtWidgets import (QApplication, QHeaderView, QAbstractItemView, QWidget, QLabel, QLineEdit, QComboBox, QDialogButtonBox, # type: ignore # @UnusedImport @Reimport @UnresolvedImport\n QTableWidget, QTableWidgetItem, QGroupBox, QLayout, QHBoxLayout, QVBoxLayout, QFrame) # type: ignore # @UnusedImport @Reimport @UnresolvedImport\n\n\nclass MyQRegularExpressionValidator(QRegularExpressionValidator): # pyright: ignore [reportGeneralTypeIssues] # Argument to class must be a base class\n # we fix partial time input like '12' => '12:00', '12:' => '12:00' and '12:0' => '12:00'\n\n @staticmethod\n def fixup(value):\n if ':' not in value:\n return value + ':00'\n if value.endswith(':'):\n return value + '00'\n if len(value[value.index(':')+1:]) == 1:\n return value + '0'\n return value\n\nclass profileTransformatorDlg(ArtisanDialog):\n def __init__(self, parent:'QWidget', aw:'ApplicationWindow') -> None:\n super().__init__(parent, aw)\n self.setModal(True)\n self.setWindowTitle(QApplication.translate('Form Caption','Profile Transposer'))\n\n self.helpdialog = None\n\n self.regexpercent = QRegularExpression(r'^$|[0-9]?[0-9]?(\\.[0-9])?')\n self.regextime = QRegularExpression(r'^$|[0-9]?[0-9]:[0-5][0-9]')\n self.regextemp = QRegularExpression(r'^$|[0-9]?[0-9]?[0-9]?(\\.[0-9])?')\n\n # original data\n self.org_transMappingMode = self.aw.qmc.transMappingMode\n self.org_timex = self.aw.qmc.timex[:]\n self.org_temp2 = self.aw.qmc.temp2[:]\n self.org_extratimex = copy.deepcopy(self.aw.qmc.extratimex)\n self.org_curFile = self.aw.curFile\n self.org_UUID = self.aw.qmc.roastUUID\n self.org_roastdate = self.aw.qmc.roastdate\n 
self.org_roastepoch = self.aw.qmc.roastepoch\n self.org_roasttzoffset = self.aw.qmc.roasttzoffset\n self.org_roastbatchnr = self.aw.qmc.roastbatchnr\n self.org_safesaveflag = self.aw.qmc.safesaveflag\n self.org_l_event_flags_dict = self.aw.qmc.l_event_flags_dict\n self.org_l_annotations_dict = self.aw.qmc.l_annotations_dict\n\n self.phasestable = QTableWidget()\n self.timetable = QTableWidget()\n self.temptable = QTableWidget()\n\n # time table widgets initialized by createTimeTable() to a list (target/result) with 4 widgets each\n # DRY, FCs, SCs, DROP\n # if an event is not set in the profile, None is set instead of a widget\n #\n self.phases_target_widgets_time:Optional[List[Optional[QLineEdit]]] = None\n self.phases_target_widgets_percent:Optional[List[Optional[QLineEdit]]] = None\n self.phases_result_widgets:Optional[List[Optional[QTableWidgetItem]]] = None\n #\n self.time_target_widgets:Optional[List[Optional[QLineEdit]]] = None\n self.time_result_widgets:Optional[List[Optional[QTableWidgetItem]]] = None\n\n # profileTimes: list of DRY, FCs, SCs and DROP times in seconds if event is set, otherwise None\n self.profileTimes:List[Optional[float]] = self.getProfileTimes()\n # list of DRY, FCs, SCs, and DROP target times in seconds as specified by the user, or None if not set\n self.targetTimes:List[Optional[float]] = self.getTargetTimes()\n\n # temp table widgets initialized by createTempTable() to a list (target/result) with 5 widgets each\n # CHARGE, DRY, FCs, SCs, DROP\n # if an event is not set in the profile, None is set instead of a widget\n self.temp_target_widgets:Optional[List[Optional[QLineEdit]]] = None\n self.temp_result_widgets:Optional[List[Optional[QTableWidgetItem]]] = None\n\n # list of CHARGE, DRY, FCs, SCs and DROP BT temperatures\n self.profileTemps = self.getProfileTemps()\n # list of DRY, FCs, SCs, and DROP target temperatures as specified by the user, or None if not set\n self.targetTemps = self.getTargetTemps()\n\n self.createPhasesTable()\n self.createTimeTable()\n self.createTempTable()\n\n # connect the ArtisanDialog standard OK/Cancel buttons\n self.dialogbuttons.accepted.connect(self.applyTransformations)\n self.dialogbuttons.rejected.connect(self.restoreState)\n self.applyButton = self.dialogbuttons.addButton(QDialogButtonBox.StandardButton.Apply)\n self.resetButton = self.dialogbuttons.addButton(QDialogButtonBox.StandardButton.Reset)\n self.helpButton = self.dialogbuttons.addButton(QDialogButtonBox.StandardButton.Help)\n if self.applyButton is not None:\n self.applyButton.clicked.connect(self.apply)\n self.setButtonTranslations(self.applyButton,'Apply',QApplication.translate('Button','Apply'))\n if self.resetButton is not None:\n self.resetButton.clicked.connect(self.restore)\n self.setButtonTranslations(self.resetButton,'Reset',QApplication.translate('Button','Reset'))\n if self.helpButton is not None:\n self.helpButton.clicked.connect(self.openHelp)\n self.setButtonTranslations(self.helpButton,'Help',QApplication.translate('Button','Help'))\n\n #buttons\n buttonsLayout = QHBoxLayout()\n buttonsLayout.addWidget(self.dialogbuttons)\n\n mappingLabel = QLabel(QApplication.translate('Label','Mapping'))\n self.mappingModeComboBox = QComboBox()\n self.mappingModeComboBox.addItems([QApplication.translate('ComboBox','discrete'),\n QApplication.translate('ComboBox','linear'),\n QApplication.translate('ComboBox','quadratic')])\n self.mappingModeComboBox.setCurrentIndex(self.aw.qmc.transMappingMode)\n 
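# --- editor's sketch (not in the original source): of the three mapping\n        # modes above, 'discrete' uses segment-wise fits (calcDiscretefits()),\n        # while 'linear'/'quadratic' use a degree-1/degree-2 numpy.polyfit via\n        # calcTimePolyfit(); with hypothetical CHARGE/DRY/FCs/DROP seconds:\n        #   profile = [0, 270, 480, 600]\n        #   target  = [0, 300, 510, 620]\n        #   linear  = numpy.poly1d(numpy.polyfit(profile, target, 1))\n        #   linear(480)  # -> ~505, a least-squares map close to the 510s target\n        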
self.mappingModeComboBox.currentIndexChanged.connect(self.changeMappingMode)\n\n self.temp_formula:QLabel = QLabel()\n self.temp_formula.setTextInteractionFlags(Qt.TextInteractionFlag.TextSelectableByMouse)\n\n settingsHLayout = QHBoxLayout()\n settingsHLayout.addStretch()\n settingsHLayout.addWidget(mappingLabel)\n settingsHLayout.addWidget(self.mappingModeComboBox)\n settingsHLayout.addStretch()\n\n phasesHLayout = QHBoxLayout()\n phasesHLayout.addStretch()\n phasesHLayout.addWidget(self.phasestable)\n phasesHLayout.addStretch()\n phasesLayout = QVBoxLayout()\n phasesLayout.addLayout(phasesHLayout)\n\n timeHLayout = QHBoxLayout()\n timeHLayout.addStretch()\n timeHLayout.addWidget(self.timetable)\n timeHLayout.addStretch()\n timeLayout = QVBoxLayout()\n timeLayout.addLayout(timeHLayout)\n timeLayout.addStretch()\n\n tempHLayout = QHBoxLayout()\n tempHLayout.addWidget(self.temptable)\n tempHLayout.addStretch()\n formulaHLayout = QHBoxLayout()\n formulaHLayout.addStretch()\n formulaHLayout.addWidget(self.temp_formula)\n formulaHLayout.addStretch()\n tempLayout = QVBoxLayout()\n tempLayout.addLayout(tempHLayout)\n tempLayout.addLayout(formulaHLayout)\n tempLayout.addStretch()\n\n phasesGroupLayout = QGroupBox(QApplication.translate('Table','Phases'))\n phasesGroupLayout.setLayout(phasesLayout)\n timeGroupLayout = QGroupBox(QApplication.translate('Table','Time'))\n timeGroupLayout.setLayout(timeLayout)\n tempGroupLayout = QGroupBox(QApplication.translate('Table','BT'))\n tempGroupLayout.setLayout(tempLayout)\n\n #main\n mainlayout = QVBoxLayout()\n mainlayout.addLayout(settingsHLayout)\n mainlayout.addWidget(phasesGroupLayout)\n mainlayout.addWidget(timeGroupLayout)\n mainlayout.addWidget(tempGroupLayout)\n mainlayout.addStretch()\n mainlayout.addLayout(buttonsLayout)\n\n self.setLayout(mainlayout)\n ok_button = self.dialogbuttons.button(QDialogButtonBox.StandardButton.Ok)\n if ok_button is not None:\n ok_button.setFocus()\n\n settings = QSettings()\n if settings.contains('TransformatorPosition'):\n self.move(settings.value('TransformatorPosition'))\n\n mainlayout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)\n\n\n # utility functions\n\n def forgroundOffset(self) -> float:\n if self.aw.qmc.timeindex[0] == -1:\n return 0\n return self.org_timex[self.aw.qmc.timeindex[0]]\n\n def backgroundOffset(self):\n if self.aw.qmc.timeindexB[0] != -1 and len(self.aw.qmc.timeB) > self.aw.qmc.timeindexB[0]:\n return self.aw.qmc.timeB[self.aw.qmc.timeindexB[0]]\n return 0\n\n def clearPhasesTargetTimes(self):\n if self.phases_target_widgets_time is not None and len(self.phases_target_widgets_time)>2:\n for i in range(3):\n phases_target_widgets_time = self.phases_target_widgets_time[i]\n if phases_target_widgets_time is not None:\n phases_target_widgets_time.setText('')\n\n def clearPhasesTargetPercent(self):\n if self.phases_target_widgets_percent is not None and len(self.phases_target_widgets_percent)>2:\n for i in range(3):\n phases_target_widgets_percent = self.phases_target_widgets_percent[i]\n if phases_target_widgets_percent is not None:\n phases_target_widgets_percent.setText('')\n\n def clearPhasesResults(self):\n if self.phases_result_widgets is not None and len(self.phases_result_widgets)>2:\n for i in range(3):\n phases_result_widgets = self.phases_result_widgets[i]\n if phases_result_widgets is not None:\n phases_result_widgets.setText('')\n\n def clearTimeTargets(self):\n if self.time_target_widgets is not None and len(self.time_target_widgets)>3:\n for i in range(4):\n 
time_target_widgets = self.time_target_widgets[i]\n if time_target_widgets is not None:\n time_target_widgets.setText('')\n\n def clearTimeResults(self):\n if self.time_result_widgets is not None and len(self.time_result_widgets)>3:\n for i in range(4):\n time_result_widgets = self.time_result_widgets[i]\n if time_result_widgets is not None:\n time_result_widgets.setText('')\n\n def clearTempTargets(self):\n if self.temp_target_widgets is not None and len(self.temp_target_widgets)>4:\n for i in range(5):\n temp_target_widget:Optional[QLineEdit] = self.temp_target_widgets[i]\n if temp_target_widget is not None:\n temp_target_widget.setText('')\n\n def clearTempResults(self):\n if self.temp_result_widgets is not None and len(self.temp_result_widgets)>4:\n for i in range(5):\n temp_result_widget:Optional[QTableWidgetItem] = self.temp_result_widgets[i]\n if temp_result_widget is not None:\n temp_result_widget.setText('')\n self.temp_formula.setText('')\n self.temp_formula.repaint()\n\n # returns list of DRY, FCs, SCs and DROP profile times in seconds if event is set, otherwise None\n def getProfileTimes(self) -> List[Optional[float]]:\n offset = self.forgroundOffset()\n res:List[Optional[float]] = []\n for i in [1,2,4,6]:\n idx = self.aw.qmc.timeindex[i]\n if idx == 0 or len(self.aw.qmc.timex) < idx:\n res.append(None)\n else:\n res.append(self.aw.qmc.timex[idx] - offset)\n return res\n\n # returns list of CHARGE, DRY, FCs, SCs and DROP BT temperatures if event is set, otherwise None\n def getProfileTemps(self) -> List[Optional[float]]:\n res:List[Optional[float]] = []\n for i in [0,1,2,4,6]:\n idx = self.aw.qmc.timeindex[i]\n if (i == 0 and idx == -1) or (i != 0 and idx == 0) or len(self.aw.qmc.timex) < idx:\n res.append(None)\n elif len(self.aw.qmc.temp2) > idx:\n res.append(self.aw.qmc.temp2[idx])\n else:\n res.append(None)\n return res\n\n # returns list of DRYING, MAILARD, FINISHING target phases times in seconds as first result and phases percentages (float) as second result\n # if a phase is set not set None is returned instead of a value\n def getTargetPhases(self) -> Tuple[List[Optional[int]], List[Optional[float]]]:\n res_times:List[Optional[int]] = []\n res_phases:List[Optional[float]] = []\n if self.phases_target_widgets_time is not None:\n for w in self.phases_target_widgets_time:\n ri:Optional[int] = None\n if w is not None:\n txt = w.text()\n if txt is not None and txt != '':\n ri = stringtoseconds(txt)\n res_times.append(ri)\n if self.phases_target_widgets_percent is not None:\n for w in self.phases_target_widgets_percent:\n rf:Optional[float] = None\n if w is not None:\n txt = w.text()\n if txt is not None and txt != '':\n rf = float(txt)\n res_phases.append(rf)\n return res_times, res_phases\n\n # returns list of DRY, FCs, SCs and DROP target times in seconds if event is set, otherwise None\n def getTargetTimes(self) -> List[Optional[float]]:\n res:List[Optional[float]] = []\n if self.time_target_widgets is not None:\n for w in self.time_target_widgets:\n r = None\n if w is not None:\n txt = w.text()\n if txt is not None and txt != '':\n r = stringtoseconds(txt)\n res.append(r)\n return res\n\n # returns list of CHARGE, DRY, FCs, SCs and DROP BT temperatures if event is set, otherwise None\n def getTargetTemps(self):\n res = []\n if self.temp_target_widgets is not None:\n for w in self.temp_target_widgets:\n r = None\n if w is not None:\n txt = w.text()\n if txt is not None and txt != '':\n r = float(txt)\n res.append(r)\n return res\n\n\n # message slots\n\n @pyqtSlot(int)\n 
def changeMappingMode(self,i):\n        self.aw.qmc.transMappingMode = i\n        self.updateTimeResults()\n        self.updateTempResults()\n\n    @pyqtSlot(int)\n    def phasesTableColumnHeaderClicked(self,i):\n        if (self.phases_target_widgets_time is not None and\n            self.phases_target_widgets_time[i] is not None and\n            self.phases_target_widgets_percent is not None and\n            self.phases_target_widgets_percent[i] is not None):\n            # clear target value i\n            if self.phases_target_widgets_time[i].text() != '' or self.phases_target_widgets_percent[i].text() != '':\n                self.phases_target_widgets_time[i].setText('')\n                self.phases_target_widgets_percent[i].setText('')\n            elif self.aw.qmc.backgroundprofile is not None and self.aw.qmc.timeindexB[1]>0 and self.aw.qmc.timeindexB[2]>0 and self.aw.qmc.timeindexB[6]>0 and \\\n                self.aw.qmc.timeindex[1]>0 and self.aw.qmc.timeindex[2]>0 and self.aw.qmc.timeindex[6]>0:\n                back_offset = self.backgroundOffset()\n                back_dry = self.aw.qmc.timeB[self.aw.qmc.timeindexB[1]]\n                back_fcs = self.aw.qmc.timeB[self.aw.qmc.timeindexB[2]]\n                back_drop = self.aw.qmc.timeB[self.aw.qmc.timeindexB[6]]\n                s:str = ''\n                if i == 0:\n                    # DRYING\n                    s = stringfromseconds(back_dry - back_offset)\n                elif i == 1:\n                    # MAILLARD\n                    s = stringfromseconds(back_fcs - back_dry)\n                elif i == 2:\n                    # FINISHING\n                    s = stringfromseconds(back_drop - back_fcs)\n                self.phases_target_widgets_time[i].setText(s)\n                self.updateTimeResults()\n\n    @pyqtSlot(int)\n    def phasesTableRowHeaderClicked(self,i):\n        if i == 1: # row targets\n            # clear all targets and results\n            self.clearPhasesTargetTimes()\n            self.clearPhasesTargetPercent()\n            self.clearPhasesResults()\n\n    @pyqtSlot(int)\n    def timeTableColumnHeaderClicked(self,i):\n        if self.time_target_widgets is not None and self.time_target_widgets[i] is not None:\n            # clear target value i\n            if self.time_target_widgets[i].text() != '':\n                self.time_target_widgets[i].setText('')\n                self.updateTimeResults()\n            elif self.aw.qmc.backgroundprofile is not None:\n                timeidx = [1,2,4,6][i]\n                if self.aw.qmc.timeindex[timeidx] and self.aw.qmc.timeindexB[timeidx]:\n                    s = stringfromseconds(self.aw.qmc.timeB[self.aw.qmc.timeindexB[timeidx]]-self.backgroundOffset(),False)\n                    self.time_target_widgets[i].setText(s)\n                    self.updateTimeResults()\n\n    @pyqtSlot(int)\n    def timeTableRowHeaderClicked(self,i):\n        if i == 1: # row targets\n            self.clearTimeTargets()\n            self.clearTimeResults()\n\n    @pyqtSlot(int)\n    def tempTableColumnHeaderClicked(self,i):\n        if self.temp_target_widgets is not None and self.temp_target_widgets[i] is not None:\n            # clear target value i\n            if self.temp_target_widgets[i].text() != '':\n                self.temp_target_widgets[i].setText('')\n                self.updateTempResults()\n            elif self.aw.qmc.backgroundprofile is not None:\n                timeidx = [0,1,2,4,6][i]\n                if self.aw.qmc.timeindexB[timeidx] > 0:\n                    self.temp_target_widgets[i].setText(str(self.aw.float2float(self.aw.qmc.temp2B[self.aw.qmc.timeindexB[timeidx]])))\n                    self.updateTempResults()\n\n    @pyqtSlot(int)\n    def tempTableRowHeaderClicked(self,i):\n        if i == 1: # row targets\n            self.clearTempTargets()\n            self.clearTempResults()\n\n    @pyqtSlot()\n    def updatePhasesWidget(self):\n        self.clearTimeTargets()\n        if self.phases_target_widgets_time is not None and self.phases_target_widgets_percent is not None:\n            sender = self.sender()\n            assert isinstance(sender, QLineEdit)\n            # clear the corresponding time target if the percentage target is set, or the other way around\n            if sender.text() != '':\n                try:\n                    time_idx = self.phases_target_widgets_time.index(sender)\n                    phases_target_widgets_percent = 
self.phases_target_widgets_percent[time_idx]\n if phases_target_widgets_percent is not None:\n phases_target_widgets_percent.setText('')\n except Exception: # pylint: disable=broad-except\n pass\n try:\n percent_idx = self.phases_target_widgets_percent.index(sender)\n phases_target_widgets_time = self.phases_target_widgets_time[percent_idx]\n if phases_target_widgets_time is not None:\n phases_target_widgets_time.setText('')\n except Exception: # pylint: disable=broad-except\n pass\n self.updateTimeResults()\n\n @pyqtSlot()\n def updateTimesWidget(self):\n self.clearPhasesTargetTimes()\n self.clearPhasesTargetPercent()\n self.updateTimeResults()\n\n # updates time and phases result widgets\n def updateTimeResults(self):\n self.targetTimes = self.getTargetTimes()\n time_targets_clear = all(v is None for v in self.targetTimes)\n target_times, target_phases = self.getTargetPhases()\n phases_targets_clear = all(v is None for v in target_times + target_phases)\n self.clearPhasesResults()\n self.clearTimeResults()\n if not (phases_targets_clear and time_targets_clear):\n # phases targets are set, first clear the time targets\n if not phases_targets_clear:\n self.targetTimes = self.getTargetPhasesTimes()\n else:\n self.targetTimes = self.getTargetTimes()\n # set new time results\n result_times = self.calcTimeResults()\n if self.time_result_widgets is not None:\n for i in range(4):\n time_result_widget = self.time_result_widgets[i]\n if time_result_widget is not None:\n if result_times[i] is None:\n s = ''\n else:\n s = stringfromseconds(result_times[i],leadingzero=False)\n time_result_widget.setText(s)\n # set new phases results\n if self.phases_result_widgets is not None:\n result_times = self.calcTimeResults()\n if all(result_times[r] is not None for r in [0,1,3]):\n # DRYING\n drying_period = result_times[0]\n drying_percentage = 100 * drying_period / result_times[3]\n drying_str = \\\n f'{stringfromseconds(drying_period,leadingzero=False)} {self.aw.float2float(drying_percentage)}%'\n phases_result_widgets = self.phases_result_widgets[0]\n if phases_result_widgets is not None:\n phases_result_widgets.setText(drying_str)\n # MAILARD\n mailard_period = result_times[1] - result_times[0]\n mailard_percentage = 100 * mailard_period / result_times[3]\n mailard_str = \\\n f'{stringfromseconds(mailard_period,leadingzero=False)} {self.aw.float2float(mailard_percentage)}%'\n phases_result_widgets= self.phases_result_widgets[1]\n if phases_result_widgets is not None:\n phases_result_widgets.setText(mailard_str)\n # FINISHING\n finishing_period = result_times[3] - result_times[1]\n finishing_percentage = 100 * finishing_period / result_times[3]\n finishing_str = \\\n f'{stringfromseconds(finishing_period,leadingzero=False)} {self.aw.float2float(finishing_percentage)}%'\n phases_result_widgets = self.phases_result_widgets[2]\n if phases_result_widgets is not None:\n phases_result_widgets.setText(finishing_str)\n else:\n for w in self.phases_result_widgets:\n if w is not None:\n w.setText('')\n\n @pyqtSlot()\n def updateTempResults(self):\n self.targetTemps = self.getTargetTemps()\n if all(v is None for v in self.targetTemps):\n # clear all results if no targets are set\n self.clearTempResults()\n # set new results\n elif self.temp_result_widgets is not None and len(self.temp_result_widgets)>4:\n result_temps,fit = self.calcTempResults()\n for i in range(5):\n temp_result_widget:Optional[QTableWidgetItem] = self.temp_result_widgets[i]\n result_temp = result_temps[i]\n if temp_result_widget is not None and 
result_temp is not None:\n                    temp_result_widget.setText(str(self.aw.float2float(result_temp)) + self.aw.qmc.mode)\n            s = ''\n            if fit is not None:\n                s = fit\n            self.temp_formula.setText(s)\n            self.temp_formula.repaint()\n\n    #called from Apply button\n    @pyqtSlot(bool)\n    def apply(self,_=False):\n        applied_time = self.applyTimeTransformation()\n        applied_temp = self.applyTempTransformation()\n        if applied_time or applied_temp:\n            self.aw.qmc.roastUUID = None\n            self.aw.qmc.roastdate = QDateTime.currentDateTime()\n            self.aw.qmc.roastepoch = self.aw.qmc.roastdate.toSecsSinceEpoch()\n            self.aw.qmc.roasttzoffset = libtime.timezone\n            self.aw.qmc.roastbatchnr = 0\n            self.aw.setCurrentFile(None,addToRecent=False)\n            self.aw.qmc.l_event_flags_dict = {}\n            self.aw.qmc.l_annotations_dict = {}\n            self.aw.qmc.fileDirty()\n            self.aw.qmc.timealign()\n            self.aw.autoAdjustAxis()\n            self.aw.qmc.redraw()\n        else:\n            self.restore()\n\n    #called from Restore button\n    @pyqtSlot(bool)\n    def restore(self,_=False):\n        self.aw.setCurrentFile(self.org_curFile,addToRecent=False)\n        self.aw.qmc.roastUUID = self.org_UUID\n        self.aw.qmc.roastdate = self.org_roastdate\n        self.aw.qmc.roastepoch = self.org_roastepoch\n        self.aw.qmc.roasttzoffset = self.org_roasttzoffset\n        self.aw.qmc.roastbatchnr = self.org_roastbatchnr\n        if self.org_safesaveflag:\n            self.aw.qmc.fileDirty()\n        else:\n            self.aw.qmc.fileClean()\n        self.aw.qmc.l_event_flags_dict = self.org_l_event_flags_dict\n        self.aw.qmc.l_annotations_dict = self.org_l_annotations_dict\n        self.aw.qmc.timex = self.org_timex[:]\n        self.aw.qmc.extratimex = copy.deepcopy(self.org_extratimex)\n        self.aw.qmc.temp2 = self.org_temp2[:]\n        self.aw.autoAdjustAxis()\n        self.aw.qmc.redraw()\n\n    #called from OK button\n    @pyqtSlot()\n    def applyTransformations(self):\n        self.apply()\n        #save window position (only; not size!)\n        settings = QSettings()\n        settings.setValue('TransformatorPosition',self.frameGeometry().topLeft())\n        self.accept()\n\n    #called from Cancel button\n    @pyqtSlot()\n    def restoreState(self):\n        self.restore()\n        self.aw.qmc.transMappingMode = self.org_transMappingMode\n        #save window position (only; not size!)\n        settings = QSettings()\n        settings.setValue('TransformatorPosition',self.geometry().topLeft())\n        self.closeHelp()\n        self.reject()\n\n    @pyqtSlot(bool)\n    def openHelp(self,_=False):\n        from help import transposer_help # type: ignore [attr-defined] # pylint: disable=no-name-in-module\n        self.helpdialog = self.aw.showHelpDialog(\n                self,            # this dialog as parent\n                self.helpdialog, # the existing help dialog\n                QApplication.translate('Form Caption','Profile Transposer Help'),\n                transposer_help.content())\n\n    def closeHelp(self):\n        self.aw.closeHelpDialog(self.helpdialog)\n\n    def closeEvent(self, _):\n        self.restoreState()\n\n\n    # Calculations\n\n    # returns the list of result times in seconds\n    def calcTimeResults(self):\n        res = []\n        profileTime:Optional[float]\n        if self.aw.qmc.transMappingMode == 0:\n            # discrete mapping\n            # adding CHARGE\n            fits:List[Optional['npt.NDArray[numpy.float64]']] = self.calcDiscretefits([0] + self.profileTimes,[0] + self.targetTimes)\n            if len(fits)>4 and len(self.profileTimes)>3:\n                for i in range(4):\n                    fit:Optional['npt.NDArray[numpy.float64]'] = fits[i+1]\n                    profileTime = self.profileTimes[i]\n                    if fit is not None and profileTime is not None:\n                        res.append(numpy.poly1d(fit)(profileTime))\n                    else:\n                        res.append(None)\n        else:\n            with warnings.catch_warnings():\n                warnings.filterwarnings('error')\n                try:\n                    fit_fuc = self.calcTimePolyfit() # note: calcTimePolyfit() already returns a numpy.poly1d callable\n                    
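# --- editor's note (not in the original source): filterwarnings('error')\n                    # above promotes numpy.RankWarning to an exception; e.g. the\n                    # hypothetical call\n                    #   numpy.polyfit([100.0, 100.0], [1.0, 2.0], 1)\n                    # (duplicate x-values, so the fit is rank-deficient) would\n                    # raise and be swallowed by the except clause below,\n                    # leaving res empty.\n                    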
for i in range(4):\n profileTime = self.profileTimes[i]\n if fit_fuc is not None and profileTime is not None:\n res.append(fit_fuc(profileTime))\n else:\n res.append(None)\n except numpy.RankWarning:\n pass\n except Exception: # pylint: disable=broad-except\n pass\n return res\n\n # returns the list of results temperatures and the polyfit or None as second result\n def calcTempResults(self) -> Tuple[List[Optional[float]], Optional[str]]:\n res:List[Optional[float]] = []\n fit:Optional['npt.NDArray[numpy.float64]'] = None\n fit_str:Optional[str] = None\n profileTemp:Optional[float]\n if self.aw.qmc.transMappingMode == 0:\n # discrete mapping\n fits = self.calcDiscretefits(self.profileTemps,self.targetTemps)\n for i in range(5):\n fit = fits[i]\n profileTemp = self.profileTemps[i]\n if profileTemp is not None and fit is not None:\n res.append(numpy.poly1d(fit)(profileTemp))\n else:\n res.append(None)\n active_fits = list(filter(lambda x: x[1][1] is not None,zip(fits,zip(self.profileTemps,self.targetTemps))))\n if len(active_fits) > 0 and len(active_fits) < 3:\n fit_str = self.aw.fit2str(fits[0])\n else:\n formula = ''\n last_target:Optional[Tuple] = None\n for f,tpl in reversed(active_fits[:-1]):\n if last_target is None:\n formula = self.aw.fit2str(f)\n else:\n formula = f'({self.aw.fit2str(f)} if x<{last_target} else {formula})'\n last_target = tpl[1]\n fit_str = formula\n else:\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n fit_func = self.calcTempPolyfit() # numpy.poly1d not yet applied to this fit\n if fit_func is not None:\n p = numpy.poly1d(fit_func)\n for i in range(5):\n profileTemp = self.profileTemps[i]\n if profileTemp is not None:\n res.append(p(profileTemp))\n else:\n res.append(None)\n fit_str = self.aw.fit2str(fit_func)\n else:\n res = [None]*5\n except numpy.RankWarning:\n pass\n except Exception: # pylint: disable=broad-except\n pass\n return res,fit_str\n\n # returns target times based on the phases target\n def getTargetPhasesTimes(self):\n # get the offset\n offset:float = self.forgroundOffset()\n # get profile phases events time\n dry:float = self.aw.qmc.timex[self.aw.qmc.timeindex[1]] - offset\n fcs:float = self.aw.qmc.timex[self.aw.qmc.timeindex[2]] - offset\n drop:float = self.aw.qmc.timex[self.aw.qmc.timeindex[6]] - offset\n # flags for targets set\n dry_set:bool = False\n drop_set:bool = False\n fcs_set:bool = False\n\n if self.phases_target_widgets_time is None or self.phases_target_widgets_percent is None:\n return []\n\n # first determine the target DROP time (relative to the profile drop) if any\n drop_phases_target_widget_time = self.phases_target_widgets_time[2]\n drop_phases_target_widget_percent = self.phases_target_widgets_percent[2]\n if drop_phases_target_widget_time is not None and drop_phases_target_widget_time.text() != '':\n drop = fcs + stringtoseconds(drop_phases_target_widget_time.text())\n drop_set = True\n elif drop_phases_target_widget_percent is not None and drop_phases_target_widget_percent.text() != '':\n drop = fcs + (float(drop_phases_target_widget_percent.text()) * drop / 100)\n drop_set = True\n\n # determine the target DRY time (relative to the target drop of above) if any\n dry_phases_target_widgets_time = self.phases_target_widgets_time[0]\n dry_phases_target_widgets_percent = self.phases_target_widgets_percent[0]\n if dry_phases_target_widgets_time is not None and dry_phases_target_widgets_time.text() != '':\n dry = stringtoseconds(dry_phases_target_widgets_time.text())\n dry_set = True\n elif 
dry_phases_target_widgets_percent is not None and dry_phases_target_widgets_percent.text() != '':\n dry = float(dry_phases_target_widgets_percent.text()) * drop / 100\n dry_set = True\n\n # determine the target FCs time (relative to the target drop of above) if any\n fcs_phases_target_widgets_time = self.phases_target_widgets_time[1]\n fcs_phases_target_widgets_percent = self.phases_target_widgets_percent[1]\n if fcs_phases_target_widgets_time is not None and fcs_phases_target_widgets_time.text() != '':\n fcs = dry + stringtoseconds(fcs_phases_target_widgets_time.text())\n fcs_set = True\n elif fcs_phases_target_widgets_percent is not None and fcs_phases_target_widgets_percent.text() != '':\n fcs = dry + (float(fcs_phases_target_widgets_percent.text()) * drop / 100)\n fcs_set = True\n\n# return [(dry if dry_set else None),(fcs if fcs_set else None), None, (drop if drop_set else None)]\n # set all unset target times to the profile times\n return [\n (dry if dry_set else (self.aw.qmc.timex[self.aw.qmc.timeindex[1]] - offset)),\n (fcs if fcs_set else (self.aw.qmc.timex[self.aw.qmc.timeindex[2]] - offset)),\n None,\n (drop if drop_set else (self.aw.qmc.timex[self.aw.qmc.timeindex[6]] - offset))]\n\n # calculates the linear (self.aw.qmc.transMappingMode = 1) or quadratic (self.aw.qmc.transMappingMode = 2) mapping\n # between the profileTimes and the targetTimes\n def calcTimePolyfit(self) -> Optional[Callable[[float], float]]:\n # initialized by CHARGE time 00:00\n xa:List[float] = [0]\n ya:List[float] = [0]\n for i in range(4):\n profileTime:Optional[float] = self.profileTimes[i]\n targetTime:Optional[float] = self.targetTimes[i]\n if profileTime is not None and targetTime is not None:\n xa.append(profileTime)\n ya.append(targetTime)\n deg = self.aw.qmc.transMappingMode\n if len(xa) > 1:\n try:\n deg = min(len(xa) - 1,deg)\n z = numpy.polyfit(xa, ya, deg)\n return numpy.poly1d(z)\n except Exception: # pylint: disable=broad-except\n return None\n else:\n return None\n\n # calculates the linear (self.aw.qmc.transMappingMode = 1) or quadratic (self.aw.qmc.transMappingMode = 2) mapping\n # between the profileTemps and the targetTemps\n def calcTempPolyfit(self):\n xa:List[float] = []\n ya:List[float] = []\n for i in range(5):\n profileTemp:Optional[float] = self.profileTemps[i]\n targetTemp:Optional[float] = self.targetTemps[i]\n if profileTemp is not None and targetTemp is not None:\n xa.append(profileTemp)\n ya.append(targetTemp)\n deg = self.aw.qmc.transMappingMode\n if len(xa) > 0:\n try:\n deg = min(len(xa) - 1,deg)\n if deg == 0:\n z = numpy.array([1, ya[0] - xa[0]])\n else:\n z = numpy.polyfit(xa, ya, deg)\n return z\n except Exception: # pylint: disable=broad-except\n return None\n else:\n return None\n\n # returns a list of segment-wise fits between sources and targets\n # each fit is a numpy.array as returned by numpy.polyfit\n # a source element of None generates None as fit\n # a target element of None is skipped and previous and next segments are joined\n # the lists of sources and targets are expected to be of the same length\n # the length of the result list is the same as that of the sources and targets\n @staticmethod\n def calcDiscretefits(sources:List[Optional[float]], targets:List[Optional[float]]) -> List[Optional['npt.NDArray[numpy.float64]']]:\n if len(sources) != len(targets):\n return [None]*len(sources)\n fits:List[Optional['npt.NDArray[numpy.float64]']] = [None]*len(sources)\n last_fit:Optional['npt.NDArray[numpy.float64]'] = None\n for i, _ in enumerate(sources):\n if 
sources[i] is not None:\n if targets[i] is None:\n # we take the last fit\n fits[i] = last_fit\n else:\n next_idx = None # the index of the next non-empty source/target pair\n for j in range(i+1,len(sources)):\n if sources[j] is not None and targets[j] is not None:\n next_idx = j\n break\n if next_idx is None:\n sources_i = sources[i]\n targets_i = targets[i]\n if last_fit is not None:\n fits[i] = last_fit # copy previous fit\n elif sources_i is not None and targets_i is not None:\n # set a simple offset only as there is no previous nor next fit\n fits[i] = numpy.array([1,targets_i - sources_i])\n else:\n fits[i] = numpy.array([1,0])\n else:\n sources_next = sources[next_idx]\n targets_next = targets[next_idx]\n if sources_i is None or targets_i is None or sources_next is None or targets_next is None:\n fits[i] = numpy.array([1,0])\n else:\n fits[i] = numpy.polyfit([sources_i, sources_next], [targets_i, targets_next] ,1)\n # if this is the first fit, we copy it to all previous positions\n if last_fit is None:\n for k in range(i):\n if sources[k] is not None:\n fits[k] = fits[i]\n # register this fit\n last_fit = fits[i]\n return fits\n\n # fits of length 5\n def applyDiscreteTimeMapping(self,timex,fits):\n offset = self.forgroundOffset()\n res_timex = []\n if offset == 0 or fits[0] is None:\n new_offset = 0\n else:\n new_offset = numpy.poly1d(fits[0])(offset)\n for i, _ in enumerate(timex):\n # first fit is to be applied for all readings before DRY\n j = 0\n if self.aw.qmc.timeindex[6] > 0 and i >= self.aw.qmc.timeindex[6]:\n # last fit counts after DROP\n j = 4\n elif self.aw.qmc.timeindex[4] > 0 and i >= self.aw.qmc.timeindex[4]:\n j = 3 # after SCs\n elif self.aw.qmc.timeindex[2] > 0 and i >= self.aw.qmc.timeindex[2]:\n j = 2 # after FCs\n elif self.aw.qmc.timeindex[1] > 0 and i >= self.aw.qmc.timeindex[1]:\n j = 1 # after DRY\n if fits[j] is None:\n res_timex.append(timex[i] - offset + new_offset)\n else:\n fit = numpy.poly1d(fits[j]) # fit to be applied\n res_timex.append(fit(timex[i] - offset)+new_offset)\n return res_timex\n\n # returns False if no transformation was applied\n def applyTimeTransformation(self):\n # first update the targets\n self.targetTimes = self.getTargetTimes()\n if all(v is None for v in self.targetTimes):\n target_times, target_phases = self.getTargetPhases()\n if all(v is None for v in target_times + target_phases):\n self.aw.qmc.timex = self.org_timex[:]\n self.aw.qmc.extratimex = copy.deepcopy(self.org_extratimex)\n return False\n self.targetTimes = self.getTargetPhasesTimes()\n # calculate the offset of 00:00\n offset = self.forgroundOffset()\n # apply either the discrete or the polyfit mappings\n if self.aw.qmc.transMappingMode == 0:\n # discrete mapping\n fits = self.calcDiscretefits([0] + self.profileTimes,[0] + self.targetTimes)\n self.aw.qmc.timex = self.applyDiscreteTimeMapping(self.org_timex,fits)\n # apply to the extra timex\n self.aw.qmc.extratimex = []\n for timex in self.org_extratimex:\n try:\n timex_trans = self.applyDiscreteTimeMapping(timex,fits)\n except Exception: # pylint: disable=broad-except\n timex_trans = timex\n self.aw.qmc.extratimex.append(timex_trans)\n else:\n # polyfit mappings\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n fit = self.calcTimePolyfit() # the fit returned here is already applied to numpy.poly1d\n if fit is not None:\n self.aw.qmc.timex = [fit(tx-offset) for tx in self.org_timex]\n if len(self.aw.qmc.timex) > 0 and self.aw.qmc.timeindex[0] != -1:\n foffset = self.aw.qmc.timex[0]\n 
self.aw.qmc.timex = [tx+foffset for tx in self.aw.qmc.timex]\n extratimex = []\n for timex in self.org_extratimex:\n offset = 0\n if len(timex) > 0 and self.aw.qmc.timeindex[0] != -1:\n offset = timex[self.aw.qmc.timeindex[0]]\n new_timex = [fit(tx-offset) for tx in timex]\n if len(new_timex) > 0 and self.aw.qmc.timeindex[0] != -1:\n foffset = new_timex[0]\n new_timex = [tx+foffset for tx in new_timex]\n extratimex.append(new_timex)\n self.aw.qmc.extratimex = extratimex\n except numpy.RankWarning:\n pass\n return True\n\n # returns False if no transformation was applied\n def applyTempTransformation(self):\n # first update the targets\n self.targetTemps = self.getTargetTemps()\n if all(v is None for v in self.targetTemps):\n self.aw.qmc.temp2 = self.org_temp2[:]\n return False\n # apply either the discrete or the polyfit mappings\n if self.aw.qmc.transMappingMode == 0:\n # discrete mappings, length 5\n fits = self.calcDiscretefits(self.profileTemps,self.targetTemps)\n self.aw.qmc.temp2 = []\n for i, _ in enumerate(self.org_temp2):\n # first fit is to be applied for all readings before DRY\n j = 0\n if self.aw.qmc.timeindex[6] > 0 and i >= self.aw.qmc.timeindex[6]:\n # last fit counts after DROP\n j = 4\n elif self.aw.qmc.timeindex[4] > 0 and i >= self.aw.qmc.timeindex[4]:\n j = 3 # after SCs\n elif self.aw.qmc.timeindex[2] > 0 and i >= self.aw.qmc.timeindex[2]:\n j = 2 # after FCs\n elif self.aw.qmc.timeindex[1] > 0 and i >= self.aw.qmc.timeindex[1]:\n j = 1 # after DRY\n\n tp = self.org_temp2[i]\n fitj = fits[j]\n if tp is None or tp == -1 or fitj is None:\n self.aw.qmc.temp2.append(tp)\n else:\n fit = numpy.poly1d(fitj) # fit to be applied\n self.aw.qmc.temp2.append(fit(tp))\n return True\n # polyfit mappings\n with warnings.catch_warnings():\n warnings.filterwarnings('error')\n try:\n p = self.calcTempPolyfit()\n if p is not None:\n fit = numpy.poly1d(p)\n if fit is not None:\n self.aw.qmc.temp2 = [(-1 if (temp is None) or (temp == -1) else fit(temp)) for temp in self.org_temp2]\n except numpy.RankWarning:\n pass\n return True\n\n # tables\n\n def createPhasesTable(self):\n vheader = self.phasestable.verticalHeader()\n hheader = self.phasestable.horizontalHeader()\n\n self.phasestable.setStyleSheet('QTableView { background-color: red); }')\n\n self.phasestable.setRowCount(3)\n self.phasestable.setColumnCount(3)\n if hheader is not None:\n hheader.setStretchLastSection(False)\n hheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n hheader.setHighlightSections(False)\n if vheader is not None:\n vheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n\n self.phasestable.setHorizontalHeaderLabels([QApplication.translate('Label','Drying'),\n QApplication.translate('Label','Maillard'),\n QApplication.translate('Label','Finishing')])\n self.phasestable.setVerticalHeaderLabels([QApplication.translate('Table','Profile'),\n QApplication.translate('Table','Target'),\n QApplication.translate('Table','Result')])\n self.phasestable.setShowGrid(True)\n self.phasestable.setAlternatingRowColors(True)\n self.phasestable.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.phasestable.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n if hheader is not None and vheader is not None:\n self.phasestable.setFixedSize(\n hheader.length() +\n vheader.sizeHint().width(),\n vheader.length() +\n hheader.height())\n self.phasestable.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers)\n self.phasestable.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n 
self.phasestable.setSelectionMode(QAbstractItemView.SelectionMode.NoSelection)\n self.phasestable.setAutoScroll(False)\n if vheader is not None:\n vheader.sectionClicked.connect(self.phasesTableRowHeaderClicked)\n if hheader is not None:\n hheader.sectionClicked.connect(self.phasesTableColumnHeaderClicked)\n\n self.phases_target_widgets_time = []\n self.phases_target_widgets_percent = []\n self.phases_result_widgets = []\n\n profilePhasesTimes:List[Optional[float]] = [None]*3 # DRYING, MAILLARD, FINISHING\n profilePhasesPercentages:List[Optional[float]] = [None] * 3\n #\n # the phases transformation are only enabled if at least DRY, FCs and DROP events are set\n phases_enabled = self.aw.qmc.timeindex[1] and self.aw.qmc.timeindex[2] and self.aw.qmc.timeindex[6]\n #\n if phases_enabled:\n profilePhasesTimes[0] = self.profileTimes[0] # DRYING == DRY\n if self.profileTimes[0] is not None and self.profileTimes[1] is not None:\n profilePhasesTimes[1] = self.profileTimes[1] - self.profileTimes[0]\n if self.profileTimes[1] is not None and self.profileTimes[3] is not None:\n profilePhasesTimes[2] = self.profileTimes[3] - self.profileTimes[1]\n if self.profileTimes[3] is not None:\n profilePhasesPercentages = [(ppt/self.profileTimes[3])*100 for ppt in profilePhasesTimes if ppt is not None]\n\n for i in range(3):\n profilePhasesTime = profilePhasesTimes[i]\n profilePhasesPercentage = profilePhasesPercentages[i]\n if len(profilePhasesTimes) > i and profilePhasesTime is not None and profilePhasesPercentage is not None:\n profile_phases_time_str = \\\n f'{stringfromseconds(int(round(profilePhasesTime)),leadingzero=False)} {self.aw.float2float(profilePhasesPercentage)}%'\n profile_phases_widget = QTableWidgetItem(profile_phases_time_str)\n profile_phases_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.phasestable.setItem(0,i,profile_phases_widget)\n #\n target_widget_time = QLineEdit('')\n target_widget_time.setValidator(MyQRegularExpressionValidator(self.regextime))\n target_widget_time.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n if phases_enabled:\n target_widget_time.editingFinished.connect(self.updatePhasesWidget)\n else:\n target_widget_time.setEnabled(False)\n target_widget_percent = QLineEdit('')\n target_widget_percent.setValidator(QRegularExpressionValidator(self.regexpercent))\n target_widget_percent.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n if phases_enabled:\n target_widget_percent.editingFinished.connect(self.updatePhasesWidget)\n else:\n target_widget_percent.setEnabled(False)\n target_cell_widget = QWidget()\n target_cell_layout = QHBoxLayout(target_cell_widget)\n target_cell_layout.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n target_cell_layout.setContentsMargins(4,4,4,4)\n target_cell_layout.addWidget(target_widget_time)\n target_cell_layout.addWidget(target_widget_percent)\n target_cell_widget.setLayout(target_cell_layout)\n self.phasestable.setCellWidget(1,i,target_cell_widget)\n #\n result_widget = QTableWidgetItem('')\n result_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.phasestable.setItem(2,i,result_widget)\n else:\n target_widget_time = None\n target_widget_percent = None\n result_widget = None\n self.phases_target_widgets_time.append(target_widget_time)\n self.phases_target_widgets_percent.append(target_widget_percent)\n self.phases_result_widgets.append(result_widget)\n\n def createTimeTable(self):\n 
hheader = self.timetable.horizontalHeader()\n vheader = self.timetable.verticalHeader()\n self.timetable.clear()\n self.timetable.setRowCount(3)\n self.timetable.setColumnCount(4)\n if hheader is not None:\n hheader.setStretchLastSection(False)\n hheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n hheader.setHighlightSections(False)\n if vheader is not None:\n vheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n self.timetable.setHorizontalHeaderLabels([QApplication.translate('Label','DRY END'),\n QApplication.translate('Label','FC START'),\n QApplication.translate('Label','SC START'),\n QApplication.translate('Label','DROP')])\n self.timetable.setVerticalHeaderLabels([QApplication.translate('Table','Profile'),\n QApplication.translate('Table','Target'),\n QApplication.translate('Table','Result')])\n self.timetable.setShowGrid(True)\n self.timetable.setAlternatingRowColors(False)\n self.timetable.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.timetable.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.timetable.setFrameStyle(QFrame.Shape.NoFrame)\n if hheader is not None and vheader is not None:\n self.timetable.setFixedSize(\n hheader.length() +\n vheader.sizeHint().width(),\n vheader.length() +\n hheader.height())\n self.timetable.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers)\n self.timetable.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.timetable.setSelectionMode(QAbstractItemView.SelectionMode.NoSelection)\n self.timetable.setAutoScroll(False)\n if vheader is not None:\n vheader.sectionClicked.connect(self.timeTableRowHeaderClicked)\n if hheader is not None:\n hheader.sectionClicked.connect(self.timeTableColumnHeaderClicked)\n\n self.time_target_widgets = []\n self.time_result_widgets = []\n\n for i in range(4):\n profileTime = self.profileTimes[i]\n if len(self.profileTimes) > i and profileTime is not None:\n profile_time_str = stringfromseconds(int(round(profileTime)),leadingzero=False)\n profile_widget = QTableWidgetItem(profile_time_str)\n profile_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.timetable.setItem(0,i,profile_widget)\n #\n target_widget = QLineEdit('')\n target_widget.setValidator(MyQRegularExpressionValidator(self.regextime))\n target_widget.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n target_widget.editingFinished.connect(self.updateTimesWidget)\n target_cell_widget = QWidget()\n target_cell_layout = QHBoxLayout(target_cell_widget)\n target_cell_layout.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n target_cell_layout.setContentsMargins(4,4,4,4)\n target_cell_layout.addWidget(target_widget)\n target_cell_widget.setLayout(target_cell_layout)\n self.timetable.setCellWidget(1,i,target_cell_widget)\n #\n result_widget = QTableWidgetItem('') #profile_time_str)\n result_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.timetable.setItem(2,i,result_widget)\n else:\n target_widget = None\n result_widget = None\n self.time_target_widgets.append(target_widget)\n self.time_result_widgets.append(result_widget)\n\n def createTempTable(self):\n vheader = self.temptable.verticalHeader()\n hheader = self.temptable.horizontalHeader()\n self.temptable.clear()\n self.temptable.setRowCount(3)\n self.temptable.setColumnCount(5)\n if hheader is not None:\n hheader.setStretchLastSection(False)\n hheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n if vheader 
is not None:\n vheader.setSectionResizeMode(QHeaderView.ResizeMode.Fixed)\n self.temptable.setHorizontalHeaderLabels([QApplication.translate('Label','CHARGE'),\n QApplication.translate('Label','DRY END'),\n QApplication.translate('Label','FC START'),\n QApplication.translate('Label','SC START'),\n QApplication.translate('Label','DROP')])\n self.temptable.setVerticalHeaderLabels([QApplication.translate('Table','Profile'),\n QApplication.translate('Table','Target'),\n QApplication.translate('Table','Result')])\n self.temptable.setShowGrid(True)\n self.temptable.setAlternatingRowColors(False)\n self.temptable.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.temptable.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n hheader = self.temptable.horizontalHeader()\n vheader = self.temptable.verticalHeader()\n if hheader is not None and vheader is not None:\n self.temptable.setFixedSize(\n hheader.length() +\n vheader.sizeHint().width(),\n vheader.length() +\n hheader.height())\n self.temptable.setEditTriggers(QAbstractItemView.EditTrigger.NoEditTriggers)\n self.temptable.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.temptable.setSelectionMode(QAbstractItemView.SelectionMode.NoSelection)\n self.temptable.setAutoScroll(False)\n if vheader is not None:\n vheader.sectionClicked.connect(self.tempTableRowHeaderClicked)\n if hheader is not None:\n hheader.sectionClicked.connect(self.tempTableColumnHeaderClicked)\n\n self.temp_target_widgets = []\n self.temp_result_widgets = []\n\n for i in range(5):\n profileTemp = self.profileTemps[i]\n if len(self.profileTemps) > i and profileTemp is not None:\n profile_temp_str = str(self.aw.float2float(profileTemp)) + self.aw.qmc.mode\n profile_widget = QTableWidgetItem(profile_temp_str)\n profile_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.temptable.setItem(0,i,profile_widget)\n #\n target_widget = QLineEdit('')\n target_widget.setValidator(QRegularExpressionValidator(self.regextemp))\n target_widget.editingFinished.connect(self.updateTempResults)\n target_widget.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n\n target_cell_widget = QWidget()\n target_cell_layout = QHBoxLayout(target_cell_widget)\n target_cell_layout.setAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n target_cell_layout.setContentsMargins(4,4,4,4)\n target_cell_layout.addWidget(target_widget)\n# target_cell_layout.addWidget(QLabel(self.aw.qmc.mode))\n target_cell_widget.setLayout(target_cell_layout)\n self.temptable.setCellWidget(1,i,target_cell_widget)\n #\n result_widget = QTableWidgetItem('')\n result_widget.setTextAlignment(Qt.AlignmentFlag.AlignCenter|Qt.AlignmentFlag.AlignVCenter)\n self.temptable.setItem(2,i,result_widget)\n else:\n target_widget = None\n result_widget = None\n self.temp_target_widgets.append(target_widget)\n self.temp_result_widgets.append(result_widget)\n", "repo_name": "artisan-roaster-scope/artisan", "sub_path": "src/artisanlib/transposer.py", "file_name": "transposer.py", "file_ext": "py", "file_size_in_byte": 61260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 809, "dataset": "github-code", "pt": "50", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtGui.QRegularExpressionValidator", "line_number": 45, "usage_type": "name"}, {"api_name": "artisanlib.dialogs.ArtisanDialog", "line_number": 58, "usage_type": "name"}, {"api_name": 
"PyQt5.QtWidgets.QApplication.translate", "line_number": 62, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QRegularExpression", "line_number": 66, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRegularExpression", "line_number": 67, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QRegularExpression", "line_number": 68, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 74, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 85, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 86, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidget", "line_number": 87, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 93, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 94, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 95, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 98, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 98, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 101, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 103, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 109, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 109, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox.StandardButton", "line_number": 123, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 123, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox.StandardButton", "line_number": 124, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 124, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox.StandardButton", "line_number": 125, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 125, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 128, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 128, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", 
"line_number": 131, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 134, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 137, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 140, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 140, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 140, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 141, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 142, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 142, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 143, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 143, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 144, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 144, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 148, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.TextInteractionFlag", "line_number": 149, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 149, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 151, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 161, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 164, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 168, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 172, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 175, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 179, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 184, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 184, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 186, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 186, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 188, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 188, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 188, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 192, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox.StandardButton", "line_number": 201, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 201, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QSettings", "line_number": 205, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLayout.SizeConstraint", "line_number": 209, "usage_type": 
"attribute"}, {"api_name": "PyQt5.QtWidgets.QLayout", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 262, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 262, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 269, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 269, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 278, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 276, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 276, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 289, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 288, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 303, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 303, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 304, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 304, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 307, "usage_type": "name"}, {"api_name": "artisanlib.util.stringtoseconds", "line_number": 311, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 315, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 302, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 302, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 302, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 325, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 325, "usage_type": "name"}, {"api_name": "artisanlib.util.stringtoseconds", "line_number": 332, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 324, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 324, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 352, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 377, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 380, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 382, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 358, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 386, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 405, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 395, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 409, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 415, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 428, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 439, "usage_type": "argument"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 434, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 458, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 487, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 497, "usage_type": "call"}, {"api_name": 
"artisanlib.util.stringfromseconds", "line_number": 505, "usage_type": "call"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 513, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 532, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 532, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 522, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDateTime.currentDateTime", "line_number": 549, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QDateTime", "line_number": 549, "usage_type": "name"}, {"api_name": "time.timezone", "line_number": 551, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 543, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 579, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 564, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSettings", "line_number": 589, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 585, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QSettings", "line_number": 599, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 594, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 610, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 610, "usage_type": "name"}, {"api_name": "help.transposer_help.content", "line_number": 611, "usage_type": "call"}, {"api_name": "help.transposer_help", "line_number": 611, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSlot", "line_number": 604, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 625, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 629, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 629, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 632, "usage_type": "name"}, {"api_name": "numpy.poly1d", "line_number": 635, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 639, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 640, "usage_type": "call"}, {"api_name": "numpy.RankWarning", "line_number": 649, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 657, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 657, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 658, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 659, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 660, "usage_type": "name"}, {"api_name": "numpy.poly1d", "line_number": 668, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 676, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 676, "usage_type": "name"}, {"api_name": "warnings.catch_warnings", "line_number": 685, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.RankWarning", "line_number": 700, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 656, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 656, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 656, "usage_type": "name"}, {"api_name": "artisanlib.util.stringtoseconds", "line_number": 726, "usage_type": "call"}, {"api_name": 
"artisanlib.util.stringtoseconds", "line_number": 736, "usage_type": "call"}, {"api_name": "artisanlib.util.stringtoseconds", "line_number": 746, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 764, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 765, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 767, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 768, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 776, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 777, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 762, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 762, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 786, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 787, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 789, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 790, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 799, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 801, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 815, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 815, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 818, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 818, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 819, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 838, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 840, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 845, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 847, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 864, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 880, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 892, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 912, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 913, "usage_type": "call"}, {"api_name": "numpy.RankWarning", "line_number": 932, "usage_type": "attribute"}, {"api_name": "numpy.poly1d", "line_number": 966, "usage_type": "call"}, {"api_name": "warnings.catch_warnings", "line_number": 970, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 971, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 975, "usage_type": "call"}, {"api_name": "numpy.RankWarning", "line_number": 978, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 994, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 994, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 997, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 997, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 999, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 999, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1000, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1000, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1001, "usage_type": "call"}, 
{"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1001, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1002, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1002, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1003, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1003, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1004, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1004, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1007, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1007, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1008, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1008, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.EditTrigger", "line_number": 1015, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1015, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.FocusPolicy", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1016, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectionMode", "line_number": 1017, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1017, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1028, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1028, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 1029, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 1029, "usage_type": "name"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 1048, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1049, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1050, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1050, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1053, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1055, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1055, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1060, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QRegularExpressionValidator", "line_number": 1061, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1062, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1062, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 1067, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1068, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1069, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1069, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1076, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1077, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1077, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 1095, "usage_type": 
"attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 1095, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 1098, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 1098, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1099, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1099, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1100, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1100, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1101, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1101, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1102, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1102, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1103, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1103, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1104, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1104, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1105, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1105, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1108, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1108, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1109, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1109, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame.Shape", "line_number": 1110, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 1110, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.EditTrigger", "line_number": 1117, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1117, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.FocusPolicy", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1118, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectionMode", "line_number": 1119, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1119, "usage_type": "name"}, {"api_name": "artisanlib.util.stringfromseconds", "line_number": 1132, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1133, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1134, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1134, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1137, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1139, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1139, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 1141, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1142, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", 
"line_number": 1143, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1143, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1149, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1150, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1150, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 1166, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 1166, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHeaderView.ResizeMode", "line_number": 1168, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QHeaderView", "line_number": 1168, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1169, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1169, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1170, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1170, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1171, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1171, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1172, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1172, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1173, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1173, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1174, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1174, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1175, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1175, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.translate", "line_number": 1176, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1176, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1179, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1179, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.ScrollBarPolicy", "line_number": 1180, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1180, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.EditTrigger", "line_number": 1189, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1189, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.FocusPolicy", "line_number": 1190, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1190, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView.SelectionMode", "line_number": 1191, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QAbstractItemView", "line_number": 1191, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1205, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1206, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1206, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 1209, "usage_type": "call"}, 
{"api_name": "PyQt5.QtGui.QRegularExpressionValidator", "line_number": 1210, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1212, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1212, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 1214, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 1215, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1216, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1216, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTableWidgetItem", "line_number": 1223, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignmentFlag", "line_number": 1224, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 1224, "usage_type": "name"}]} +{"seq_id": "41030958655", "text": "\"\"\"import base64\nfrom Crypto.Cipher import AES\n\nx = base64.b64decode(open('7.txt', 'r').read())\n\nkey = b'YELLOW SUBMARINE'\ncipher = AES.new(key, AES.MODE_ECB)\ny = cipher.decrypt(x)\nprint(y)\n\"\"\"\nfrom Crypto.Cipher import AES\nimport base64\nfrom Crypto.Util.strxor import strxor\n\ndef cbc_encrypt(plaintext,key):\n #split the plaintext into blocks of size 16 bytes\n blocks = [plaintext[i: i + 16] for i in range(0, len(plaintext), 16)]\n c = AES.new(key, AES.MODE_ECB)\n IV = b\"\\x00\"*16\n ciphertext = b\"\"\n prev = IV\n for plaintext_block in blocks:\n cipherblock = c.encrypt(strxor(plaintext_block, prev))\n ciphertext += cipherblock\n prev = cipherblock\n return ciphertext\ndef cbc_decrypt(ciphertext, key):\n blocks = [ciphertext[i: i + 16] for i in range(0, len(ciphertext), 16)]\n plaintext = b\"\"\n c = AES.new(key, AES.MODE_ECB)\n prev = b\"\\x00\"*16\n for cipherblock in blocks:\n plaintext_block = strxor(c.decrypt(cipherblock), prev)\n plaintext += plaintext_block\n prev = cipherblock\n return plaintext\n\n\nx = base64.b64decode(open('file').read())\ndecrypted = cbc_decrypt(x, b'YELLOW SUBMARINE')\nprint(decrypted)\nprint(base64.b64encode(cbc_encrypt(decrypted, b'YELLOW SUBMARINE')))\n", "repo_name": "josephxu1234/cryptopals", "sub_path": "set2/chal10/cbc.py", "file_name": "cbc.py", "file_ext": "py", "file_size_in_byte": 1252, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "Crypto.Cipher.AES.new", "line_number": 18, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 18, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Crypto.Util.strxor.strxor", "line_number": 23, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 30, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 30, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 30, "usage_type": "attribute"}, {"api_name": "Crypto.Util.strxor.strxor", "line_number": 33, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 39, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "16484062939", "text": "from . 
import basetask\nfrom data.ai.task import SuccessTask\n\n\nclass FindResourceTaskParser(basetask.BaseTaskParser):\n \"\"\" Parses find resource tasks.\n\n Member:\n resource -- The resource type to find (string).\n successful -- Indicates if task is successful (bool).\n \"\"\"\n\n def __init__(self, base_task_parameter=None, variance_min=None,\n variance_max=None, task=None, resource=None):\n super().__init__(base_task_parameter, variance_min, variance_max, task)\n self.resource = resource\n if self.input in self.pipeline:\n self.resource = self.pipeline[self.input]\n self.successful = task.successful if task else False\n\n def create_new(self, data):\n entity_pos = data.game.region.get_pos(self.entity)\n resource_entity = data.game.region.find_resource(self.resource, entity_pos)\n if resource_entity:\n self.successful = True\n if self.output:\n self.pipeline[self.output] = resource_entity\n else:\n raise basetask.PipelineParameterException()\n \n self.task = SuccessTask(self.base_task_parameter(), self.successful)\n return self.task\n\n def is_success(self):\n return self.successful", "repo_name": "tea2code/fantasy-rts", "sub_path": "python-prototype/ai/task/findresourcetask.py", "file_name": "findresourcetask.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "data.ai.task.game.region.get_pos", "line_number": 22, "usage_type": "call"}, {"api_name": "data.ai.task.game", "line_number": 22, "usage_type": "attribute"}, {"api_name": "data.ai.task", "line_number": 22, "usage_type": "name"}, {"api_name": "data.ai.task.game.region.find_resource", "line_number": 23, "usage_type": "call"}, {"api_name": "data.ai.task.game", "line_number": 23, "usage_type": "attribute"}, {"api_name": "data.ai.task", "line_number": 23, "usage_type": "name"}, {"api_name": "data.ai.task.SuccessTask", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "8475341334", "text": "\nimport bpy\nimport socket\nimport os\nimport sys\nimport json\nfrom bpy.app.handlers import persistent\n\n\nprint('\\n\\n\\n\\n\\n======================== INIT BLENDER SESSION ============================')\n\nhome = os.path.expanduser('~') \nincludepath = os.path.join(home, 'Documents', 'lab', 'm2b')\n\n\n# /// SETTINGS\nOBJ_FILE = ''\n\n\n# disable splash\nbpy.context.user_preferences.view.show_splash = False\n\n\ndef get_sceneinfo():\n \"\"\"Check if last argument is a file\n \"\"\"\n arg = sys.argv[-1]\n print('> CMD LINE ARG:', arg)\n if 'blend' in arg:\n basepath = os.path.dirname(arg)\n uuid = os.path.basename(arg)\n uuid = os.path.splitext(uuid)[0]\n jsonfile = os.path.join(basepath, uuid + '.json')\n with open(jsonfile) as conffile:\n conf_data = json.load(conffile)\n print(conf_data)\n return conf_data\n \n elif 'json' in arg:\n with open(arg) as conffile:\n conf_data = json.load(conffile)\n return conf_data\n\n\n\n\ndef import_obj(fp):\n old_state = list(bpy.context.scene.objects)\n bpy.ops.import_scene.obj(filepath=fp)\n new_state = list(bpy.context.scene.objects)\n return set(new_state) - set(old_state)\n\n\ndef setup_scene():\n print('> SETTING UP SCENE')\n\n print('> texture:', TEXTURE)\n for object in bpy.data.objects:\n if object.name == 'Cube':\n object.select = True\n bpy.ops.object.delete()\n\n new_items = import_obj(OBJ_FILE)\n for object in bpy.context.scene.objects:\n object.select = False\n\n scn = bpy.context.scene\n scn.render.engine = 'CYCLES'\n \n mat = 
bpy.data.materials.new('MayaTexture')\n mat.use_nodes = True\n texnode = mat.node_tree.nodes.new(type=\"ShaderNodeTexImage\")\n mat.node_tree.links.new(texnode.outputs['Color'], mat.node_tree.nodes['Diffuse BSDF'].inputs['Color'])\n if os.path.isfile(str(TEXTURE)):\n texnode.image = bpy.data.images.load(TEXTURE)\n\n for item in new_items:\n if item.type == 'MESH':\n ob = item\n mesh = ob.data\n mesh.materials.append(mat)\n item.select = True\n bpy.context.scene.objects.active = item\n\n \n preview_texture(TEXTURE)\n\n print('SAVING AS', BLEND)\n bpy.ops.wm.save_as_mainfile(filepath=BLEND, check_existing=False)\n\n\ndef update_maya():\n print('MSG > MAYA', PORT)\n host = '127.0.0.1'\n port = PORT\n #message = 'import sys;sys.path.append(\"' + includepath + '\");import m2b;m2b.update(\"' + UUID + '\")'\n message = 'import m2b;m2b.edit_mesh.update(\"' + UUID + '\")'\n \n maya = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n maya.connect((host, port))\n msg = bytes(message, 'UTF-8')\n try:\n print('sending')\n maya.send(msg)\n except:\n print('failed')\n print(msg)\n finally:\n print('closed')\n maya.close()\n\ndef deselect():\n for object in bpy.context.scene.objects:\n object.select = False\n\ndef preview_texture(image):\n print('> PREVIEW TEXTURE GENERATION', TEXTURE)\n\n for area in bpy.data.screens['Default'].areas:\n if area.type == 'VIEW_3D':\n for space in area.spaces:\n if space.type == 'VIEW_3D':\n space.viewport_shade = 'TEXTURED'\n space.show_textured_solid = True\n deselect()\n #mat = bpy.data.materials.new('TexMat')\n\n\ndef export():\n selection = bpy.context.selected_objects\n\n deselect()\n\n for object in bpy.context.scene.objects:\n if not object.hide_render:\n object.select = True\n bpy.ops.export_scene.obj(filepath=OBJ_FILE,\n use_materials=False,\n use_blen_objects=False,\n use_selection=True)\n\n # restore selection\n for object in bpy.context.scene.objects:\n object.select = False\n if object in selection:\n object.select = True\n\n update_maya()\n\n@persistent\ndef save_handler(dummy):\n export()\n\n\nclass BlenderBridge(object):\n def __init__(self):\n self.handler = save_handler\n\n\ninfo = get_sceneinfo()\nif info is not None:\n OBJ_FILE = info['obj']\n TEXTURE = info['tex']\n BLEND = info['blend']\n UUID = info['uuid']\n PORT = info['port']\n\nif os.path.isfile(BLEND):\n print('> matching blend file found. 
using that.')\nelse:\n setup_scene()\n\n\n\n\n\nif len(bpy.app.handlers.save_post) < 1:\n bpy.app.handlers.save_post.append(save_handler)\n", "repo_name": "woelper/maya_edit_outside", "sub_path": "b2m.py", "file_name": "b2m.py", "file_ext": "py", "file_size_in_byte": 4461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.expanduser", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 21, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "json.load", "line_number": 41, "usage_type": "call"}, {"api_name": "bpy.context", "line_number": 48, "usage_type": "attribute"}, {"api_name": "bpy.ops.import_scene.obj", "line_number": 49, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 49, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 50, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 58, "usage_type": "attribute"}, {"api_name": "bpy.ops.object.delete", "line_number": 61, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 61, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 64, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 67, "usage_type": "attribute"}, {"api_name": "bpy.data.materials.new", "line_number": 70, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "bpy.data.images.load", "line_number": 75, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 75, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 83, "usage_type": "attribute"}, {"api_name": "bpy.ops.wm.save_as_mainfile", "line_number": 89, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 89, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 99, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 99, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 99, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 113, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 119, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 130, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 134, "usage_type": "attribute"}, {"api_name": "bpy.ops.export_scene.obj", "line_number": 137, "usage_type": "call"}, {"api_name": "bpy.ops", 
"line_number": 137, "usage_type": "attribute"}, {"api_name": "bpy.context", "line_number": 143, "usage_type": "attribute"}, {"api_name": "bpy.app.handlers.persistent", "line_number": 150, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "bpy.app", "line_number": 177, "usage_type": "attribute"}, {"api_name": "bpy.app.handlers.save_post.append", "line_number": 178, "usage_type": "call"}, {"api_name": "bpy.app", "line_number": 178, "usage_type": "attribute"}]} +{"seq_id": "14405725921", "text": "import json\nimport os\nimport queue\nimport time\n\nfrom loguru import logger\nimport paho.mqtt.client as mqtt\nimport boto3\nimport threading\n\nfrom fastapi import FastAPI, HTTPException\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.status import (\n HTTP_202_ACCEPTED,\n HTTP_404_NOT_FOUND, HTTP_409_CONFLICT)\n\n\nfrom hobs.__init__ import __version__\nfrom hobs.csv import csvManager\nfrom hobs.sub import Subscriber\n\n\nlogger.info({'version': __version__})\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_credentials=True,\n allow_methods=['*'],\n allow_headers=['*'],\n)\n\nMQTT_KEEP_ALIVE = 5\nmqtt_port = os.getenv('HOBS_MQTT_PORT')\nmqtt_host = os.getenv('HOBS_MQTT_HOST')\nca_certs = os.getenv('HOBS_CA_CERTS')\ncertifile = os.getenv('HOBS_CERTFILE')\nkeyfile = os.getenv('HOBS_KEYFILE')\n\nsub = threading.Thread(target=Subscriber)\nsub.start()\n\n\ndef device_get():\n dyn_resource = boto3.resource('dynamodb')\n table_exp = dyn_resource.Table('lab_rpi')\n response = table_exp.scan()\n rpidev = []\n # logger.info(response['Items'])\n for i in range(len(response['Items'])):\n rpidev.append({\n 'name': response['Items'][i]['name'],\n 'rpi_id': response['Items'][i]['rpi_id'],\n })\n return rpidev\n\n\nresponses_post = {\n HTTP_404_NOT_FOUND: {\n \"description\": \"Device not found\"\n },\n # HTTP_409_CONFLICT: {\n # \"description\": \"Already in use\",\n # \"content\": {\n # \"application/json\": {\n # }\n # }\n # },\n}\n\nresponses_get = {\n HTTP_404_NOT_FOUND: {\n \"description\": \"Device not found\"\n },\n HTTP_409_CONFLICT: {\n \"description\": \"The status of container was abnormal\",\n \"content\": {\n \"application/json\": {\n }\n }\n },\n}\n\nresponses_put = responses_get\n\nresponses_delete = {\n HTTP_404_NOT_FOUND: {\n \"description\": \"Device not found\"\n },\n}\n\n\nclass MqttRequester(mqtt.Client):\n def __init__(\n self,\n url=mqtt_host,\n port=int(mqtt_port),\n ca_certs=ca_certs,\n certfile=certifile,\n keyfile=keyfile,\n ):\n mqtt.Client.__init__(self)\n self.tls_set(\n ca_certs=ca_certs,\n certfile=certfile,\n keyfile=keyfile\n )\n self.tls_insecure_set(True)\n self._queue = queue.Queue(1)\n self.connect(url, port, MQTT_KEEP_ALIVE)\n self.loop_start()\n\n # def on_connect(self, mqttc, obj, flags, rc):\n # logger.info({\n # 'event': 'on_connect',\n # 'rc': str(rc),\n # })\n\n # def on_disconnect(self, client, userdata, rc):\n # logger.info({\n # 'event': 'on_disconnect',\n # 'userdata': userdata,\n # 'rc': str(rc),\n # })\n\n def on_message(self, mqttc, obj, msg):\n # logger.info({\n # 'event': 'on_message',\n # 'topic': msg.topic,\n # 'qos': str(msg.qos),\n # 'payload': msg.payload.decode(),\n # })\n self._queue.put(msg.payload)\n\n # def on_publish(self, mqttc, obj, mid):\n # logger.debug({\n # 'event': 'on_publish',\n # 'mid': str(mid),\n # })\n\n # def on_subscribe(self, mqttc, obj, 
mid, granted_qos):\n    #     logger.debug({\n    #         'event': 'on_subscribe',\n    #         'mid': str(mid),\n    #         'qos': str(granted_qos),\n    #     })\n\n    def on_log(self, mqttc, obj, level, string):\n        if level is mqtt.MQTT_LOG_ERR:\n            logger.error({\n                'event': 'on_log',\n                'level': level,\n                'string': string,\n            })\n\n    def run(self, pub, dev_id):\n        msg_id = str(time.time())\n\n        self.subscribe('/dev_id/' + dev_id + '/srw/#')\n\n        try:\n            if pub == 'scan':\n                self.publish('/dev_id/' + dev_id + '/wrs/', 'scan')\n                resmsg = self._queue.get(block=True, timeout=15.0).decode()\n\n            elif pub == 'get_status':\n                self.publish('/dev_id/' + dev_id + '/wrs/', 'get_status')\n                resmsg = self._queue.get(block=True, timeout=0.9).decode()\n\n            elif pub == 'update':\n                self.publish('/dev_id/' + dev_id + '/wrs/', 'update')\n                resmsg = self._queue.get(block=True, timeout=10.0).decode()\n\n            else:\n                resmsg = \"invalid pub\"\n\n            # logger.debug({\n            #     'msg_id': msg_id,\n            #     'resmsg': resmsg,\n            # })\n\n            return json.loads(resmsg)\n\n        except queue.Empty as e:\n            logger.warning({\n                'title': 'mqtt',\n                'dev-id': dev_id,\n                'action': 'timeout',\n                'except': e,\n            })\n            raise HTTPException(\n                status_code=HTTP_404_NOT_FOUND,\n                detail=\"Item not found\"\n            )\n\n        except Exception as e:\n            logger.exception(e)\n            raise e\n\n        finally:\n            self.disconnect()\n\n\n@app.get(\n    '/ble',\n    status_code=HTTP_202_ACCEPTED,\n    responses={**responses_get},\n    tags=['device'],)\ndef get_ble(dev_id: str):\n    try:\n        logger.info({\n            'dev-id': dev_id,\n        })\n\n        return MqttRequester().run('scan', dev_id)\n\n    except Exception as e:\n        logger.exception(e)\n        raise HTTPException(\n            status_code=HTTP_404_NOT_FOUND,\n            detail=\"Item not found\")\n\n\n@app.get(\n    '/status',\n    tags=['device'])\ndef get_status(dev_id: str):\n    try:\n        # logger.info({\n        #     'method': 'GET',\n        #     'dev-id': dev_id,\n        # })\n\n        return MqttRequester().run('get_status', dev_id)\n\n    except Exception as e:\n        logger.exception(e)\n        raise HTTPException(status_code=404, detail=\"Item not found\")\n\n\n@app.put(\n    '/',\n    tags=['device'],)\ndef update(dev_id: str):\n    try:\n        logger.info({\n            'method': 'GET',\n            'dev-id': dev_id,\n        })\n\n        return MqttRequester().run('update', dev_id)\n\n    except Exception as e:\n        logger.exception(e)\n        raise HTTPException(status_code=404, detail=\"Item not found\")\n\n\n@app.get(\n    '/db/device',\n    tags=['db'],\n    )\ndef get_device():\n    rpi = device_get()\n\n    rpid = {\n        'devices': rpi\n    }\n    return rpid\n\n\n# XXX: put in a status code, change the CODE number, and create the detail in Details,\n@app.post(\n    '/db/csv',\n    tags=['db'],\n)\ndef get_csv(experiment_id: str):\n    csvm = csvManager(experiment_id).execute()\n    if csvm != 0:\n        return {\n            'code': 1,\n            'detail': 'success',\n        }\n    else:\n        return {\n            'code': 0,\n            'detail': 'Invalid exp_name',\n        }", "repo_name": "nbbeom/ble_serv", "sub_path": "hobs/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 6821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "loguru.logger.info", "line_number": 23, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 23, "usage_type": "name"}, {"api_name": "hobs.__init__.__version__", "line_number": 23, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 25, "usage_type": "call"}, {"api_name": "starlette.middleware.cors.CORSMiddleware", "line_number": 27, "usage_type": "argument"}, {"api_name": "os.getenv", "line_number": 35, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 36, "usage_type": "call"}, {"api_name": 
"os.getenv", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 38, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 41, "usage_type": "call"}, {"api_name": "hobs.sub.Subscriber", "line_number": 41, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 46, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 60, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 73, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_409_CONFLICT", "line_number": 76, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 88, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client", "line_number": 94, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client", "line_number": 94, "usage_type": "name"}, {"api_name": "paho.mqtt.client.Client.__init__", "line_number": 103, "usage_type": "call"}, {"api_name": "paho.mqtt.client.Client", "line_number": 103, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client", "line_number": 103, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 110, "usage_type": "call"}, {"api_name": "paho.mqtt.client.MQTT_LOG_ERR", "line_number": 150, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client", "line_number": 150, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 151, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 151, "usage_type": "name"}, {"api_name": "time.time", "line_number": 158, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 183, "usage_type": "call"}, {"api_name": "queue.Empty", "line_number": 185, "usage_type": "attribute"}, {"api_name": "loguru.logger.warning", "line_number": 186, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 186, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 192, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 193, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 198, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 198, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 212, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 212, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 219, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 219, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 220, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 221, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_202_ACCEPTED", "line_number": 207, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 238, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 238, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 239, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 247, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 247, "usage_type": "name"}, {"api_name": "loguru.logger.exception", "line_number": 255, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 255, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 256, "usage_type": "call"}, {"api_name": "hobs.csv.csvManager", 
"line_number": 278, "usage_type": "call"}]} +{"seq_id": "748827434", "text": "from gensim.models import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec \nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom bert_serving.client import BertClient\n\n\nfilename = \"MTURK-771.csv\"\n\nfor i in range(1):\n if i == 0:\n # Google word2vec\n model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)\n elif i == 1:\n # Glove\n #glove_input_file = 'glove.6B.200d.txt'\n word2vec_output_file = 'glove.6B.50d.txt.word2vec'\n #glove2word2vec(glove_input_file, word2vec_output_file)\n model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)\n\n elif i== 2:\n word2vec_output_file = 'glove.6B.100d.txt.word2vec'\n model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)\n elif i == 3:\n word2vec_output_file = 'glove.6B.200d.txt.word2vec'\n model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)\n elif i == 4:\n word2vec_output_file = 'glove.6B.300d.txt.word2vec'\n model = KeyedVectors.load_word2vec_format(word2vec_output_file, binary=False)\n elif i == 5:\n # FastText\n model = KeyedVectors.load_word2vec_format('wiki-news-300d-1M.vec') \n elif i == 6:\n model = KeyedVectors.load_word2vec_format('wiki-news-300d-1M-subword.vec')\n\n dev = 0\n length = 0\n final = []\n summ = 0\n nf = 0\n with open(filename) as f:\n lis = [line.split() for line in f] \n # create a list of lists\n length = len(lis)\n for i, x in enumerate(lis): #print the list items \n if (i == 0):\n continue\n t = x[0].split(',')\n #t = x # For space separated files\n a = t[0]\n b = t[1]\n c = t[2]\n c = float(c)*2\n summ += float(c)\n try:\n res = model.similarity(a,b)*10\n except KeyError:\n res = float(c)\n nf = nf + 1\n dev = dev + abs(res - float(c))\n dev = dev/length \n final.append(dev)\n summ = summ/length\n percentage = (dev/summ)*100\n print (dev)\n print(percentage)\n print(nf)\n \n \n \n# BERT \nbc = BertClient() \ndev = 0\nlength = 0\nfinal = []\nsumm = 0\nnf = 0\nwith open(filename) as f:\n lis = [line.split() for line in f] \n # create a list of lists\n length = len(lis)\n for i, x in enumerate(lis): #print the list items \n if (i == 0):\n continue\n t = x[0].split(',')\n #t = x\n a = t[0]\n b = t[1]\n c = t[2]\n c = float(c)*2\n summ += float(c)\n try:\n #res = model.similarity(a,b)*10\n x = bc.encode([a, b])\n t1 = x[0]\n t2 = x[1]\n t1 = t1.reshape(1,768)\n t2 = t2.reshape(1,768)\n res = cosine_similarity(t1,t2)\n res = res[0][0]*10\n except KeyError:\n res = float(c)\n nf = nf + 1\n dev = dev + abs(res - float(c))\ndev = dev/length \nfinal.append(dev)\nsumm = summ/length\npercentage = (dev/summ)*100\nprint(dev)\nprint(percentage)\nprint(nf) ", "repo_name": "YatinGupta777/Comparing-different-Word-Embeddings", "sub_path": "comparison.py", "file_name": "comparison.py", "file_ext": "py", "file_size_in_byte": 3249, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 12, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 12, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 18, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 18, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 22, "usage_type": 
"call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 22, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 25, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 25, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 28, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 28, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 31, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 31, "usage_type": "name"}, {"api_name": "gensim.models.KeyedVectors.load_word2vec_format", "line_number": 33, "usage_type": "call"}, {"api_name": "gensim.models.KeyedVectors", "line_number": 33, "usage_type": "name"}, {"api_name": "bert_serving.client.BertClient", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.metrics.pairwise.cosine_similarity", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "11027290535", "text": "import datetime\nfrom dateutil.relativedelta import *\nstart = datetime.datetime(1901, 1, 1)\nend = datetime.datetime(2000, 12, 1)\n# print(start)\n\n# print(start.weekday())\ncount = 0\nwhile start < end:\n\tif start.weekday() == 6:\n\t\tcount += 1\n\tstart += relativedelta(months=+1)\nprint(count)\n\n\n\n\n\n\n", "repo_name": "umairkarel/Project-Euler", "sub_path": "#19.py", "file_name": "#19.py", "file_ext": "py", "file_size_in_byte": 291, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime", "line_number": 3, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "11310786856", "text": "import json\nimport censys.ipv4\nfrom google.cloud import bigquery\n\nfrom censys_ml import utils\n\n\ndef data_from_json_file(path):\n with open(path, 'r') as in_file:\n return json.load(in_file)\n\n\ndef data_to_json_file(path, data):\n with open(path, 'w') as out_file:\n json.dump(data, out_file, indent=2, sort_keys=True)\n\n\ndef data_from_txt_file(path):\n with open(path, 'r') as in_file:\n return [p.replace('\\n', '')\n for p in in_file.readlines()]\n\n\ndef parse_html(raw_html):\n rows = raw_html.replace('&', 'and').split('')\n res = set()\n for row in rows:\n row_entry = row.split('')[0]\n name = row_entry.split('table-field-prefix\">')[-1].replace('', '')\n res.add(name.strip())\n return list(res)\n\n\ndef filter_leaf_fields(fields):\n res = []\n for field in fields:\n parent = '.'.join(field.split('.')[:-1])\n if (parent != '') and (parent in res):\n del res[res.index(parent)]\n res.append(field)\n return res\n\n\ndef filter_parent_fields(fields):\n return list({f.split('.')[0] for f in fields})\n\n\ndef parse_for_domain_names(fields):\n res = []\n leaves = filter_leaf_fields(fields)\n for leaf in leaves:\n if leaf[0] == 'p' and ('name' in leaf or 'dn' in leaf):\n res.append(leaf)\n return res\n\n\ndef bigquery_dry_run(client, field, table):\n project = ''\n dataset_id = ''\n query = (\"SELECT {} FROM `{}.{}.{}` WHERE ip='8.8.8.8';\".format(\n field, project, dataset_id, table\n ))\n job_config = bigquery.QueryJobConfig()\n job_config.dry_run = True\n job_config.use_query_cache = False\n query_job = client.query(query, job_config=job_config)\n\n # A dry run query completes immediately.\n assert query_job.state == 'DONE'\n assert query_job.dry_run\n return 
query_job.total_bytes_processed/(2.0 ** 20)  # in MiB\n\n\ndef parse_schema(fields):\n    result = {}\n    for field in fields:\n        j_field = field\n\n        if 'fields' in j_field and len(j_field['fields']) != 0:\n            parsed = parse_schema(j_field['fields'])\n            for k in parsed:\n                result[j_field['name'] + '.' + k] = parsed[k]\n        else:\n            result[j_field['name']] = j_field\n\n    return result\n\n# def type_precedence(item1, item2):\n#     # If the type changed over time, return the type with highest number\n#     precedence = {\n#         'STRING': 0,\n#         'INTEGER': 1\n#     }\n#     return item1\n\n\n# def handle_time_changes(model_definition, json_schema, k):\n#     TODO : handle model changes over time (str in 2018 -> int in 2019)\n#     if k in model_definition:\n#         model_definition[k] = type_precedence(item1=json_schema[k],\n#                                               item2=model_definition[k])\n#     else:\n#         pass\n\nreport_cache = {}\ncensys_api = None\nREPORT_WHITELIST = {\n    'country_code'\n}\n\ndef setup_censys_api():\n    # Rebinding the module-level handle requires a global declaration;\n    # without it this function raises UnboundLocalError on the None check.\n    global censys_api\n    if censys_api is None:\n        CENSYS_API_KEY_FILEPATH = utils.get_config()['censys']['report_key']\n        CENSYS_API_KEY = utils.get_json_data(filepath=CENSYS_API_KEY_FILEPATH)\n        censys_api = censys.ipv4.CensysIPv4(**CENSYS_API_KEY)\n\ndef grab_top_string_values(field, n_values=20):\n    setup_censys_api()\n    report_args = {\n        \"query\": \"\",\n        \"field\": field,\n        \"buckets\": n_values\n    }\n\n    if field not in report_cache:\n        try:\n            report = censys_api.report(**report_args)\n            report_cache[field] = report\n        except Exception as e:\n            print(f\"[!] Ran into an error '{str(e)}' when \"\n                  f\"generating a report for field '{field}'...\")\n            report_cache[field] = dict()\n\n    return report_cache[field]\n\n\ndef include_top_occurrences(json_schema, k):\n    if json_schema[k]['type'] == 'STRING' and any([v in k for v in REPORT_WHITELIST]):\n        json_schema[k]['top_values'] = grab_top_string_values(field=k)\n\n\ndef update_model_definition(json_schema, model_definition):\n    for k in json_schema:\n        include_top_occurrences(json_schema, k)\n        model_definition[k] = json_schema[k]\n        # handle_time_changes(model_definition, json_schema, k)\n\n\ndef get_model_definition(client, dataset_ref):\n    model_definition = {}\n    total = len([0 for _ in client.list_tables(dataset_ref)])\n\n    utils.printProgressBar(iteration=0, total=total,\n                           prefix='Table : ', suffix='Complete',\n                           length=50)\n    for i, table in enumerate(client.list_tables(dataset_ref)):\n        utils.printProgressBar(iteration=i, total=total,\n                               prefix='Table : {}'.format(table.table_id),\n                               suffix='Complete', length=50)\n\n        table_ref = dataset_ref.table(table.table_id)\n        raw_schema = client.get_table(table_ref).schema\n        json_schema = parse_schema([r.to_api_repr() for r in raw_schema])\n        update_model_definition(json_schema, model_definition)\n    return model_definition\n\n\ndef save_model_definition(model_definition):\n    outfile = utils.get_config()['censys']['model_outfile']\n    with open(outfile, 'w') as _file:\n        json.dump(model_definition, _file, indent=2)\n\n\ndef main():\n    print('[x] Gathering censys data model')\n    config = utils.get_config()\n    service_file = config['censys']['service_file']\n    project = config['censys']['project']\n    dataset_id = config['censys']['dataset_id']\n\n    client = bigquery.Client.from_service_account_json(service_file)\n    dataset_ref = client.dataset(dataset_id, project=project)\n    model_def = get_model_definition(client, dataset_ref)\n    save_model_definition(model_definition=model_def)\n\n# OLD\n# def main():\n#     fields = data_from_json('../../raw/aggregated_columns.json')\n#     tables = ['20170106', '20180530', '20190211']\n#     costs = {}\n#     for t, 
table in enumerate(tables):\n# print \"\\n[X] Table {}\".format(table)\n#\n# columns = only_parents()\n# for i, field in enumerate(only_parents(fields)):\n# print \"[X] Field {}.{} ) {}\".format(t, i, field)\n# if field in costs:\n# costs[field].append(get_dry_run(field, table) if field in columns else 0)\n# else:\n# costs[field] = [get_dry_run(field, table) if field in columns else 0]\n# avg_costs = {k: max(costs[k]) for k in costs}\n# data_to_json('../../raw/aggregated_costs.json', avg_costs)\n# print(\"{} query will process {} bytes.\".format(f, avg_proc))\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "censys-ml/censys-ml", "sub_path": "censys_ml/update_censys_model.py", "file_name": "update_censys_model.py", "file_ext": "py", "file_size_in_byte": 6518, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 15, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.QueryJobConfig", "line_number": 63, "usage_type": "call"}, {"api_name": "google.cloud.bigquery", "line_number": 63, "usage_type": "name"}, {"api_name": "censys_ml.utils.get_config", "line_number": 113, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 113, "usage_type": "name"}, {"api_name": "censys_ml.utils.get_json_data", "line_number": 114, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 114, "usage_type": "name"}, {"api_name": "censys.ipv4.ipv4.CensysIPv4", "line_number": 115, "usage_type": "call"}, {"api_name": "censys.ipv4.ipv4", "line_number": 115, "usage_type": "attribute"}, {"api_name": "censys.ipv4", "line_number": 115, "usage_type": "name"}, {"api_name": "censys_ml.utils.printProgressBar", "line_number": 153, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 153, "usage_type": "name"}, {"api_name": "censys_ml.utils.printProgressBar", "line_number": 157, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 157, "usage_type": "name"}, {"api_name": "censys_ml.utils.get_config", "line_number": 169, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 169, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 171, "usage_type": "call"}, {"api_name": "censys_ml.utils.get_config", "line_number": 176, "usage_type": "call"}, {"api_name": "censys_ml.utils", "line_number": 176, "usage_type": "name"}, {"api_name": "google.cloud.bigquery.Client.from_service_account_json", "line_number": 181, "usage_type": "call"}, {"api_name": "google.cloud.bigquery.Client", "line_number": 181, "usage_type": "attribute"}, {"api_name": "google.cloud.bigquery", "line_number": 181, "usage_type": "name"}]} +{"seq_id": "35651225046", "text": "import pytest\nfrom modules import MainPage\nfrom modules import SortPages\nfrom modules import Menu\nfrom modules import Genres\nfrom helper import page_helper\nfrom locators.Genres import *\nfrom locators.Sort import *\nfrom locators.User import *\n\n\nclass TestGenres:\n @pytest.mark.smoke\n def test_genres_name(self, browser):\n Menu.go_to_menu_genres(browser)\n name1 = page_helper.get_text_element(browser, *GenresPageLocators.GENRE_NAME)\n Genres.open_random_genre(browser)\n h1 = page_helper.get_h1_text(browser)\n assert name1 in h1\n\n @pytest.mark.regression\n def test_genres_sorting(self, browser):\n Menu.go_to_menu_genres(browser)\n SortPages.press_second_sort(browser)\n name1 = 
page_helper.get_class_element(browser, *SortLocators.SECOND_SORT)\n name2 = SortLocators.ACTIVE_SORT\n assert name1 in name2\n\n @pytest.mark.smoke\n def test_genres_like_track_button(self, browser):\n Menu.go_to_menu_genres(browser)\n Genres.open_random_genre(browser)\n MainPage.like_track(browser)\n assert page_helper.is_element_clickable(browser, *UserLocators.VK_AUTH)\n", "repo_name": "kocteban/Zvonko-test", "sub_path": "Mp3ha/Mp3ha/tests/test_genres.py", "file_name": "test_genres.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "modules.Menu.go_to_menu_genres", "line_number": 15, "usage_type": "call"}, {"api_name": "modules.Menu", "line_number": 15, "usage_type": "name"}, {"api_name": "helper.page_helper.get_text_element", "line_number": 16, "usage_type": "call"}, {"api_name": "helper.page_helper", "line_number": 16, "usage_type": "name"}, {"api_name": "modules.Genres.open_random_genre", "line_number": 17, "usage_type": "call"}, {"api_name": "modules.Genres", "line_number": 17, "usage_type": "name"}, {"api_name": "helper.page_helper.get_h1_text", "line_number": 18, "usage_type": "call"}, {"api_name": "helper.page_helper", "line_number": 18, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "modules.Menu.go_to_menu_genres", "line_number": 23, "usage_type": "call"}, {"api_name": "modules.Menu", "line_number": 23, "usage_type": "name"}, {"api_name": "modules.SortPages.press_second_sort", "line_number": 24, "usage_type": "call"}, {"api_name": "modules.SortPages", "line_number": 24, "usage_type": "name"}, {"api_name": "helper.page_helper.get_class_element", "line_number": 25, "usage_type": "call"}, {"api_name": "helper.page_helper", "line_number": 25, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 21, "usage_type": "attribute"}, {"api_name": "modules.Menu.go_to_menu_genres", "line_number": 31, "usage_type": "call"}, {"api_name": "modules.Menu", "line_number": 31, "usage_type": "name"}, {"api_name": "modules.Genres.open_random_genre", "line_number": 32, "usage_type": "call"}, {"api_name": "modules.Genres", "line_number": 32, "usage_type": "name"}, {"api_name": "modules.MainPage.like_track", "line_number": 33, "usage_type": "call"}, {"api_name": "modules.MainPage", "line_number": 33, "usage_type": "name"}, {"api_name": "helper.page_helper.is_element_clickable", "line_number": 34, "usage_type": "call"}, {"api_name": "helper.page_helper", "line_number": 34, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "73626917604", "text": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nurl = 'https://www.avito.ma/fr/maroc/voitures-à_vendre'\n\ndriver = webdriver.Chrome()\ndriver.get(url)\n\n# Create an empty list to store phone numbers\nphone_numbers = []\n\n# Loop indefinitely to check for new phone numbers\nwhile True:\n # Extract links to all vehicle posts on the first page\n links = []\n posts = driver.find_elements(\"xpath\", '//*[@id=\"__next\"]/div/main/div/div[6]/div[1]/div/div[2]')\n for post in posts:\n link = post.find_element(\"xpath\", './/a').get_attribute('href')\n links.append(link)\n\n # Extract phone numbers from each vehicle post\n for link in links:\n 
driver.get(link)\n try:\n # Click the button to reveal the phone number\n button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"__next\"]/div/main/div/div[3]/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/div/div/div/div[4]/button[2]')))\n button.click()\n\n # Get the phone number\n phone_number = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"__next\"]/div/main/div/div[2]/div/div/div/div/a/span/span')))\n phone_number = phone_number.text.strip()\n\n # Check if the phone number is new\n if phone_number not in phone_numbers:\n # Save the phone number to a file\n with open('phone_number.txt', 'a') as f:\n f.write(phone_number + '\\n')\n phone_numbers.append(phone_number)\n\n print(phone_number)\n\n except:\n print(\"Phone number not found\")\n\n # Refresh the main page to check for new posts\n driver.quit()\n driver = webdriver.Chrome()\n time.sleep(6)\n driver.get(url)\n time.sleep(5)\n\ndriver.quit()", "repo_name": "abdelilahBouaiss/marketing-tools", "sub_path": "hamza.py", "file_name": "hamza.py", "file_ext": "py", "file_size_in_byte": 1979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 29, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 33, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 50, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 50, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "41702662499", "text": "\"\"\"\nCreated on June 24, 2022\n@author: Lance A. 
Endres\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimport cv2\n\nfrom lendres.plotting.PlotHelper import PlotHelper\n\n\nclass ImageHelper():\n arrayImageSize = 2.5\n\n\n @classmethod\n def DefaultSettings(cls):\n \"\"\"\n Gets the default image plotting settings parameter file.\n\n Recommended usage:\n PlotHelper.PushSettings(parameterFile=ImageHelper.DefaultSettings())\n # Display images.\n ...\n PlotHelper.PopSettings()\n\n Returns\n -------\n : str\n The name of the parameters file..\n \"\"\"\n return \"imagedefault\"\n\n\n @classmethod\n def PlotImage(cls, image, title=None, size=6, colorConversion=None):\n \"\"\"\n Plot example image.\n\n Parameters\n ----------\n image : image\n Image to plot.\n title : string\n Title of the figure.\n size : float\n Size (width and height) of figure.\n colorConversion : OpenCV color conversion enumeration.\n Color conversion to perform before plotting. Images are plotted in RGB. For example, if the\n image is in BGR (as is used in OpenCV) then cv2.COLOR_BGR2RGB should be passed.\n\n Returns\n -------\n figure : matplotlib.figure.Figure\n The newly created figure.\n \"\"\"\n # Defining the figure size. Automatically adjust for the number of images to be displayed.\n PlotHelper.Format()\n\n # Adding subplots with 3 rows and 4 columns.\n figure = plt.gcf()\n figure.set_figwidth(size)\n figure.set_figheight(size)\n\n axis = plt.gca()\n\n # Plotting the image.\n if colorConversion != None:\n image = cv2.cvtColor(image, colorConversion)\n axis.imshow(image)\n\n if title != None:\n axis.set_title(title)\n\n # Turn off the grid lines.\n axis.grid(False)\n\n plt.show()\n return figure\n\n\n @classmethod\n def CreateImageArrayPlot(cls, images, labels, columns=4, colorConversion=None):\n \"\"\"\n Plots the images in an array.\n\n Parameters\n ----------\n images : array like\n Set of images to plot.\n labels : array like\n Set of labels to use for the individual images.\n columns : integer\n The number of columns to plot.\n colorConversion : OpenCV color conversion enumeration.\n Color conversion to perform before plotting. Images are plotted in RGB. For example, if the\n image is in BGR cv2.COLOR_BGR2RGB should be passed.\n\n Returns\n -------\n None.\n \"\"\"\n # Calculate required values.\n numberOfImages = len(images)\n rows = math.ceil(numberOfImages / columns)\n\n # Defining the figure size. Automatically adjust for the number of images to be displayed.\n PlotHelper.Format()\n figure = plt.figure()\n figure.set_figwidth(columns*ImageHelper.arrayImageSize+2)\n figure.set_figheight(rows*ImageHelper.arrayImageSize+2)\n\n # Position in the index array/range.\n k = -1\n\n for i in range(columns):\n for j in range(rows):\n # Adding subplots with 3 rows and 4 columns.\n axis = figure.add_subplot(rows, columns, i*rows+j+1)\n\n # Plot the image. 
Convert colors if required.\n k +=1\n image = images[k]\n if colorConversion != None:\n image = cv2.cvtColor(image, colorConversion)\n axis.imshow(image)\n\n # Turn off white grid lines.\n axis.grid(False)\n\n axis.set_title(labels[k], y=0.9)\n\n # Adjust spacing so titles don't run together.\n plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)\n\n plt.show()\n\n\n @classmethod\n def DisplayColorChannels(cls, image, colorConversion=None):\n \"\"\"\n Displays an image along with the individual color channels.\n\n Parameters\n ----------\n image : image\n Image in an array.\n colorConversion : OpenCV color conversion enumeration.\n Color conversion to perform before plotting. Images are plotted in RGB. For example, if the\n image is in BGR cv2.COLOR_BGR2RGB should be passed.\n\n Returns\n -------\n None.\n \"\"\"\n imageArray = cv2.split(image)\n imageArray.insert(0, image)\n titles = [\"Original\", \"Blue\", \"Green\", \"Red\"]\n ImageHelper.CreateImageArrayPlot(imageArray, titles, columns=4, colorConversion=colorConversion)\n\n\n @classmethod\n def DisplayChromaKey(cls, image, lowerBounds, upperBounds, maskBlurSize=3, colorConversion=None, inputBoundsFormat=\"hsv\"):\n \"\"\"\n Displays an image along with the image separated into two components based on chroma keying.\n\n Parameters\n ----------\n image : image\n Image in an array.\n lowerBounds : numpy array of 3 values.\n Lower bounds of mask.\n maskBlurSize : int\n Size of the blur to apply to the mask. Must be an odd number.\n upperBounds : numpy array of 3 values.\n Upper bounds of mask.\n colorConversion : OpenCV color conversion enumeration.\n Color conversion to perform before plotting. Images are plotted in RGB. For example, if the\n image is in BGR cv2.COLOR_BGR2RGB should be passed.\n inputBoundsFormat : string\n Format of lowerBounds and upperBounds.\n\n Returns\n -------\n None.\n \"\"\"\n imageArray = ImageHelper.ChromaKey(image, lowerBounds, upperBounds, maskBlurSize, inputBoundsFormat)\n\n imageArray.insert(0, image)\n titles = [\"Original\", \"Masked Image\", \"Image Remainder\", \"Mask\"]\n ImageHelper.CreateImageArrayPlot(imageArray, titles, columns=4, colorConversion=colorConversion)\n\n\n @classmethod\n def ApplyColorConversion(cls, images, colorConversion):\n \"\"\"\n Applies a color conversion to the images.\n\n Parameters\n ----------\n images : array like set of images\n Images in an array.\n colorConversion : OpenCV color conversion enumeration.\n Color conversion to perform before plotting. Images are plotted in RGB. For example, if the\n image is in BGR cv2.COLOR_BGR2RGB should be passed.\n\n Returns\n -------\n newImages : array like set of images\n The new images with the conversion applied.\n \"\"\"\n newImages = np.empty(images.shape, dtype=images.dtype)\n\n if len(images.shape) < 4:\n # Only one image provided. Shape of input is similar to (width, height, color_depth).\n newImages = cv2.cvtColor(images, colorConversion)\n else:\n # More than one image provided.\n for i in range(len(images)):\n newImages[i] = cv2.cvtColor(images[i], colorConversion)\n\n return newImages\n\n\n @classmethod\n def ApplyGaussianBlur(cls, images, **kwargs):\n \"\"\"\n Applies a gaussian blur to the images.\n\n Parameters\n ----------\n images : array like set of images\n Images in an array.\n **kwargs : keyword arguments\n Arguments passed to the Gaussian filter. 
For example, \"ksize=(5,5), sigmaX=0\"\n\n Returns\n -------\n newImages : array like set of images\n The new images with the blur applied.\n \"\"\"\n newImages = np.empty(images.shape, dtype=images.dtype)\n\n if len(images.shape) < 4:\n # Only one image provided. Shape of input is similar to (width, height, color_depth).\n newImages = cv2.GaussianBlur(images, **kwargs)\n else:\n # More than one image provided.\n for i in range(len(images)):\n newImages[i] = cv2.GaussianBlur(images[i], **kwargs)\n\n return newImages\n\n\n @classmethod\n def ApplyHighPassFilter(cls, images, convertToGrey=True, **kwargs):\n \"\"\"\n Applies a high pass filter to images(s).\n\n Parameters\n ----------\n images : array like set of images\n Images in an array.\n convertToGrey : TYPE, optional\n DESCRIPTION. The default is True.\n **kwargs : keyword arguments\n Arguments passed to the Gaussian filter. For example, \"ksize=(21, 21), sigmaX=3\"\n\n Returns\n -------\n highPass : array like set of images\n The high passed images.\n \"\"\"\n # The high pass filter is created by subtracting a low pass filter from the original image(s).\n lowPass = cls.ApplyGaussianBlur(images, **kwargs)\n highPass = images - lowPass\n\n # If specified, the images are converted to a greyish color. This is the expected result of a high pass.\n if convertToGrey:\n highPass -= 127\n\n return highPass\n\n\n @classmethod\n def ChromaKey(cls, image, lowerBounds, upperBounds, maskBlurSize=3, inputBoundsFormat=\"hsv\"):\n \"\"\"\n Splits the image into two components based on chroma keying.\n\n Parameters\n ----------\n image : image\n Image in an array.\n lowerBounds : numpy array of 3 values.\n Lower bounds of mask.\n upperBounds : numpy array of 3 values.\n Upper bounds of mask.\n maskBlurSize : int\n Size of the blur to apply to the mask. Must be an odd number.\n inputBoundsFormat : string\n Format of lowerBounds and upperBounds.\n\n Returns\n -------\n maskedImage : image\n Part of the image that passes the mask.\n imageRemainder : image\n Part of the image that did not pass the mask.\n mask : image\n Mask used on the image.\n \"\"\"\n imageArray = []\n if inputBoundsFormat == \"bgr\":\n imageArray = ImageHelper.ChromaKeyWithBGR(image, lowerBounds, upperBounds, maskBlurSize)\n elif inputBoundsFormat == \"hsv\":\n imageArray = ImageHelper.ChromaKeyWithHSV(image, lowerBounds, upperBounds, maskBlurSize)\n else:\n raise Exception(\"Input bounds format argument not valid.\")\n\n return imageArray\n\n\n @classmethod\n def GetChromaKeyPart(cls, images, lowerBounds, upperBounds, maskBlurSize=3, inputBoundsFormat=\"hsv\", keep=\"bounded\"):\n \"\"\"\n Applies a chroma key filter to the images and returns the portion of interest.\n\n The ChromaKey functions splits an image into 3 parts, the bounded part, the remained, and the mask. This function\n goes through an array of images and returns just one of those parts for all images.\n\n Parameters\n ----------\n images : array like set of images\n Images in an array.\n lowerBounds : numpy array of 3 values.\n Lower bounds of mask.\n upperBounds : numpy array of 3 values.\n Upper bounds of mask.\n maskBlurSize : int\n Size of the blur to apply to the mask. 
Must be an odd number.\n inputBoundsFormat : string\n Format of lowerBounds and upperBounds.\n keep : string\n Part of the split image to keep.\n bounded : The original image that is bounded by the input.\n remainder : The original image that is outside of the input bounds.\n mask : The mask used to separate the image.\n\n Returns\n -------\n newImages : array like set of images\n An array of images that contains the specified part of the split image.\n \"\"\"\n\n \"\"\"\n Should be updated to use np arrays like ApplyGaussianBlur.\n \"\"\"\n\n keepIndex = 0\n if keep == \"bounded\":\n keepIndex = 0\n elif keep == \"remainder\":\n keepIndex = 1\n elif keep == \"mask\":\n keepIndex = 2\n else:\n raise Exception(\"The input argument \\\"keep\\\" contains an invalid value.\")\n\n newImages = []\n\n for i in range(len(images)):\n imageArray = ImageHelper.ChromaKey(images[i], lowerBounds, upperBounds, maskBlurSize, inputBoundsFormat)\n newImages.append(imageArray[keepIndex])\n\n return newImages\n\n\n @classmethod\n def ChromaKeyWithBGR(cls, image, lowerBounds, upperBounds, maskBlurSize=3):\n \"\"\"\n Splits the image into two components based on chroma keying.\n\n Parameters\n ----------\n image : image\n Image in an array.\n lowerBounds : numpy array of 3 values.\n Lower bounds of mask.\n upperBounds : numpy array of 3 values.\n Upper bounds of mask.\n maskBlurSize : int\n Size of the blur to apply to the mask. Must be an odd number.\n\n Returns\n -------\n maskedImage : image\n Part of the image that passes the mask.\n imageRemainder : image\n Part of the image that did not pass the mask.\n mask : image\n Mask used on the image.\n \"\"\"\n mask = cv2.inRange(image, lowerBounds, upperBounds)\n mask = cv2.medianBlur(mask, maskBlurSize)\n maskedImage = cv2.bitwise_and(image, image, mask=mask)\n imageRemainder = image - maskedImage\n\n return [maskedImage, imageRemainder, mask]\n\n\n @classmethod\n def ChromaKeyWithHSV(cls, image, lowerBounds, upperBounds, maskBlurSize=3):\n \"\"\"\n Splits the image into two components based on chroma keying.\n\n Parameters\n ----------\n image : image\n Image in an array.\n lowerBounds : numpy array of 3 values.\n Lower bounds of mask.\n upperBounds : numpy array of 3 values.\n Upper bounds of mask.\n maskBlurSize : int\n Size of the blur to apply to the mask. 
Must be an odd number.\n\n Returns\n -------\n maskedImage : image\n Part of the image that passes the mask.\n imageRemainder : image\n Part of the image that did not pass the mask.\n mask : image\n Mask used on the image.\n \"\"\"\n hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsvImage, lowerBounds, upperBounds)\n mask = cv2.medianBlur(mask, maskBlurSize)\n maskedImage = cv2.bitwise_and(image, image, mask=mask)\n imageRemainder = image - maskedImage\n\n return [maskedImage, imageRemainder, mask]", "repo_name": "lendres/Python", "sub_path": "lendres/ImageHelper.py", "file_name": "ImageHelper.py", "file_ext": "py", "file_size_in_byte": 14904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "lendres.plotting.PlotHelper.PlotHelper.Format", "line_number": 59, "usage_type": "call"}, {"api_name": "lendres.plotting.PlotHelper.PlotHelper", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 106, "usage_type": "call"}, {"api_name": "lendres.plotting.PlotHelper.PlotHelper.Format", "line_number": 109, "usage_type": "call"}, {"api_name": "lendres.plotting.PlotHelper.PlotHelper", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "cv2.split", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 213, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 217, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 243, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 247, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 251, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 404, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 405, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 406, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 437, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 437, "usage_type": "attribute"}, {"api_name": "cv2.inRange", "line_number": 438, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 439, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 440, "usage_type": "call"}]} +{"seq_id": "7189192983", "text": "import os\nfrom os.path import dirname\n\nfrom 
gi.repository import Adw\nfrom gi.repository import Gtk\n\nfrom .accounts_list import DharmaAccountsList\nfrom .filepath_settings import DharmaFilePathSettings\nfrom .machines_window import DharmaMachinesWindow\nfrom .guest.main import Dharma\n\n\n@Gtk.Template.from_file(dirname(__file__) + \"/gtk/window.ui\")\nclass DharmaWindow(Adw.ApplicationWindow):\n __gtype_name__ = \"DharmaWindow\"\n\n accounts_list = Gtk.Template.Child()\n qcow_file_button = Gtk.Template.Child()\n game_files_button = Gtk.Template.Child()\n run_button = Gtk.Template.Child()\n\n filepath_settings = None\n accounts_list_object = None\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n self.accounts_list_object = DharmaAccountsList(self.accounts_list, self)\n self.filepath_settings = DharmaFilePathSettings(\n self.qcow_file_button, self.game_files_button, self\n )\n self.run_button.connect(\"clicked\", self.run)\n\n def run(self, button):\n dharma_main = Dharma(\n self.accounts_list_object.accounts,\n self.filepath_settings.qcow_image_path,\n self.filepath_settings.game_folder_path,\n )\n dharma_main.run_loop(2)\n DharmaMachinesWindow(self, dharma_main.manager)", "repo_name": "wineTGH/Dharma", "sub_path": "Dharma/window.py", "file_name": "window.py", "file_ext": "py", "file_size_in_byte": 1299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gi.repository.Adw.ApplicationWindow", "line_number": 14, "usage_type": "attribute"}, {"api_name": "gi.repository.Adw", "line_number": 14, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Template.Child", "line_number": 17, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 17, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Template.Child", "line_number": 18, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template", "line_number": 18, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 18, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Template.Child", "line_number": 19, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template", "line_number": 19, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 19, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Template.Child", "line_number": 20, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template", "line_number": 20, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 20, "usage_type": "name"}, {"api_name": "accounts_list.DharmaAccountsList", "line_number": 28, "usage_type": "call"}, {"api_name": "filepath_settings.DharmaFilePathSettings", "line_number": 29, "usage_type": "call"}, {"api_name": "guest.main.Dharma", "line_number": 35, "usage_type": "call"}, {"api_name": "machines_window.DharmaMachinesWindow", "line_number": 41, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template.from_file", "line_number": 13, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Template", "line_number": 13, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "72432043364", "text": "from sentence_transformers import SentenceTransformer\nfrom chunker import DocumentChunker\nfrom actions import (\n index_chroma_document,\n create_chroma_collection,\n index_elastic_document,\n 
create_elastic_index,\n)\nfrom utils import anonymize\nimport torch\n\n\nclass ChromaIndexer:\n    def __init__(self, embedding_model: str, chunk_size: int, chunk_overlap: int):\n        self.embedding_model = SentenceTransformer(embedding_model)\n        self.embedding_model.to(\"cuda\")\n        self.embedding_model.eval()\n\n        self.chunker = DocumentChunker(\n            chunk_size=chunk_size, chunk_overlap=chunk_overlap\n        )\n\n    def __embed(self, text: str):\n        chunks = self.chunker.chunk(text)\n        embeddings = []\n        with torch.no_grad():\n            embeddings = self.embedding_model.encode(chunks)\n\n        embeddings = embeddings.tolist()\n        return chunks, embeddings\n\n    def create_index(self, name: str):\n        return create_chroma_collection(name)\n\n    def index(self, collection: str, doc: dict, metadata):\n        chunks, embeddings = self.__embed(doc[\"text\"])\n\n        metadatas = [metadata for _ in chunks]\n\n        res = index_chroma_document(\n            collection,\n            {\n                \"documents\": chunks,\n                \"embeddings\": embeddings,\n                \"metadatas\": metadatas,\n            },\n        )\n        del embeddings\n        del chunks\n\n        return res\n\n\nclass ElasticsearchIndexer:\n    def __init__(self, anonymize_type=[]):\n        self.anonymize_type = anonymize_type\n\n    def create_index(self, name: str):\n        return create_elastic_index(name)\n\n    def index(self, index: str, doc: dict):\n        annotations = [\n            {\n                \"id\": ann[\"_id\"],\n                # this will be a real ER id when it exists\n                \"id_ER\": ann[\"_id\"],\n                \"start\": ann[\"start\"],\n                \"end\": ann[\"end\"],\n                \"type\": ann[\"type\"],\n                \"mention\": ann[\"features\"][\"mention\"],\n                \"is_linked\": ann[\"features\"][\"url\"] != None\n                and (not ann[\"features\"][\"linking\"][\"is_nil\"]),\n                # this is temporary, there will be a display name directly in the annotation object\n                \"display_name\": anonymize(ann[\"features\"][\"mention\"])\n                if ann[\"type\"] in self.anonymize_type\n                else ann[\"features\"][\"mention\"],\n            }\n            for ann in doc[\"annotation_sets\"][\"entities_merged\"][\"annotations\"]\n        ]\n\n        metadata = [\n            # for now let's make them static\n            {\"type\": \"anno sentenza\", \"value\": doc[\"features\"].get(\"annosentenza\", \"\")},\n            {\"type\": \"anno ruolo\", \"value\": doc[\"features\"].get(\"annoruolo\", \"\")},\n        ]\n\n        elastic_doc = {\n            \"mongo_id\": doc[\"id\"],\n            \"name\": doc[\"name\"],\n            \"text\": doc[\"text\"],\n            \"metadata\": metadata,\n            \"annotations\": annotations,\n        }\n\n        return index_elastic_document(index, elastic_doc)\n", "repo_name": "marcoripa96/dave-search", "sub_path": "packages/indexer/src/indexer.py", "file_name": "indexer.py", "file_ext": "py", "file_size_in_byte": 3026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sentence_transformers.SentenceTransformer", "line_number": 15, "usage_type": "call"}, {"api_name": "chunker.DocumentChunker", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 26, "usage_type": "call"}, {"api_name": "actions.create_chroma_collection", "line_number": 33, "usage_type": "call"}, {"api_name": "actions.index_chroma_document", "line_number": 40, "usage_type": "call"}, {"api_name": "actions.create_elastic_index", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.anonymize", "line_number": 74, "usage_type": "call"}, {"api_name": "actions.index_elastic_document", "line_number": 95, "usage_type": "call"}]}
+{"seq_id": "31254567909", "text": "import wx\n\n# Custom window class\nclass MyFrame(wx.Frame):\n    def __init__(self):\n        super().__init__(None, title=\"The first UI program of python\", size=(768, 576), 
pos=(100,100))\n        #\n        \n    def OnInit(self):\n        return True\n\n\n# Create the application object \napp = wx.App()\n\n# Create the window object\nfrm = MyFrame()\n\nfrm.Show()\n# Main loop\napp.MainLoop()\n\n\n", "repo_name": "jeremyjia/Games", "sub_path": "python/sample/wxPythonUI.py", "file_name": "wxPythonUI.py", "file_ext": "py", "file_size_in_byte": 372, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "wx.Frame", "line_number": 4, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 14, "usage_type": "call"}]}
+{"seq_id": "28160212399", "text": "# -*- coding: utf-8 -*-\nfrom collections import (defaultdict, deque)\n\ndef dfs_step(graph, start, end):\n    \"\"\"\n    Minimum number of steps found by DFS\n\n    Parameters\n    ----------\n    graph : defaultdict\n        The graph.\n    start : str\n        Start node.\n    end : str\n        End node.\n\n    Returns\n    -------\n    int\n        Minimum number of steps.\n\n    \"\"\"\n    stack = [(start, 0)]\n    visited = {start}\n    \n    while stack:\n        node_curt, step_curt = stack.pop()\n        \n        for node_next in graph[node_curt]:\n            if node_next == end:\n                return step_curt + 1\n            else:\n                if node_next not in visited:\n                    visited.add(node_next)\n                    stack.append((node_next, step_curt+1))\n    \n    return 0\n\n\nif __name__ == \"__main__\":\n    graph = defaultdict(list)\n    graph[\"A\"] = [\"B\", \"C\"]\n    graph[\"B\"] = [\"A\", \"D\", \"C\"]\n    graph[\"C\"] = [\"A\", \"B\", \"D\", \"E\"]\n    graph[\"D\"] = [\"B\", \"C\", \"F\"]\n    graph[\"E\"] = [\"C\", \"D\"]\n    graph[\"F\"] = [\"D\"]\n    print(\"graph \\n\", graph)\n    \n    start = \"A\"\n    end = \"E\"\n    \n    minimum_step = dfs_step(graph, start, end)\n    print(minimum_step)\n", "repo_name": "Lukaschen1986/LeetCodeProgress", "sub_path": "graph/func_dfs_step.py", "file_name": "func_dfs_step.py", "file_ext": "py", "file_size_in_byte": 1124, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "20516061007", "text": "from IPython.core.magic import Magics, magics_class, line_cell_magic\nfrom mlnotify.mlnotify import plugin_manager\n\n# Jupyter line and cell magic\n@magics_class\nclass MLNotifyMagic(Magics):\n    @line_cell_magic\n    def notify(self, line, cell=None):\n        plugin_manager.run_before()\n        self.shell.run_cell(line)\n        if cell is not None:\n            self.shell.run_cell(cell)\n        plugin_manager.run_after()\n\n\ndef register_jupyter_magic():\n    ipython = get_ipython()\n    ipython.register_magics(MLNotifyMagic)\n\n", "repo_name": "aporia-ai/mlnotify", "sub_path": "sdk/src/mlnotify/jupyter_magic.py", "file_name": "jupyter_magic.py", "file_ext": "py", "file_size_in_byte": 524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 339, "dataset": "github-code", "pt": "52", "api": [{"api_name": "IPython.core.magic.Magics", "line_number": 6, "usage_type": "name"}, {"api_name": "mlnotify.mlnotify.plugin_manager.run_before", "line_number": 9, "usage_type": "call"}, {"api_name": "mlnotify.mlnotify.plugin_manager", "line_number": 9, "usage_type": "name"}, {"api_name": "mlnotify.mlnotify.plugin_manager.run_after", "line_number": 13, "usage_type": "call"}, {"api_name": "mlnotify.mlnotify.plugin_manager", "line_number": 13, "usage_type": "name"}, {"api_name": "IPython.core.magic.line_cell_magic", "line_number": 7, "usage_type": "name"}, {"api_name": "IPython.core.magic.magics_class", "line_number": 5, "usage_type": "name"}]}
+{"seq_id": "9169775417", "text": "from typing import Union\n\nfrom django.conf import settings\nfrom django.db import models, transaction\nfrom 
django.urls import reverse\nfrom django.utils import timezone\n\nfrom hasker.users.models import User\n\n\nclass Tag(models.Model):\n\n name = models.CharField(max_length=100)\n\n def __str__(self) -> str:\n return self.name\n\n\nclass SimpleAction(models.IntegerChoices):\n\n LIKE = 1\n DISLIKE = -1\n\n\nclass AbstractQuestionAnswer(models.Model):\n\n class Meta:\n abstract = True\n\n text = models.TextField()\n creation_date = models.DateTimeField(default=timezone.now)\n\n @transaction.atomic\n def make_user_action(self, user: User, action: SimpleAction) -> None:\n make_user_action_(action_object=self, user=user, action=action)\n\n @property\n def rating(self):\n rating_aggregation_result = self.actions.aggregate(rating=models.Sum(\"action\"))[\"rating\"]\n rating = rating_aggregation_result if rating_aggregation_result is not None else 0\n return rating\n\n\nclass Question(AbstractQuestionAnswer):\n\n author = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n related_name=\"questions\",\n on_delete=models.CASCADE\n )\n title = models.CharField(max_length=300)\n\n tags = models.ManyToManyField(\n to=Tag,\n blank=True\n )\n correct_answer = models.OneToOneField(\n to=\"Answer\",\n null=True,\n blank=True,\n related_name=\"correct_answer_for\",\n on_delete=models.CASCADE\n )\n\n def get_absolute_url(self):\n return reverse(\"questions:question\", kwargs={'question_id': self.pk})\n\n @property\n def url(self):\n return self.get_absolute_url()\n\n\nclass Answer(AbstractQuestionAnswer):\n\n author = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n related_name=\"answers\",\n on_delete=models.CASCADE\n )\n question = models.ForeignKey(\n to=Question,\n related_name=\"answers\",\n on_delete=models.CASCADE\n )\n\n\nclass AbstractUserAction(models.Model):\n\n action = models.IntegerField(choices=SimpleAction.choices)\n action_date = models.DateTimeField(default=timezone.now)\n\n class Meta:\n abstract = True\n\n\nclass QuestionAction(AbstractUserAction):\n\n user = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n related_name=\"question_actions\",\n on_delete=models.CASCADE\n )\n question = models.ForeignKey(\n to=Question,\n related_name=\"actions\",\n on_delete=models.CASCADE\n )\n\n\nclass AnswerAction(AbstractUserAction):\n\n user = models.ForeignKey(\n to=settings.AUTH_USER_MODEL,\n related_name=\"answer_actions\",\n on_delete=models.CASCADE\n )\n answer = models.ForeignKey(\n to=Answer,\n related_name=\"actions\",\n on_delete=models.CASCADE\n )\n\n\ndef make_user_action_(action_object: Union[Question, Answer],\n user: User,\n action: SimpleAction) -> None:\n\n \"\"\"\n Makes action (like or dislike) on object (question or answer\n :param action_object: object to maker action on (concrete question or answer)\n :param user: user who makes action\n :param action: action that should be made (like or dislike)\n \"\"\"\n\n opposite_action = SimpleAction.DISLIKE if action == SimpleAction.LIKE else SimpleAction.LIKE\n action_object.actions.filter(\n models.Q(user_id=user.id) &\n models.Q(action=opposite_action)\n ).delete()\n\n user_action_is_already_done = action_object.actions.filter(\n models.Q(user_id=user.id) &\n models.Q(action=action)\n )\n\n if user_action_is_already_done.exists():\n user_action_is_already_done.delete()\n return\n\n action_object.actions.create(\n user_id=user.id,\n action=action\n )\n", "repo_name": "DalerBakhriev/otus_homeworks", "sub_path": "hw_week_7/hasker/hasker/questions/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 3803, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.IntegerChoices", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 31, "usage_type": "name"}, {"api_name": "hasker.users.models.User", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.db.transaction", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 47, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 80, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 80, 
"usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 90, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 103, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 113, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 115, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 117, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 117, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 120, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 124, "usage_type": "name"}, {"api_name": "hasker.users.models.User", "line_number": 125, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 137, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 137, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 138, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 142, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "18779924162", "text": "from enum import Enum\nfrom abc import ABC, abstractmethod\n\nclass Member:\n def __init__(\n self,\n hit_point: int,\n magic_point: int,\n max_hit_point: int\n ):\n self.hit_point = hit_point\n self.magic_point = magic_point\n 
self.max_hit_point = max_hit_point\n    \n    def can_act(self):\n        return True\n    \n    def consume_magic_point(self, cost_magic_point: int):\n        print(\"consume!\")\n\n    def chant(self, magic):\n        print(\"발동!\")\n    \n    def add_state(self, state: \"HealthCondition\"):\n        ...\n\nclass HealthCondition(Enum):\n    dead = 1\n    danger = 2\n    caution = 3\n    fine = 4\n\n\nclass DamageType(Enum):\n    hit_point = 1\n    magic_point = 2\n\n\nclass Damage(ABC):\n    @abstractmethod\n    def execute(self, member: Member, damage_amount: int) -> Member:\n        ...\n\nclass HitPointDamage(Damage):\n    def execute(self, member: Member, damage_amount: int) -> Member:\n        member.hit_point -= damage_amount\n        if 0 < member.hit_point: \n            return member\n\n        member.hit_point = 0\n        member.add_state(HealthCondition.dead)\n\n        return member\n\n\nclass MagicPointDamage(Damage):\n    def execute(self, member: Member, damage_amount: int) -> Member:\n        member.magic_point -= damage_amount\n        if 0 < member.magic_point: \n            return member\n\n        member.magic_point = 0\n\n        return member\n\ndamages = {\n    DamageType.hit_point: HitPointDamage(), \n    DamageType.magic_point: MagicPointDamage()\n}\n\ndef apply_damage(member: Member, damage_type: DamageType, damage_amount: int):\n    damage = damages.get(damage_type)\n    return damage.execute(member, damage_amount)\n\n\nif __name__ == \"__main__\":\n    member = Member(\n        hit_point=50,\n        magic_point=60,\n        max_hit_point=100\n    )\n    member = apply_damage(member, DamageType.hit_point, 30)\n    print(member.hit_point)\n\n    member = apply_damage(member, DamageType.hit_point, 50)\n    print(member.hit_point)\n\n    member = apply_damage(member, DamageType.magic_point, 50)\n    print(member.magic_point)\n\n    member = apply_damage(member, DamageType.magic_point, 50)\n    print(member.magic_point)\n", "repo_name": "salmon131/ood", "sub_path": "ch_06/06.py", "file_name": "06.py", "file_ext": "py", "file_size_in_byte": 2173, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 27, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 34, "usage_type": "name"}, {"api_name": "abc.ABC", "line_number": 39, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 40, "usage_type": "name"}]}
+{"seq_id": "4567906518", "text": "from django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom django.utils.text import slugify\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, authentication_classes, permission_classes\n\nfrom .models import Course, Lesson, Comment, Category\nfrom .serializers import CourseListSerializer, CourseDetailSerializer, LessonListSerializer, CommentSerializer, CategorySerializer, QuizSerializer, UserSerializer\n\n@api_view(['GET'])\ndef get_quiz(request, course_slug, lesson_slug):\n    lesson = Lesson.objects.get(slug=lesson_slug)\n    quiz = lesson.quizzes.first()\n    serializer = QuizSerializer(quiz)\n    return Response(serializer.data)\n\n@api_view(['GET'])\n@authentication_classes([])\n@permission_classes([])\ndef get_categories(request):\n    categories = Category.objects.all()\n    serializer = CategorySerializer(categories, many=True)\n    return Response(serializer.data)\n\n@api_view(['GET'])\n@authentication_classes([])\n@permission_classes([])\ndef get_courses(request):\n    category_id = request.GET.get('category_id', '')\n    # courses = Course.objects.all()\n    courses = Course.objects.filter(status=Course.PUBLISHED)\n    \n    if category_id:\n        courses = 
courses.filter(categories__in=[int(category_id)])\n\n serializer = CourseListSerializer(courses, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\n@authentication_classes([])\n@permission_classes([])\ndef get_frontpage_courses(request):\n courses = Course.objects.filter(status=Course.PUBLISHED)[0:4]\n serializer = CourseListSerializer(courses, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\n@permission_classes([])\ndef get_course(request, slug):\n course = Course.objects.filter(status=Course.PUBLISHED).get(slug=slug)\n course_serializer = CourseDetailSerializer(course)\n lesson_serializer = LessonListSerializer(course.lessons.all(), many=True)\n print(\"COURSE SERIALIZER\")\n print(course_serializer.data)\n print(request.user.is_authenticated)\n\n if request.user.is_authenticated:\n print('user login')\n course_data = course_serializer.data\n else:\n course_data = {}\n\n data = {\n 'course': course_data,\n 'lessons': lesson_serializer.data\n }\n return Response(data)\n\n@api_view(['GET'])\ndef get_comments(request, course_slug, lesson_slug):\n lesson = Lesson.objects.get(slug=lesson_slug)\n serializer = CommentSerializer(lesson.comments.all(), many=True)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef add_comment(request, course_slug, lesson_slug):\n data = request.data\n name = data.get('name')\n content = data.get('content')\n\n course = Course.objects.get(slug=course_slug)\n lesson = Lesson.objects.get(slug=lesson_slug)\n\n comment = Comment.objects.create(course=course, lesson=lesson, name=name, content=content, created_by=request.user)\n serializer = CommentSerializer(comment)\n\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef create_course(request):\n data = request.data.get('_rawValue')\n\n course = Course.objects.create(\n title = data['title'],\n slug = slugify(data['title']),\n short_description = data['short_description'],\n long_description = data['long_description'],\n created_by = request.user\n )\n\n for id in data['categories']:\n course.categories.add(id)\n\n course.save()\n\n print(course)\n\n return Response({'Yo': 'yo'})\n\n\n@api_view(['GET'])\ndef get_author_courses(request, user_id):\n user = User.objects.get(pk=user_id)\n courses = user.courses.filter(status=Course.PUBLISHED)\n\n user_serializer = UserSerializer(user, many=False)\n courses_serializer = CourseListSerializer(courses, many=True)\n\n return Response({\n 'courses': courses_serializer.data,\n 'created_by': user_serializer.data\n })\n", "repo_name": "jcromerohdz/learning-management-system", "sub_path": "studynet_django/course/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Lesson.objects.get", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Lesson.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Lesson", "line_number": 13, "usage_type": "name"}, {"api_name": "serializers.QuizSerializer", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Category.objects.all", "line_number": 22, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 
22, "usage_type": "name"}, {"api_name": "serializers.CategorySerializer", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Course.objects.filter", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Course.PUBLISHED", "line_number": 32, "usage_type": "attribute"}, {"api_name": "serializers.CourseListSerializer", "line_number": 37, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 38, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 26, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 28, "usage_type": "call"}, {"api_name": "models.Course.objects.filter", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Course.PUBLISHED", "line_number": 44, "usage_type": "attribute"}, {"api_name": "serializers.CourseListSerializer", "line_number": 45, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 46, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.decorators.authentication_classes", "line_number": 41, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Course.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Course.PUBLISHED", "line_number": 51, "usage_type": "attribute"}, {"api_name": "serializers.CourseDetailSerializer", "line_number": 52, "usage_type": "call"}, {"api_name": "serializers.LessonListSerializer", "line_number": 53, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.decorators.permission_classes", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Lesson.objects.get", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Lesson.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.Lesson", "line_number": 72, "usage_type": "name"}, {"api_name": "serializers.CommentSerializer", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 74, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 70, "usage_type": "call"}, {"api_name": "models.Course.objects.get", "line_number": 83, "usage_type": "call"}, {"api_name": 
"models.Course.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 83, "usage_type": "name"}, {"api_name": "models.Lesson.objects.get", "line_number": 84, "usage_type": "call"}, {"api_name": "models.Lesson.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.Lesson", "line_number": 84, "usage_type": "name"}, {"api_name": "models.Comment.objects.create", "line_number": 86, "usage_type": "call"}, {"api_name": "models.Comment.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "models.Comment", "line_number": 86, "usage_type": "name"}, {"api_name": "serializers.CommentSerializer", "line_number": 87, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 89, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Course.objects.create", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 95, "usage_type": "name"}, {"api_name": "django.utils.text.slugify", "line_number": 97, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 110, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 91, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Course.PUBLISHED", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 116, "usage_type": "name"}, {"api_name": "serializers.UserSerializer", "line_number": 118, "usage_type": "call"}, {"api_name": "serializers.CourseListSerializer", "line_number": 119, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "171870685", "text": "from __future__ import absolute_import, print_function\n\nimport copy\nimport os\nimport logging\nimport random\nimport sys\nimport subprocess\nimport six\nimport string\nfrom . import util\n\nfrom . import util\n\ndef add_entrypoint_if_needed(cmd, host_name, container_host_name):\n from . import myhosts\n script = myhosts.get_startup_script(container_host_name)\n\n if not script:\n return\n \n # Write the commands in a temporary file\n random_string = util.get_random_string()\n tmp_dir = \"/tmp\"\n path = \"{}/gae123_com_{}_{}\".format(tmp_dir, container_host_name, random_string)\n with open(path, \"w\") as file_desc:\n file_desc.write(\"#!/usr/bin/env bash\\n\")\n file_desc.write(script)\n\n os.system(\"chmod +x {path} && \\\n scp {path} {host}:{path} && \\\n rm -f {path}\".format(path=path, host=host_name))\n\n # Mound the temp file in the container and make it they entrypoint\n dest_path = path\n cmd.append(\"--mount type=bind,source={},destination={}\".format(path, dest_path))\n cmd.append(\"--entrypoint={}\". format(dest_path))\n\n\ndef create_start_docker(host_name, container_host_name, dryrun=False):\n from . 
import myhosts, gen_interfaces\n if not dryrun:\n gen_interfaces.gen_interfaces(host_name)\n cmd = []\n ns_host_auth = myhosts.get_run_before(container_host_name)\n if ns_host_auth:\n cmd.extend([\n ns_host_auth,\n \";\"\n ])\n \n ns_container_ip = myhosts.get_container_ip(container_host_name)\n ns_container_host_ip = myhosts.get_container_host_ip(container_host_name)\n sshport = 22 if util.is_localhost(host_name) else 2222\n ports = copy.copy(myhosts.get_container_ports(container_host_name))\n ports.append(\"{}:22\".format(sshport))\n ns_host_ports = ' '.join(['{}:{}'.format(ns_container_ip,x) for x in ports])\n ns_extra_args = myhosts.get_container_extra_flags(container_host_name)\n ns_aliases = myhosts.get_container_aliases(container_host_name)\n ns_hostAliases = myhosts.get_container_hostAliases(container_host_name)\n\n cmd.extend([\n 'sudo',\n 'docker',\n 'run',\n '--detach',\n '--privileged',\n \"--name={sd2ContainerName}\",\n \"--hostname={sd2ContainerName}\",\n ])\n for alias in ns_aliases:\n cmd.append(\"--add-host='{alias}:{ip}'\".format(alias=alias, ip=ns_container_ip))\n for alias in ns_hostAliases:\n cmd.append(\"--add-host='{alias}:{ip}'\".format(alias=alias, ip=ns_container_host_ip))\n\n if myhosts.get_container_mount_home_dir(container_host_name):\n cmd.extend([\"--volume\", \"$HOME:/home/$USER\"])\n if util.remote_path_exists(host_name, '/mnt'):\n cmd.extend(['--volume', '/mnt:/mnt'])\n\n for ports in ns_host_ports.split():\n cmd.append('--publish')\n cmd.append(ports)\n \n env = {\n \"SD2IP\": ns_container_ip\n }\n env.update()\n for var in myhosts.get_container_env(container_host_name):\n env[var['name']] = var['value']\n for kk,vv in six.iteritems(env):\n cmd.append(\"--env\")\n cmd.append(\"{}={}\".format(kk,vv))\n\n cmd.extend(['--workdir', \"/home/$USER\"])\n cmd.append('--tty')\n\n add_entrypoint_if_needed(cmd, host_name, container_host_name)\n if isinstance(ns_extra_args, six.string_types):\n ns_extra_args.split()\n for arg in ns_extra_args.split():\n cmd.append(arg)\n cmd.append(myhosts.get_container_docker_image(container_host_name))\n cmd.append(myhosts.get_container_command(container_host_name))\n\n command = ' '.join(cmd)\n command = command.format(sd2ContainerName=container_host_name)\n if (not dryrun):\n logging.info(\"EXEC %s\", command)\n util.remote_system(host_name, command)\n from .events import events\n events.emit({\"hostname\": container_host_name, \"action\": \"start\"})\n else:\n print(command)\n\ndef find_id_of_container(host_name, container_name):\n containers = util.remote_subprocess_check_output(host_name,\n [\"sudo\", \"docker\", \"ps\", \"-qa\"])\n cmd = [\n \"sudo\", \"docker\", \"inspect\", \"-f\",\n '{{.Id}},{{.Config.Hostname}}',\n ] + containers.split()\n inspout = util.remote_subprocess_check_output(host_name, cmd)\n ids = [line.split(',')[0] for line in inspout.split() if\n line.split(',')[1].startswith(container_name)]\n return None if not ids else ids[0]\n\ndef remove_container_by_hostname(host_name, container_host_name, dryrun):\n contid = find_id_of_container(host_name, container_host_name)\n cmd = \"sudo docker rm -f \" + contid\n if not dryrun:\n logging.debug(\"EXEC: {}\".format(cmd))\n util.remote_system(host_name, cmd)\n else:\n print(cmd)\n\ndef create_start_docker_if_needed(host_name, container_host_name, dryrun):\n rr = False\n from . 
import myhosts\n running = None\n containers = util.remote_subprocess_check_output(host_name,\n [\"sudo\", \"docker\", \"ps\", \"-qa\"])\n if containers:\n cmd = [\n \"sudo\", \"docker\", \"inspect\", \"-f\",\n '{{.State.Running}},{{.Config.Hostname}},{{.Config.Image}}',\n ] + containers.split()\n inspout = util.remote_subprocess_check_output(host_name, cmd)\n running = [line.split(',')[0] for line in inspout.split() if\n line.split(',')[1] == container_host_name]\n image = [line.split(',')[2] for line in inspout.split() if\n line.split(',')[1] == container_host_name]\n\n create_new_one = False\n if running == ['true']:\n remove = False\n if (image[0] == myhosts.get_container_docker_image(container_host_name) and\n myhosts.is_container_enabled(container_host_name)):\n print((container_host_name + ': Found running...'))\n else:\n if (not myhosts.is_container_enabled(container_host_name)):\n print((container_host_name + ': Found running when it should not.'))\n reason = \" because it should not be running\"\n remove = myhosts.get_container_remove_flag(container_host_name)\n else:\n print((container_host_name + ': Found running a different image {}.'.format(image)))\n reason = 'to start one with the right image {}'.format(\n myhosts.get_container_docker_image(container_host_name))\n remove = myhosts.get_container_upgrade_flag(container_host_name)\n if remove:\n print(('{}: Removing container {}'.format(container_host_name, reason)))\n remove_container_by_hostname(host_name, container_host_name, dryrun)\n create_new_one = myhosts.is_container_enabled(container_host_name)\n elif running == ['false']:\n create_new_one = myhosts.is_container_enabled(container_host_name)\n print((container_host_name + \n ': Found stopped and removing. ({})'.format(create_new_one)))\n remove_container_by_hostname(host_name, container_host_name, dryrun)\n else:\n create_new_one = myhosts.is_container_enabled(container_host_name)\n print((container_host_name + \n ': Not Found. ({})'.format('NOT OK' if create_new_one else 'OK')))\n if create_new_one:\n print((container_host_name + ' Creating a new one...'))\n create_start_docker(host_name, container_host_name, dryrun)\n rr = True\n cmd = 'sudo docker exec -i -t {} su - {}'.format(\n container_host_name, os.getenv('USER'))\n print((\"Attach by running: '{}'\".format(cmd)))\n return rr\n \n\ndef do_containers(host, containers, force, dryrun):\n if force:\n for cont in containers:\n cmd = \"sudo docker rm -f \" + cont\n if not dryrun:\n logging.info(\"EXEC: \" + cmd)\n util.remote_system(host, cmd)\n else:\n print(cmd)\n ret = False\n for cont in containers:\n ret0 = create_start_docker_if_needed(host, cont, dryrun)\n ret = ret or ret0\n return ret\n\ndef check_for_prereqs(args):\n tools_found = True\n tools = [\n ['ssh', 'Please install ssh and restart..'],\n ['rsync', 'Please install rsync and restart..'],\n ]\n if args.fswatch:\n tools.append(['fswatch', 'Please install fswatch and restart..'])\n for tool in tools:\n try:\n rr = subprocess.check_output(\"type {}\".format(tool[0]), shell=True)\n except Exception as ex:\n sys.stdout.write(\"{}\\n\".format(tool[1]))\n tools_found = False\n if not tools_found:\n sys.exit(1)\n \n \ndef main(args):\n from . 
import gen_all, myhosts\n    if not args.noinit:\n        gen_all.gen_all()\n    \n    containers = []\n    our_hostname = util.get_our_hostname()\n    if not args.hostname:\n        args.hostname = our_hostname\n    if not args.containers and args.all:\n        containers = myhosts.get_container_names(args.hostname)\n    else:\n        containers = args.containers\n    \n    # if the user just passed 0 convert it to hostname-0\n    containers = [(x if x.startswith(args.hostname)\n                   else (args.hostname + '-' + x)) for x in containers]\n    ret = do_containers(args.hostname, containers, args.force, args.dryrun)\n    \n    logging.debug(\"sd2cont: Considered %s on %s\", containers, args.hostname)\n    \n    # 0 means it did start one or more\n    sys.exit(0 if ret else 1)\n    \n    \ndef add_argument_parsing(subparsers):\n    parser_cont = subparsers.add_parser(\n        'cont',\n        description='Start containers in hosts'\n    )\n    parser_cont.add_argument('--noinit', '-i', action=\"store_true\",\n                             default=False,\n                             help=\"do not attempt to initialize /etc/hosts etc\")\n    parser_cont.add_argument('--force', '-f', action=\"store_true\",\n                             default=False,\n                             help=\"first remove existing container if it exists\")\n    \n    parser_cont.add_argument('--all', '-a', action=\"store_true\", default=False,\n                             help=\"Used to restart all containers in this host\")\n    parser_cont.add_argument(\"hostname\", nargs=\"?\",\n                             help=\"hostname where to start the images\",\n                             default='')\n    parser_cont.add_argument(\n        \"containers\", nargs=\"*\",\n        help=\"containers to start, leave it empty and pass --all to restart all of them\",\n        default='')\n    parser_cont.set_defaults(func=main, logging=True)\n\n\n\n\n\n\n\n\n", "repo_name": "gae123/sd2", "sub_path": "src/lib/sd2/sd2cont.py", "file_name": "sd2cont.py", "file_ext": "py", "file_size_in_byte": 10468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.system", "line_number": 30, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 55, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 91, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 99, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "events.events.emit", "line_number": 112, "usage_type": "call"}, {"api_name": "events.events", "line_number": 112, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 132, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 188, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 198, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 218, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 220, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 220, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 223, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 245, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 248, "usage_type": "call"}]}
+{"seq_id": "16296688405", "text": "from msrest.service_client import ServiceClient\nfrom msrest import Serializer, Deserializer\nfrom msrestazure import AzureConfiguration\nfrom .version import VERSION\n\n\nclass StorageManagementClientConfiguration(AzureConfiguration):\n    \"\"\"Configuration for StorageManagementClient\n    Note that all parameters used to create this instance are saved as instance\n    attributes.\n\n    :param credentials: Credentials needed for the client to connect to Azure.\n    :type credentials: 
:mod:`A msrestazure Credentials\n object`\n :param subscription_id: Gets subscription credentials which uniquely\n identify the Microsoft Azure subscription. The subscription ID forms part\n of the URI for every service call.\n :type subscription_id: str\n :param str base_url: Service URL\n \"\"\"\n\n def __init__(\n self, credentials, subscription_id, base_url=None):\n\n if credentials is None:\n raise ValueError(\"Parameter 'credentials' must not be None.\")\n if subscription_id is None:\n raise ValueError(\"Parameter 'subscription_id' must not be None.\")\n if not isinstance(subscription_id, str):\n raise TypeError(\"Parameter 'subscription_id' must be str.\")\n if not base_url:\n base_url = 'https://management.azure.com'\n\n super(StorageManagementClientConfiguration, self).__init__(base_url)\n\n self.add_user_agent('storagemanagementclient/{}'.format(VERSION))\n self.add_user_agent('Azure-SDK-For-Python')\n\n self.credentials = credentials\n self.subscription_id = subscription_id\n\n\nclass StorageManagementClient(object):\n \"\"\"The Azure Storage Management API.\n\n :ivar config: Configuration for client.\n :vartype config: StorageManagementClientConfiguration\n\n :param credentials: Credentials needed for the client to connect to Azure.\n :type credentials: :mod:`A msrestazure Credentials\n object`\n :param subscription_id: Gets subscription credentials which uniquely\n identify the Microsoft Azure subscription. The subscription ID forms part\n of the URI for every service call.\n :type subscription_id: str\n :param str base_url: Service URL\n \"\"\"\n\n DEFAULT_API_VERSION = '2017-06-01'\n\n def __init__(\n self, credentials, subscription_id, api_version=DEFAULT_API_VERSION, base_url=None):\n\n self.config = StorageManagementClientConfiguration(credentials, subscription_id, base_url)\n self._client = ServiceClient(self.config.credentials, self.config)\n\n client_models = {k: v for k, v in self.models(api_version).__dict__.items() if isinstance(v, type)}\n self.api_version = api_version\n self._serialize = Serializer(client_models)\n self._deserialize = Deserializer(client_models)\n\n @classmethod\n def models(cls, api_version=DEFAULT_API_VERSION):\n \"\"\"Module depends on the API version:\n\n * 2015-06-15: :mod:`v2015_06_15.models`\n * 2016-12-01: :mod:`v2016_12_01.models`\n * 2017-06-01: :mod:`v2017_06_01.models`\n \"\"\"\n if api_version == '2015-06-15':\n from .v2015_06_15 import models\n return models\n elif api_version == '2016-12-01':\n from .v2016_12_01 import models\n return models\n elif api_version == '2017-06-01':\n from .v2017_06_01 import models\n return models\n raise NotImplementedError(\"APIVersion {} is not available\".format(api_version))\n\n @property\n def operations(self):\n \"\"\"Instance depends on the API version:\n\n * 2017-06-01: :class:`Operations`\n \"\"\"\n if self.api_version == '2017-06-01':\n from .v2017_06_01.operations import Operations as OperationClass\n else:\n raise NotImplementedError(\"APIVersion {} is not available\".format(self.api_version))\n return OperationClass(self._client, self.config, self._serialize, self._deserialize)\n\n @property\n def storage_accounts(self):\n \"\"\"Instance depends on the API version:\n\n * 2015-06-15: :class:`StorageAccountsOperations`\n * 2016-12-01: :class:`StorageAccountsOperations`\n * 2017-06-01: :class:`StorageAccountsOperations`\n \"\"\"\n if self.api_version == '2015-06-15':\n from .v2015_06_15.operations import StorageAccountsOperations as OperationClass\n elif self.api_version == '2016-12-01':\n 
from .v2016_12_01.operations import StorageAccountsOperations as OperationClass\n        elif self.api_version == '2017-06-01':\n            from .v2017_06_01.operations import StorageAccountsOperations as OperationClass\n        else:\n            raise NotImplementedError(\"APIVersion {} is not available\".format(self.api_version))\n        return OperationClass(self._client, self.config, self._serialize, self._deserialize)\n\n    @property\n    def usage(self):\n        \"\"\"Instance depends on the API version:\n\n        * 2015-06-15: :class:`UsageOperations`\n        * 2016-12-01: :class:`UsageOperations`\n        * 2017-06-01: :class:`UsageOperations`\n        \"\"\"\n        if self.api_version == '2015-06-15':\n            from .v2015_06_15.operations import UsageOperations as OperationClass\n        elif self.api_version == '2016-12-01':\n            from .v2016_12_01.operations import UsageOperations as OperationClass\n        elif self.api_version == '2017-06-01':\n            from .v2017_06_01.operations import UsageOperations as OperationClass\n        else:\n            raise NotImplementedError(\"APIVersion {} is not available\".format(self.api_version))\n        return OperationClass(self._client, self.config, self._serialize, self._deserialize)\n", "repo_name": "EnjoyLifeFund/macHighSierra-cellars", "sub_path": "azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/storage/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "msrestazure.AzureConfiguration", "line_number": 7, "usage_type": "name"}, {"api_name": "version.VERSION", "line_number": 36, "usage_type": "argument"}, {"api_name": "msrest.service_client.ServiceClient", "line_number": 65, "usage_type": "call"}, {"api_name": "msrest.Serializer", "line_number": 69, "usage_type": "call"}, {"api_name": "msrest.Deserializer", "line_number": 70, "usage_type": "call"}, {"api_name": "v2015_06_15.models", "line_number": 82, "usage_type": "name"}, {"api_name": "v2016_12_01.models", "line_number": 85, "usage_type": "name"}, {"api_name": "v2017_06_01.models", "line_number": 88, "usage_type": "name"}, {"api_name": "v2017_06_01.operations.Operations", "line_number": 101, "usage_type": "call"}, {"api_name": "v2017_06_01.operations.StorageAccountsOperations", "line_number": 119, "usage_type": "call"}, {"api_name": "v2017_06_01.operations.UsageOperations", "line_number": 137, "usage_type": "call"}]}
+{"seq_id": "4657367763", "text": "import numpy as np\nimport pytest\nfrom bamot.core.base_types import (CameraParameters, Landmark, ObjectTrack,\n                                   Observation, StereoCamera)\nfrom bamot.core.optimization import object_bundle_adjustment\nfrom bamot.util.cv import from_homogeneous, project, to_homogeneous\n\nRNG = np.random.default_rng()\n\n\n@pytest.fixture\ndef camera_params():\n    return CameraParameters(fx=500, fy=500, cx=320, cy=240)\n\n\n@pytest.fixture\ndef T_left_right():\n    T = np.identity(4)\n    T[0, 3] = 0.03 # baseline of 3 cm along x\n    return T\n\n@pytest.fixture\ndef stereo_camera(camera_params, T_left_right):\n    return StereoCamera(\n        left=camera_params, right=camera_params, T_left_right=T_left_right\n    )\n\n\n@pytest.fixture\ndef steps():\n    return 10\n\n\n@pytest.fixture(scope=\"module\")\ndef object_points():\n    # landmarks are w.r.t. 
object coordinate system (which is identical to first camera seeing object)\n # sample 500 points uniformly around (x, y, z) = (1, 1, 3) with max dimensions of 3m around center\n # add landmarks\n pts_3d = RNG.uniform(-1.5, 1.5, (500, 3)) + np.array([1, 1, 3])\n return pts_3d\n\n\n@pytest.fixture\ndef object_poses(steps):\n poses = {}\n pose = np.identity(4)\n poses[0] = pose\n motion_transform = np.identity(4)\n motion_translation = np.array([0.05, 0.01, 0.1])\n motion_transform[:3, 3] = motion_translation\n for i in range(1, steps - 1):\n pose = motion_transform @ poses[i - 1]\n poses[i] = pose\n return poses\n\n\n@pytest.fixture\ndef landmarks(object_points, object_poses, camera_params):\n # go through all object points\n lms = {}\n for i, pt_obj in enumerate(object_points):\n # for every object pose, decide whether point was seen\n # transform point to current pose\n # project onto camera + add some noise\n observations = []\n for pose_id, pose in object_poses.items():\n if RNG.normal(0.8, 0.5) <= 0.5:\n # point isn't observed at this pose\n continue\n pt_3d = from_homogeneous(pose @ to_homogeneous(pt_obj))\n pt_2d = (project(camera_params, pt_3d) + RNG.random() * 2).reshape(\n (2,)\n ) # up to two pixel error\n observations.append(\n Observation(descriptor=None, pt_2d=pt_2d, timecam_id=(pose_id, 0))\n )\n landmark = Landmark(pt_obj, observations)\n lms[i] = landmark\n return lms\n\n\n@pytest.fixture\ndef object_track(landmarks, object_poses):\n return ObjectTrack(\n landmarks=landmarks,\n current_pose=list(object_poses.values())[-1],\n poses=object_poses,\n velocity=np.array([0.0, 0.0, 0.0]),\n active=True,\n )\n\n\ndef test_object_bundle_adjustment(object_track, object_poses, stereo_camera):\n updated_object_track = object_bundle_adjustment(\n object_track=object_track, all_poses=object_poses, stereo_cam=stereo_camera\n )\n assert False\n\n\n# TODO:\n# Create same test but with example optimization and see if it converges\n", "repo_name": "AnselmC/bamot", "sub_path": "bamot/core/test_optimization.py", "file_name": "test_optimization.py", "file_ext": "py", "file_size_in_byte": 3022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.default_rng", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "bamot.core.base_types.CameraParameters", "line_number": 13, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.identity", "line_number": 18, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "attribute"}, {"api_name": "bamot.core.base_types.StereoCamera", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.identity", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 43, "usage_type": "attribute"}, {"api_name": "bamot.util.cv.from_homogeneous", "line_number": 70, "usage_type": "call"}, {"api_name": "bamot.util.cv.to_homogeneous", 
"line_number": 70, "usage_type": "call"}, {"api_name": "bamot.util.cv.project", "line_number": 71, "usage_type": "call"}, {"api_name": "bamot.core.base_types.Observation", "line_number": 75, "usage_type": "call"}, {"api_name": "bamot.core.base_types.Landmark", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 57, "usage_type": "attribute"}, {"api_name": "bamot.core.base_types.ObjectTrack", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 82, "usage_type": "attribute"}, {"api_name": "bamot.core.optimization.object_bundle_adjustment", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "31913833016", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 31 20:57:58 2017\n\n@author: Rachana\n\"\"\"\n\nimport tweepy\nfrom textblob import TextBlob\nfrom accesscodes import *\n\nauthorization = tweepy.OAuthHandler(consuer_key,consumer_secrect)\nauthorization.set_access_token(access_token,access_token_screct)\n\napi = tweepy.API(authorization)\n\ndef userinput():\n topic = input('Enter a topic: ')\n public_tweet = api.search(topic)\n return public_tweet\n \ndef analysis():\n pub = userinput()\n for tweets in pub:\n print(tweets.text)\n ana = TextBlob(tweets.text)\n print(ana.sentiment)\n\n\n\nanalysis()\n", "repo_name": "Prachana/twitter_sentiment", "sub_path": "twitter_analysis.py", "file_name": "twitter_analysis.py", "file_ext": "py", "file_size_in_byte": 632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 16, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "17563724231", "text": "import jieba\n\nwith open('145.txt', 'r', encoding='utf-8') as f:\n gov = f.read()\njieba.load_userdict(\"AIDict.txt\")\nseg_list = jieba.cut(gov, cut_all=False)\n#print(list(seg_list))\n\ntf = {}\nfor seg in seg_list:\n if seg in tf:\n tf[seg] += 1\n else:\n tf[seg] = 1\nprint(len(tf))\n\nci = list(tf.keys())\n# print(ci)\nwith open('stopword.txt', 'r',encoding='utf-8') as ft:\n stopword = ft.read()\n\nfor seg in ci:\n if tf[seg] < 10 or len(seg) < 2 or seg in stopword or '一' in seg:\n tf.pop(seg)\n\nprint(len(tf))\nci = list(tf.keys())\nnum = list(tf.values())\ndata = []\n\nfor i in range(len(tf)):\n data.append((num[i], ci[i]))\n\ndata.sort()\ndata.reverse()\n# print(data)\nf = open(\"result.txt\", \"w\",encoding='utf-8')\nfor i in range(len(data)):\n f.write(data[i][1] + \",\" + str(data[i][0]) + \"\\r\\n\")\nf.close()\n\nsortdata = {}\nfor d in data:\n sortdata[d[1]] = d[0]\nprint(sortdata)", "repo_name": "5ycrow/2021-yuqingfenxi-code", "sub_path": "Jieba145/jieba145.py", "file_name": "jieba145.py", "file_ext": "py", "file_size_in_byte": 899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "jieba.load_userdict", "line_number": 5, "usage_type": "call"}, {"api_name": "jieba.cut", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "9016682138", "text": "import numpy as np\r\nnp.set_printoptions(precision = 4, linewidth = 150)\r\nimport matplotlib.pyplot as plt\r\n\r\nd = lambda x,T,L,A,E: - T*(x**3 - L**3)/(6*A*E)\r\nf = lambda x,T,A: - T*(x**2)/(2*A)\r\n\r\nLargo = 1.50\r\nT = -200000\r\nelementos = 
10\r\nA = 0.001\r\nE = 210e9\r\n\r\nDesplazamientos = np.zeros(elementos + 1)\r\nFuerzas = np.zeros(elementos + 1)\r\n\r\nVectLong = np.linspace(0, Largo, elementos + 1)\r\n\r\nKGlob = np.zeros((elementos + 1, elementos + 1))\r\nKloc = np.eye(2)\r\nKloc[0,1] = -1\r\nKloc[1,0] = -1\r\nTensión = []\r\nFtot = T*(VectLong[1]**2 - VectLong[0]**2)/2\r\nFvec = np.zeros(2)\r\nCentroElem = []\r\n\r\nfor i in range(elementos):\r\n\r\n\tFuerzas[i] += Ftot/3 + (VectLong[i + 1] - VectLong[i])*T*VectLong[i]/2\r\n\tFuerzas[i+1] += Ftot*2/3 + (VectLong[i + 1] - VectLong[i])*T*VectLong[i]/2\r\n\r\n\tK = E*A/(VectLong[i + 1] - VectLong[i])\r\n\tKGlob[i : i + 2, i : i + 2] += Kloc*K\r\n\r\nDesplazamientos[0:elementos] = np.linalg.solve(KGlob[np.ix_(np.arange(elementos),\\\r\n\tnp.arange(elementos))], Fuerzas[np.arange(elementos)])\r\nRespuesta = np.dot(KGlob[elementos,np.arange(elementos + 1)], Desplazamientos) - Fuerzas[elementos]\r\n\r\nfor i in range(elementos):\r\n\tTensión.append(E*np.array(\r\n\t\t[-1/(VectLong[i+1] - VectLong[i]), 1/(VectLong[i+1] - VectLong[i])]).dot(Desplazamientos[i:i+2]\r\n\t\t\t\t\t\t\t\t))\r\n\tCentroElem.append((VectLong[i + 1] + VectLong[i])/2)\r\n\r\nplt.figure(figsize = (12,8))\r\nplt.plot(np.linspace(0, Largo, 100), d(np.linspace(0, Largo, 100),T,Largo,A,E),'r', label = 'Ecuación')\r\nplt.plot(np.linspace(0, Largo, elementos + 1), Desplazamientos, label = 'Aproximación')\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.figure(figsize = (12,8))\r\nplt.plot(np.linspace(0, Largo, 100), f(np.linspace(0, Largo, 100),T,A),'r', label = 'Ecuación')\r\nplt.plot(CentroElem, Tensión, 's', label = 'Aproximación')\r\nplt.step(CentroElem, Tensión, 'k', where = 'mid', label = 'Step')\r\nplt.legend()\r\nplt.show()\r\n\r\n# Each element carries a constant stress Sigma = E * [-1/L 1/L]*[d1, d2]; they are constant because\r\n# the approximation assumes the stress inside each element is constant\r\n#\r\n#TENY = np.vstack( ([0.0], self.Sig) )\r\n# plt.step(self.MN[:, 0], TENY, 'r',\r\n# where='pre', label='Solución Numérica')", "repo_name": "Modelizacion-de-Materiales/Modelizacion-2021-Ingenieria", "sub_path": "Consultas/ConsultasManuel/Guía2/Ejercicio3.py", "file_name": "Ejercicio3.py", "file_ext": "py", "file_size_in_byte": 2195, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.set_printoptions", "line_number": 2, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.ix_", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.step", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "22001349977", "text": "\"\"\" Unit tests for classes in the optimisers.batch module, including\nFullTrainingSet, ConstantBatchSize, and DynamicBatchSize \"\"\"\nimport os\nimport numpy as np\nimport pytest\nimport optimisers, data, models\nfrom .util import (\n get_output_dir,\n set_random_seed_from_args,\n get_random_dataset,\n get_random_network,\n)\n\n# Get name of output directory, and create it if it doesn't exist\noutput_dir = get_output_dir(\"Batch sizes\")\n\n@pytest.mark.parametrize(\"repeat\", range(3))\ndef test_full_training_set_batch(repeat):\n \"\"\" Test using the full training set as a batch with the\n optimisers.batch.FullTrainingSet class \"\"\"\n set_random_seed_from_args(\"test_full_training_set_batch\", repeat)\n dataset = get_random_dataset()\n batch_getter = optimisers.batch.FullTrainingSet()\n x_batch, y_batch = batch_getter.get_batch(dataset.train)\n assert x_batch.shape == dataset.train.x.shape\n assert y_batch.shape == dataset.train.y.shape\n\n@pytest.mark.parametrize(\"repeat\", range(3))\ndef test_constant_batch_size(repeat):\n \"\"\" Test using a constant batch size with the\n optimisers.batch.ConstantBatchSize class \"\"\"\n set_random_seed_from_args(\"test_constant_batch_size\", repeat)\n dataset = get_random_dataset()\n batch_size = np.random.randint(10, 20)\n batch_getter = optimisers.batch.ConstantBatchSize(batch_size)\n x_batch, y_batch = batch_getter.get_batch(dataset.train)\n assert x_batch.shape == (dataset.train.x.shape[0], batch_size)\n assert y_batch.shape == (dataset.train.y.shape[0], batch_size)\n\ndef test_constant_batch_size_non_integer_fail():\n \"\"\" Test that using a non-integer batch size raises an 
exception when using\n the optimisers.batch.ConstantBatchSize class \"\"\"\n batch_size = 3.7\n with pytest.raises(TypeError):\n batch_getter = optimisers.batch.ConstantBatchSize(batch_size)\n", "repo_name": "jakelevi1996/backprop2", "sub_path": "Tests/test_batch_sizes.py", "file_name": "test_batch_sizes.py", "file_ext": "py", "file_size_in_byte": 1849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "util.get_output_dir", "line_number": 15, "usage_type": "call"}, {"api_name": "util.set_random_seed_from_args", "line_number": 21, "usage_type": "call"}, {"api_name": "util.get_random_dataset", "line_number": 22, "usage_type": "call"}, {"api_name": "optimisers.batch.FullTrainingSet", "line_number": 23, "usage_type": "call"}, {"api_name": "optimisers.batch", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}, {"api_name": "util.set_random_seed_from_args", "line_number": 32, "usage_type": "call"}, {"api_name": "util.get_random_dataset", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 34, "usage_type": "attribute"}, {"api_name": "optimisers.batch.ConstantBatchSize", "line_number": 35, "usage_type": "call"}, {"api_name": "optimisers.batch", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 44, "usage_type": "call"}, {"api_name": "optimisers.batch.ConstantBatchSize", "line_number": 45, "usage_type": "call"}, {"api_name": "optimisers.batch", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "13454439880", "text": "#!/usr/bin/env python \n# _*_ coding:utf-8 _*_ \n# \n# @Version : 1.0 \n# @Time : 2020/3/18\n# @Author : 圈圈烃\n# @File : parserBodyData\n# @Description:\n#\n#\nfrom bs4 import BeautifulSoup\nimport json\n\n\ndef parserBody():\n \"\"\"解析页面数据\"\"\"\n bodyData = {\"name\": \"e-human-adult-male-body\"}\n bodyData[\"groups\"] = []\n with open(\"../templates/MaleBodyApp/female.html\", \"r\", encoding=\"utf-8\") as f:\n html = f.read()\n soup = BeautifulSoup(html, \"html.parser\")\n svg = soup.find('svg')\n # 未分组\n ngpaths = svg.find_all(\"path\", recursive=False)\n for path in ngpaths:\n group = {\"elements\": []}\n ele = {}\n ele[\"type\"] = \"path\"\n try:\n ele[\"d\"] = path[\"d\"]\n except:\n ele[\"d\"] = \"\"\n ele[\"id\"] = path[\"id\"]\n ele[\"fill\"] = path[\"fill\"]\n ele[\"style\"] = path[\"style\"]\n ele[\"stroke\"] = path[\"stroke\"]\n group[\"resource\"] = \"useless\"\n try:\n ele[\"datagroup\"] = path[\"data-group\"]\n except:\n ele[\"datagroup\"] = \"\"\n try:\n ele[\"strokewidth\"] = path[\"stroke-width\"]\n except:\n ele[\"strokewidth\"] = \"\"\n group[\"elements\"].append(ele)\n bodyData[\"groups\"].append(group)\n # 有分组\n gs = svg.find_all('g')\n for g in gs:\n group = {\"elements\": []}\n try:\n group[\"data-group\"] = g[\"data-group\"]\n except:\n group[\"data-group\"] = \"\"\n try:\n group[\"resource\"] = g[\"resource\"]\n except:\n group[\"resource\"] = \"\"\n paths = g.find_all()\n for path in paths:\n ele = {}\n if path.name == \"g\":\n pass\n else:\n if path.name == \"path\":\n ele[\"type\"] = \"path\"\n try:\n 
ele[\"d\"] = path[\"d\"]\n except:\n ele[\"d\"] = \"\"\n elif path.name == \"circle\":\n ele[\"type\"] = \"circle\"\n ele[\"cx\"] = path[\"cx\"]\n ele[\"cy\"] = path[\"cy\"]\n ele[\"r\"] = path[\"r\"]\n elif path.name == \"ellipse\":\n ele[\"type\"] = \"ellipse\"\n ele[\"cx\"] = path[\"cx\"]\n ele[\"cy\"] = path[\"cy\"]\n ele[\"rx\"] = path[\"rx\"]\n ele[\"ry\"] = path[\"ry\"]\n else:\n pass\n # 公共属性\n ele[\"id\"] = path[\"id\"]\n ele[\"fill\"] = path[\"fill\"]\n ele[\"style\"] = path[\"style\"]\n ele[\"stroke\"] = path[\"stroke\"]\n try:\n ele[\"datagroup\"] = path[\"data-group\"]\n except:\n ele[\"datagroup\"] = \"\"\n try:\n ele[\"strokewidth\"] = path[\"stroke-width\"]\n except:\n ele[\"strokewidth\"] = \"\"\n group[\"elements\"].append(ele)\n bodyData[\"groups\"].append(group)\n # print(svg)\n # print(bodyData)\n with open(\"../static/MaleBodyApp/assets/illustrations/\" + bodyData[\"name\"] + \".json\", 'w', encoding=\"utf-8\") as fw:\n json.dump(bodyData, fw, ensure_ascii=\"utf-8\")\n\n\nif __name__ == '__main__':\n parserBody()\n", "repo_name": "StuPeter/HumanBodyWiki", "sub_path": "MaleBodyApp/libs/parserBodyData.py", "file_name": "parserBodyData.py", "file_ext": "py", "file_size_in_byte": 3317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "27785371752", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n Figshare\n\n robots.txt as of 02/15/2020\n https://figshare.com/robots.txt\n\n User-agent: *\n Disallow: /search\n Sitemap: https://figshare.com/sitemap/siteindex.xml\n\n Required to configure es:\n\n PUT figshare_api\n {\n \"mappings\": {\n \"enabled\": true,\n \"dynamic_templates\": [\n {\n \"published_date_fild\": {\n \"match_mapping_type\": \"date\",\n \"mapping\": {\n \"type\": \"date\"\n }\n }\n },\n {\n \"object_fields\": {\n \"match_mapping_type\": \"object\",\n \"mapping\": {\n \"enabled\": false\n }\n }\n },\n {\n \"all_other_fields\": {\n \"match_mapping_type\": \"*\",\n \"mapping\": {\n \"index\": false\n }\n }\n }\n ]\n }\n }\n\n\"\"\"\nimport json\nimport logging\nimport os\n\nimport scrapy\nfrom elasticsearch import Elasticsearch\n\nfrom ..helper import JsonLdMixin\n\n\nclass FigshareAPISpider(scrapy.Spider):\n \"\"\"\n\n May need to run multiple times\n\n Example field:\n \"published_date\": \"1996-01-10T00:00:00Z\"\n \"\"\"\n\n name = 'figshare_api'\n base_url = 'https://api.figshare.com/v2/articles?'\n client = Elasticsearch(os.getenv('ES_HOST', 'localhost:9200'))\n query_params = {\n \"page_size\": \"1000\",\n \"order\": \"published_date\",\n \"order_direction\": \"asc\",\n \"item_type\": \"3\" # dataset type\n }\n\n def form_url(self, **kwargs):\n\n params = ['='.join(item_pair) for item_pair in self.query_params.items()]\n url = self.base_url + '&'.join(params)\n for key, value in kwargs.items():\n if value:\n url += f'&{key}={value}'\n return url\n\n def start_requests(self):\n\n res = self.client.indices.get_mapping(index=self.name)\n mapping = res[self.name]['mappings']\n published_date = mapping.get('_meta', {}).get('published_date', '')\n url = self.form_url(published_since=published_date)\n return [scrapy.FormRequest(url, callback=self.parse, cb_kwargs={'published_since': published_date})]\n\n def parse(self, response, published_since='', page=1):\n\n api_res = json.loads(response.body)\n published_date = published_since\n\n if 
isinstance(api_res, list):\n\n for item in api_res:\n published_date = item['published_date'][:10]\n # skip items already scraped\n if self.client.exists(index=self.name, id=item['id']):\n logging.info('Skipping %s.', item['id'])\n continue\n item.update(_id=item['id'])\n yield item\n\n self.client.indices.put_mapping(index=self.name, body={\"_meta\": {\"published_date\": published_date}})\n\n if page + 1 < 10:\n logging.info('Requesting page %s since %s.', page + 1, published_since)\n yield scrapy.Request(\n url=self.form_url(page=page + 1, published_since=published_since),\n cb_kwargs={\n 'page': page + 1,\n 'published_since': published_since\n }\n )\n else:\n logging.info('Requesting page %s since %s.', 1, published_date)\n yield scrapy.Request(\n url=self.form_url(page=1, published_since=published_date),\n cb_kwargs={\n 'page': 1,\n 'published_since': published_date\n }\n )\n\n", "repo_name": "biothings/biothings.crawler", "sub_path": "crawler/spiders/focusedscrape/figshare.py", "file_name": "figshare.py", "file_ext": "py", "file_size_in_byte": 3937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 58, "usage_type": "attribute"}, {"api_name": "elasticsearch.Elasticsearch", "line_number": 69, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 69, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 92, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 96, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 105, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 122, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "2550699534", "text": "from pyniryo import *\nfrom PIL import Image\nimage = Image.open('worksp.png')\n\nrobot = NiryoRobot(\"10.10.10.10\")\n\nstatus, im_work = extract_img_workspace(image, workspace_ratio=1.0)\n# Trying to pick target using camera\nobj_found, shape_ret, color_ret = robot.vision_pick(im_work)\nif obj_found:\n print(\"object found\")\nrobot.set_learning_mode(True)\n", "repo_name": "ProjetNyrio/ProjetS8", "sub_path": "Interface Complete/demo_scripts/démo_vision.py", "file_name": "démo_vision.py", "file_ext": "py", "file_size_in_byte": 350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "PIL.Image.open", "line_number": 3, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "75204677284", "text": "import os\nimport pandas as pd\nimport streamlit as st\nimport altair as alt\nimport sqlite3\nfrom utils import find_file_id, download_file, load_css\nst.set_page_config(layout=\"wide\")\nload_css()\n\n@st.cache_data(ttl=60*60*12)\ndef get_anki_db():\n file_id = find_file_id(\"name contains 'collection.anki2'\")[0]['id']\n file = download_file(file_id=file_id)\n return file\n\ndef load_anki():\n anki_db = get_anki_db()\n with open(os.path.join('data', 'anki.db'), 'wb') as anki_file:\n anki_file.write(anki_db.getbuffer())\n\n con = sqlite3.connect(os.path.join('data', 'anki.db'))\n return con\n\nanki_con = load_anki()\n\nst.title('Anki')\n\n# Loading cards\ncards = pd.read_sql('SELECT * FROM cards', anki_con)\n# Loading notes\nnotes = 
pd.read_sql('SELECT * FROM notes', anki_con)\n# Loading revlog\nrevlog = pd.read_sql('SELECT * FROM revlog', anki_con)\nrevlog['id'] = pd.to_datetime(revlog.id, unit='ms', utc=True).dt.tz_convert('Europe/Paris')\n# Loading decks\ndecks = pd.read_json(pd.read_sql('SELECT * FROM col', anki_con).decks[0]).T\n# Get HSK data\nhsk_deck_id = decks[decks.name.str.contains('HSK')].id.iloc[0]\nhsk_cards = cards[cards.did == hsk_deck_id].rename(columns={'id': 'cid'})\nhsk_notes = notes[notes.id.isin(hsk_cards.nid)].rename(columns={'id': 'nid'})\nhsk_notes = hsk_notes.assign(HSK=hsk_notes.tags.str.extract('(HSK\\d)'))\nhsk_notes = hsk_notes.drop(columns=['tags'])\nhsk_revlog = revlog[revlog.cid.isin(hsk_cards.cid)]\n\nhsk_df = pd.merge(left=hsk_cards, right=hsk_notes, on='nid')\nhsk_detailed_revlog = pd.merge(left=hsk_df, right=hsk_revlog, on='cid')\n\n# Info about Anki\nst.write(\"Anki is an app I use every day to learn chinese vocabulary.\")\nst.header('Overall metrics')\noverall_metrics = st.columns(3)\noverall_metrics[0].metric(\n label='Cards reviewed',\n value=len(hsk_detailed_revlog)\n)\noverall_metrics[1].metric(\n label='Current streak',\n value=f\"{hsk_detailed_revlog['id'].dt.date.nunique():.0f} days\"\n)\n\ntotal_time_spent = hsk_detailed_revlog['time'].astype(int).sum() // 1000\noverall_metrics[2].metric(\n label='Total time spent',\n value=f\"{total_time_spent // 3600:.0f}h {(total_time_spent // 60) % 60:.0f}min\"\n)\n\nst.header('HSK progress')\nhsk_metrics = st.columns(6)\nfor i, hsk_level in enumerate(hsk_df.HSK.dropna().unique()):\n hsk_df_ = hsk_df[hsk_df.HSK == hsk_level]\n hsk_metrics[i].metric(\n label=hsk_level,\n value=f\"{len(hsk_df_[hsk_df_.ivl == 200]) / len(hsk_df_):.0%}\"\n )\n\n# hsk_chart_levels = alt.Chart(\n# hsk_df.dropna(subset=['HSK'])\n# ).transform_density(\n# 'ivl',\n# groupby=['HSK'],\n# as_=['ivl', 'density'],\n# extent=[1, 200],\n# ).mark_area().encode(\n# x=alt.X('ivl:Q'),\n# y=alt.Y('density:Q')\n# ).facet(\n# # row=alt.Row('HSK')\n# \"HSK\",\n# columns=6\n# )\n\n# st.altair_chart(hsk_chart_levels, theme='streamlit')\n\n# Display metrics\n# Day changes at 3am\nhistory_plot = alt.Chart(\n hsk_detailed_revlog[['id']] - pd.Timedelta('3H')\n).mark_bar(\n color=st.secrets[\"theme\"]['primaryColor']\n).encode(\n x=alt.X('yearmonthdate(id):T', title='', axis=alt.Axis(format='%Y', tickCount='year', grid=True)),\n y=alt.Y('count():Q', title='Cards Reviewed')\n)\nst.altair_chart(history_plot, use_container_width=True, theme='streamlit')\n\ncumulative_hsk = hsk_detailed_revlog.drop_duplicates(subset=['cid'])\ncumulative_hsk = cumulative_hsk.dropna(subset=['HSK'])\ncumulative_hsk = cumulative_hsk.assign(\n cumcount=(cumulative_hsk\n .sort_values(by='id')\n .groupby('HSK')\n .cumcount()\n )\n)\n\nhsk_plot = alt.Chart(cumulative_hsk).mark_line().encode(\n x='id:T',\n y='cumcount:Q',\n color='HSK'\n)\nst.altair_chart(hsk_plot, use_container_width=True, theme='streamlit')", "repo_name": "hugolmn/quantified-self", "sub_path": "pages/Anki.py", "file_name": "Anki.py", "file_ext": "py", "file_size_in_byte": 3704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.set_page_config", "line_number": 7, "usage_type": "call"}, {"api_name": "utils.load_css", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.find_file_id", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.download_file", "line_number": 13, "usage_type": "call"}, {"api_name": "streamlit.cache_data", 
"line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 46, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 49, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 50, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 67, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 68, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 97, "usage_type": "call"}, {"api_name": "streamlit.secrets", "line_number": 99, "usage_type": "attribute"}, {"api_name": "altair.X", "line_number": 101, "usage_type": "call"}, {"api_name": "altair.Axis", "line_number": 101, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 102, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 104, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 116, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "10296710744", "text": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied, ValidationError\nfrom django.http import FileResponse, Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_POST\nfrom django.views.generic import TemplateView, View\n\nfrom weblate.trans.forms import (\n AddCategoryForm,\n AnnouncementForm,\n BaseDeleteForm,\n CategoryRenameForm,\n ComponentRenameForm,\n ComponentSettingsForm,\n ProjectRenameForm,\n ProjectSettingsForm,\n WorkflowSettingForm,\n)\nfrom weblate.trans.models import (\n Announcement,\n Category,\n Component,\n Project,\n Translation,\n WorkflowSetting,\n)\nfrom weblate.trans.tasks import (\n category_removal,\n component_removal,\n create_project_backup,\n project_removal,\n)\nfrom weblate.trans.util import redirect_param, render\nfrom weblate.utils import messages\nfrom weblate.utils.stats import CategoryLanguage, ProjectLanguage\nfrom weblate.utils.views import parse_path, show_form_errors\n\n\n@never_cache\n@login_required\ndef change(request, path):\n obj = parse_path(request, path, (Component, 
Project, ProjectLanguage))\n if not request.user.has_perm(obj.settings_permission, obj):\n raise Http404\n\n if isinstance(obj, Component):\n return change_component(request, obj)\n if isinstance(obj, ProjectLanguage):\n return change_project_language(request, obj)\n return change_project(request, obj)\n\n\ndef change_project(request, obj):\n if request.method == \"POST\":\n settings_form = ProjectSettingsForm(request, request.POST, instance=obj)\n if settings_form.is_valid():\n settings_form.save()\n messages.success(request, gettext(\"Settings saved\"))\n return redirect(\"settings\", path=obj.get_url_path())\n messages.error(\n request, gettext(\"Invalid settings. Please check the form for errors.\")\n )\n else:\n settings_form = ProjectSettingsForm(request, instance=obj)\n\n return render(\n request,\n \"project-settings.html\",\n {\"object\": obj, \"form\": settings_form},\n )\n\n\ndef change_project_language(request, obj):\n try:\n instance = obj.project.workflowsetting_set.get(language=obj.language)\n except WorkflowSetting.DoesNotExist:\n instance = None\n\n if request.method == \"POST\":\n settings_form = WorkflowSettingForm(request.POST, instance=instance)\n if settings_form.is_valid():\n settings_form.instance.project = obj.project\n settings_form.instance.language = obj.language\n settings_form.save()\n messages.success(request, gettext(\"Settings saved\"))\n return redirect(\"settings\", path=obj.get_url_path())\n messages.error(\n request, gettext(\"Invalid settings. Please check the form for errors.\")\n )\n else:\n settings_form = WorkflowSettingForm(instance=instance)\n\n return render(\n request,\n \"project-language-settings.html\",\n {\"object\": obj, \"form\": settings_form},\n )\n\n\ndef change_component(request, obj):\n if not request.user.has_perm(\"component.edit\", obj):\n raise Http404\n\n if request.method == \"POST\":\n form = ComponentSettingsForm(request, request.POST, instance=obj)\n if form.is_valid():\n form.save()\n messages.success(request, gettext(\"Settings saved\"))\n return redirect(\"settings\", path=obj.get_url_path())\n messages.error(\n request, gettext(\"Invalid settings. Please check the form for errors.\")\n )\n # Get a fresh copy of object, otherwise it will use unsaved changes\n # from the failed form\n obj = Component.objects.get(pk=obj.pk)\n else:\n form = ComponentSettingsForm(request, instance=obj)\n\n if obj.repo_needs_merge():\n messages.warning(\n request,\n gettext(\n \"The repository is outdated. 
You might not get \"\n \"expected results until you update it.\"\n ),\n )\n\n return render(\n request,\n \"component-settings.html\",\n {\"project\": obj.project, \"object\": obj, \"form\": form},\n )\n\n\n@never_cache\n@login_required\n@require_POST\ndef dismiss_alert(request, path):\n obj = parse_path(request, path, (Component,))\n\n if not request.user.has_perm(\"component.edit\", obj):\n raise Http404\n\n try:\n alert = obj.alert_set.get(name=request.POST[\"dismiss\"])\n if alert.obj.dismissable:\n alert.dismissed = True\n alert.save(update_fields=[\"dismissed\"])\n except ObjectDoesNotExist:\n pass\n\n return redirect_param(obj, \"#alerts\")\n\n\n@login_required\n@require_POST\ndef remove(request, path):\n obj = parse_path(\n request,\n path,\n (Translation, Component, Project, ProjectLanguage, CategoryLanguage, Category),\n )\n\n if not request.user.has_perm(obj.remove_permission, obj):\n raise PermissionDenied\n\n form = BaseDeleteForm(obj, request.POST)\n if not form.is_valid():\n show_form_errors(request, form)\n return redirect_param(obj, \"#organize\")\n\n if isinstance(obj, Translation):\n parent = obj.component\n obj.remove(request.user)\n messages.success(request, gettext(\"The translation has been removed.\"))\n elif isinstance(obj, Component):\n parent = obj.category or obj.project\n component_removal.delay(obj.pk, request.user.pk)\n messages.success(\n request, gettext(\"The translation component was scheduled for removal.\")\n )\n elif isinstance(obj, Category):\n parent = obj.category or obj.project\n category_removal.delay(obj.pk, request.user.pk)\n messages.success(request, gettext(\"The category was scheduled for removal.\"))\n elif isinstance(obj, Project):\n parent = reverse(\"home\")\n project_removal.delay(obj.pk, request.user.pk)\n messages.success(request, gettext(\"The project was scheduled for removal.\"))\n elif isinstance(obj, ProjectLanguage):\n parent = obj.project\n for translation in obj.translation_set:\n translation.remove(request.user)\n\n messages.success(request, gettext(\"A language in the project was removed.\"))\n elif isinstance(obj, CategoryLanguage):\n parent = obj.category\n for translation in obj.translation_set:\n translation.remove(request.user)\n\n messages.success(request, gettext(\"A language in the category was removed.\"))\n\n return redirect(parent)\n\n\ndef perform_rename(form_cls, request, obj, perm: str):\n if not request.user.has_perm(perm, obj):\n raise PermissionDenied\n\n # Make sure any non-rename related issues are resolved first\n try:\n obj.full_clean()\n except ValidationError as err:\n messages.error(\n request,\n gettext(\"Could not change %s due to outstanding issue in its settings: %s\")\n % (obj, err),\n )\n return redirect_param(obj, \"#organize\")\n\n form = form_cls(request, request.POST, instance=obj)\n if not form.is_valid():\n show_form_errors(request, form)\n # Reload the object from DB to revert possible rejected change\n obj.refresh_from_db()\n return redirect_param(obj, \"#organize\")\n\n # Invalidate old stats\n old_stats = list(obj.stats.get_update_objects())\n\n obj = form.save()\n\n # Invalidate new stats\n obj.stats.update_parents(extra_objects=old_stats)\n\n return redirect(obj)\n\n\n@login_required\n@require_POST\ndef rename(request, path):\n obj = parse_path(request, path, (Component, Project, Category))\n if isinstance(obj, Component):\n return perform_rename(ComponentRenameForm, request, obj, \"component.edit\")\n if isinstance(obj, Category):\n return perform_rename(CategoryRenameForm, request, 
obj, \"project.edit\")\n return perform_rename(ProjectRenameForm, request, obj, \"project.edit\")\n\n\n@login_required\n@require_POST\ndef add_category(request, path):\n obj = parse_path(request, path, (Project, Category))\n if not request.user.has_perm(\"project.edit\", obj) or not obj.can_add_category:\n raise PermissionDenied\n form = AddCategoryForm(request, obj, request.POST)\n if not form.is_valid():\n show_form_errors(request, form)\n return redirect_param(obj, \"#organize\")\n form.save()\n return redirect(form.instance)\n\n\n@login_required\n@require_POST\ndef announcement(request, path):\n obj = parse_path(request, path, (Translation, Component, Project))\n\n if not request.user.has_perm(\"component.edit\", obj):\n raise PermissionDenied\n\n form = AnnouncementForm(request.POST)\n if not form.is_valid():\n show_form_errors(request, form)\n return redirect_param(obj, \"#announcement\")\n\n # Scope specific attributes\n scope = {}\n if isinstance(obj, Translation):\n scope[\"project\"] = obj.component.project\n scope[\"component\"] = obj.component\n scope[\"language\"] = obj.language\n elif isinstance(obj, Component):\n scope[\"project\"] = obj.project\n scope[\"component\"] = obj\n elif isinstance(obj, Project):\n scope[\"project\"] = obj\n\n Announcement.objects.create(\n user=request.user,\n **scope,\n **form.cleaned_data,\n )\n\n return redirect(obj)\n\n\n@login_required\n@require_POST\ndef announcement_delete(request, pk):\n announcement = get_object_or_404(Announcement, pk=pk)\n\n if request.user.has_perm(\"announcement.delete\", announcement):\n announcement.delete()\n\n return JsonResponse({\"responseStatus\": 200})\n\n\n@login_required\ndef component_progress(request, path):\n obj = parse_path(request, path, (Component,))\n return_url = \"show\" if \"info\" in request.GET else \"guide\"\n if not obj.in_progress():\n return redirect(return_url, path=obj.get_url_path())\n\n progress, log = obj.get_progress()\n\n return render(\n request,\n \"component-progress.html\",\n {\n \"object\": obj,\n \"progress\": progress,\n \"log\": \"\\n\".join(log),\n \"return_url\": return_url,\n },\n )\n\n\nclass BackupsMixin:\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.obj = parse_path(request, [kwargs[\"project\"]], (Project,))\n if not request.user.has_perm(\"project.edit\", self.obj):\n raise PermissionDenied\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass BackupsView(BackupsMixin, TemplateView):\n template_name = \"trans/backups.html\"\n\n def post(self, request, *args, **kwargs):\n create_project_backup.delay(self.obj.pk)\n messages.success(\n request, gettext(\"Backup scheduled. 
It will be available soon.\")\n )\n return redirect(\"backups\", project=self.obj.slug)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"keep_count\"] = settings.PROJECT_BACKUP_KEEP_COUNT\n context[\"keep_days\"] = settings.PROJECT_BACKUP_KEEP_DAYS\n context[\"object\"] = self.obj\n context[\"backups\"] = self.obj.list_backups()\n return context\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass BackupsDownloadView(BackupsMixin, View):\n def get(self, request, *args, **kwargs):\n for backup in self.obj.list_backups():\n if backup[\"name\"] == kwargs[\"backup\"]:\n return FileResponse(\n open(backup[\"path\"], \"rb\"), # noqa: SIM115\n as_attachment=True,\n filename=backup[\"name\"],\n )\n raise Http404\n", "repo_name": "WeblateOrg/weblate", "sub_path": "weblate/trans/views/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 11963, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3905, "dataset": "github-code", "pt": "52", "api": [{"api_name": "weblate.utils.views.parse_path", "line_number": 47, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component", "line_number": 47, "usage_type": "name"}, {"api_name": "weblate.trans.models.Project", "line_number": 47, "usage_type": "name"}, {"api_name": "weblate.utils.stats.ProjectLanguage", "line_number": 47, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 49, "usage_type": "name"}, {"api_name": "weblate.trans.models.Component", "line_number": 51, "usage_type": "argument"}, {"api_name": "weblate.utils.stats.ProjectLanguage", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.views.decorators.cache.never_cache", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 45, "usage_type": "name"}, {"api_name": "weblate.trans.forms.ProjectSettingsForm", "line_number": 60, "usage_type": "call"}, {"api_name": "weblate.utils.messages.success", "line_number": 63, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 63, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "weblate.utils.messages.error", "line_number": 65, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 65, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 66, "usage_type": "call"}, {"api_name": "weblate.trans.forms.ProjectSettingsForm", "line_number": 69, "usage_type": "call"}, {"api_name": "weblate.trans.util.render", "line_number": 71, "usage_type": "call"}, {"api_name": "weblate.trans.models.WorkflowSetting.DoesNotExist", "line_number": 81, "usage_type": "attribute"}, {"api_name": "weblate.trans.models.WorkflowSetting", "line_number": 81, "usage_type": "name"}, {"api_name": "weblate.trans.forms.WorkflowSettingForm", "line_number": 85, "usage_type": "call"}, {"api_name": "weblate.utils.messages.success", "line_number": 90, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 90, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 90, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 91, "usage_type": "call"}, {"api_name": "weblate.utils.messages.error", "line_number": 92, "usage_type": "call"}, {"api_name": "weblate.utils.messages", 
"line_number": 92, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 93, "usage_type": "call"}, {"api_name": "weblate.trans.forms.WorkflowSettingForm", "line_number": 96, "usage_type": "call"}, {"api_name": "weblate.trans.util.render", "line_number": 98, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 107, "usage_type": "name"}, {"api_name": "weblate.trans.forms.ComponentSettingsForm", "line_number": 110, "usage_type": "call"}, {"api_name": "weblate.utils.messages.success", "line_number": 113, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 113, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 113, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "weblate.utils.messages.error", "line_number": 115, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 115, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 116, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component.objects.get", "line_number": 120, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "weblate.trans.models.Component", "line_number": 120, "usage_type": "name"}, {"api_name": "weblate.trans.forms.ComponentSettingsForm", "line_number": 122, "usage_type": "call"}, {"api_name": "weblate.utils.messages.warning", "line_number": 125, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 125, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 127, "usage_type": "call"}, {"api_name": "weblate.trans.util.render", "line_number": 133, "usage_type": "call"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 144, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component", "line_number": 144, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 147, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 154, "usage_type": "name"}, {"api_name": "weblate.trans.util.redirect_param", "line_number": 157, "usage_type": "call"}, {"api_name": "django.views.decorators.cache.never_cache", "line_number": 140, "usage_type": "name"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 141, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 142, "usage_type": "name"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 163, "usage_type": "call"}, {"api_name": "weblate.trans.models.Translation", "line_number": 166, "usage_type": "name"}, {"api_name": "weblate.trans.models.Component", "line_number": 166, "usage_type": "name"}, {"api_name": "weblate.trans.models.Project", "line_number": 166, "usage_type": "name"}, {"api_name": "weblate.utils.stats.ProjectLanguage", "line_number": 166, "usage_type": "name"}, {"api_name": "weblate.utils.stats.CategoryLanguage", "line_number": 166, "usage_type": "name"}, {"api_name": "weblate.trans.models.Category", "line_number": 166, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 170, "usage_type": "name"}, {"api_name": "weblate.trans.forms.BaseDeleteForm", "line_number": 172, "usage_type": "call"}, {"api_name": "weblate.utils.views.show_form_errors", "line_number": 174, "usage_type": "call"}, {"api_name": 
"weblate.trans.util.redirect_param", "line_number": 175, "usage_type": "call"}, {"api_name": "weblate.trans.models.Translation", "line_number": 177, "usage_type": "argument"}, {"api_name": "weblate.utils.messages.success", "line_number": 180, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 180, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 180, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component", "line_number": 181, "usage_type": "argument"}, {"api_name": "weblate.trans.tasks.component_removal.delay", "line_number": 183, "usage_type": "call"}, {"api_name": "weblate.trans.tasks.component_removal", "line_number": 183, "usage_type": "name"}, {"api_name": "weblate.utils.messages.success", "line_number": 184, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 184, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 185, "usage_type": "call"}, {"api_name": "weblate.trans.models.Category", "line_number": 187, "usage_type": "argument"}, {"api_name": "weblate.trans.tasks.category_removal.delay", "line_number": 189, "usage_type": "call"}, {"api_name": "weblate.trans.tasks.category_removal", "line_number": 189, "usage_type": "name"}, {"api_name": "weblate.utils.messages.success", "line_number": 190, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 190, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 190, "usage_type": "call"}, {"api_name": "weblate.trans.models.Project", "line_number": 191, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 192, "usage_type": "call"}, {"api_name": "weblate.trans.tasks.project_removal.delay", "line_number": 193, "usage_type": "call"}, {"api_name": "weblate.trans.tasks.project_removal", "line_number": 193, "usage_type": "name"}, {"api_name": "weblate.utils.messages.success", "line_number": 194, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 194, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 194, "usage_type": "call"}, {"api_name": "weblate.utils.stats.ProjectLanguage", "line_number": 195, "usage_type": "argument"}, {"api_name": "weblate.utils.messages.success", "line_number": 200, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 200, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 200, "usage_type": "call"}, {"api_name": "weblate.utils.stats.CategoryLanguage", "line_number": 201, "usage_type": "argument"}, {"api_name": "weblate.utils.messages.success", "line_number": 206, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 206, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 206, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 208, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 160, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 161, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 213, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 218, "usage_type": "name"}, {"api_name": "weblate.utils.messages.error", "line_number": 219, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 219, 
"usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 221, "usage_type": "call"}, {"api_name": "weblate.trans.util.redirect_param", "line_number": 224, "usage_type": "call"}, {"api_name": "weblate.utils.views.show_form_errors", "line_number": 228, "usage_type": "call"}, {"api_name": "weblate.trans.util.redirect_param", "line_number": 231, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 241, "usage_type": "call"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 247, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component", "line_number": 247, "usage_type": "name"}, {"api_name": "weblate.trans.models.Project", "line_number": 247, "usage_type": "name"}, {"api_name": "weblate.trans.models.Category", "line_number": 247, "usage_type": "name"}, {"api_name": "weblate.trans.models.Component", "line_number": 248, "usage_type": "argument"}, {"api_name": "weblate.trans.forms.ComponentRenameForm", "line_number": 249, "usage_type": "argument"}, {"api_name": "weblate.trans.models.Category", "line_number": 250, "usage_type": "argument"}, {"api_name": "weblate.trans.forms.CategoryRenameForm", "line_number": 251, "usage_type": "argument"}, {"api_name": "weblate.trans.forms.ProjectRenameForm", "line_number": 252, "usage_type": "argument"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 244, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 245, "usage_type": "name"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 258, "usage_type": "call"}, {"api_name": "weblate.trans.models.Project", "line_number": 258, "usage_type": "name"}, {"api_name": "weblate.trans.models.Category", "line_number": 258, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 260, "usage_type": "name"}, {"api_name": "weblate.trans.forms.AddCategoryForm", "line_number": 261, "usage_type": "call"}, {"api_name": "weblate.utils.views.show_form_errors", "line_number": 263, "usage_type": "call"}, {"api_name": "weblate.trans.util.redirect_param", "line_number": 264, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 266, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 255, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 256, "usage_type": "name"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 272, "usage_type": "call"}, {"api_name": "weblate.trans.models.Translation", "line_number": 272, "usage_type": "name"}, {"api_name": "weblate.trans.models.Component", "line_number": 272, "usage_type": "name"}, {"api_name": "weblate.trans.models.Project", "line_number": 272, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 275, "usage_type": "name"}, {"api_name": "weblate.trans.forms.AnnouncementForm", "line_number": 277, "usage_type": "call"}, {"api_name": "weblate.utils.views.show_form_errors", "line_number": 279, "usage_type": "call"}, {"api_name": "weblate.trans.util.redirect_param", "line_number": 280, "usage_type": "call"}, {"api_name": "weblate.trans.models.Translation", "line_number": 284, "usage_type": "argument"}, {"api_name": "weblate.trans.models.Component", "line_number": 288, "usage_type": "argument"}, {"api_name": "weblate.trans.models.Project", "line_number": 291, "usage_type": "argument"}, {"api_name": 
"weblate.trans.models.Announcement.objects.create", "line_number": 294, "usage_type": "call"}, {"api_name": "weblate.trans.models.Announcement.objects", "line_number": 294, "usage_type": "attribute"}, {"api_name": "weblate.trans.models.Announcement", "line_number": 294, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 300, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 269, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 270, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 306, "usage_type": "call"}, {"api_name": "weblate.trans.models.Announcement", "line_number": 306, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 311, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 303, "usage_type": "name"}, {"api_name": "django.views.decorators.http.require_POST", "line_number": 304, "usage_type": "name"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 316, "usage_type": "call"}, {"api_name": "weblate.trans.models.Component", "line_number": 316, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 319, "usage_type": "call"}, {"api_name": "weblate.trans.util.render", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 314, "usage_type": "name"}, {"api_name": "weblate.utils.views.parse_path", "line_number": 338, "usage_type": "call"}, {"api_name": "weblate.trans.models.Project", "line_number": 338, "usage_type": "name"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 340, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 344, "usage_type": "name"}, {"api_name": "weblate.trans.tasks.create_project_backup.delay", "line_number": 348, "usage_type": "call"}, {"api_name": "weblate.trans.tasks.create_project_backup", "line_number": 348, "usage_type": "name"}, {"api_name": "weblate.utils.messages.success", "line_number": 349, "usage_type": "call"}, {"api_name": "weblate.utils.messages", "line_number": 349, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 350, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 352, "usage_type": "call"}, {"api_name": "django.conf.settings.PROJECT_BACKUP_KEEP_COUNT", "line_number": 356, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 356, "usage_type": "name"}, {"api_name": "django.conf.settings.PROJECT_BACKUP_KEEP_DAYS", "line_number": 357, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 357, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 343, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 343, "usage_type": "argument"}, {"api_name": "django.views.generic.View", "line_number": 364, "usage_type": "name"}, {"api_name": "django.http.FileResponse", "line_number": 368, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 373, "usage_type": "name"}, {"api_name": "django.utils.decorators.method_decorator", "line_number": 363, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 363, "usage_type": "argument"}]} +{"seq_id": "23284401965", "text": "import json\n\nfrom 
json import JSONDecodeError\n\n\nfrom constants import JSON_FILE, COMMENT_FILE\n\n\nclass PostHandler:\n @staticmethod\n def load_json(file_name):\n \"\"\"\n Loads data from a json file\n :param file_name: the json file\n :return: a list of dictionaries\n \"\"\"\n try:\n with open(file_name, 'r', encoding='utf-8') as f:\n posts = json.load(f)\n except FileNotFoundError:\n print('Error: file not found')\n return []\n except JSONDecodeError:\n print('Error reading data from Json')\n return []\n return posts\n\n\n def get_posts_all(self):\n \"\"\"\n Loads and returns all posts\n :return: all posts from the json file as python objects.\n \"\"\"\n return self.load_json(JSON_FILE)\n\n\n def get_posts_by_user(self, user_name):\n \"\"\"\n Returns the posts of a given user.\n :param user_name: the user name to search posts by.\n :return: the posts of the given user.\n \"\"\"\n posts = []\n posts_data = self.get_posts_all()\n try:\n for poster in posts_data:\n if user_name.lower() == poster['poster_name'].lower():\n posts.append(poster)\n except ValueError:\n print('No such user')\n return posts\n return posts\n\n # post_handler = PostHandler()\n # print(post_handler.get_posts_by_user(\"leo\"))\n\n def get_comments_by_post_id(self, post_id):\n \"\"\"\n Returns the comments of a given post.\n :param post_id: the id of the post whose comments to find\n :return: the comments of the given post\n \"\"\"\n found_comments = []\n found_posts = self.get_post_by_pk(post_id)\n if not found_posts:\n raise ValueError\n\n comments_data = self.load_json(COMMENT_FILE)\n try:\n for comment in comments_data:\n if post_id == comment['post_id']:\n found_comments.append(comment)\n except KeyError:\n print('No such key')\n return []\n return found_comments\n\n def search_for_posts(self, query):\n \"\"\"\n Returns the list of posts matching a keyword\n :param query: the keyword to search posts by\n :return: the list of posts matching the keyword\n \"\"\"\n list_query = []\n posts = self.get_posts_all()\n for post in posts:\n if query.lower() in post['content'].lower():\n list_query.append(post)\n return list_query\n\n def get_post_by_pk(self, pk):\n \"\"\"\n Returns a single post by its identifier.\n :param pk: the identifier to look the post up by\n :return: the post with the given identifier\n \"\"\"\n\n posts_data = self.get_posts_all()\n for post in posts_data:\n if post[\"pk\"] == pk:\n return post\n\n# post_handler = PostHandler()\n# print(post_handler.get_post_by_pk(3))\n#post_handler = PostHandler()\n#print(post_handler.search_for_posts(\"еда\"))\n#post_handler = PostHandler()\n#print(post_handler.search_for_posts(\"еда\"))\n\n\n\n\n\n\n", "repo_name": "nika1987/course_paper3", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3992, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "json.JSONDecodeError", "line_number": 23, "usage_type": "name"}, {"api_name": "constants.JSON_FILE", "line_number": 34, "usage_type": "argument"}, {"api_name": "constants.COMMENT_FILE", "line_number": 68, "usage_type": "argument"}]} +{"seq_id": "443324045", "text": "from unittest.mock import patch\n\nimport pytest\n\nfrom rubicon_ml import domain\nfrom rubicon_ml.client.experiment import Experiment\nfrom 
rubicon_ml.sklearn.estimator_logger import EstimatorLogger\n\n\ndef test_log_parameters_triggers_experiment_log_parameter(project_client, fake_estimator_cls):\n project = project_client\n experiment = Experiment(domain.Experiment(project_name=project.name), project)\n estimator = fake_estimator_cls()\n\n base_logger = EstimatorLogger(estimator=estimator, experiment=experiment, step_name=\"vect\")\n\n with patch.object(Experiment, \"log_parameter\", return_value=None) as mock_log_parameter:\n base_logger.log_parameters()\n\n assert mock_log_parameter.call_count == 3\n\n # the step name gets prepended to each param\n mock_log_parameter.assert_called_with(name=\"vect__ngram_range\", value=(1, 2))\n\n\ndef test_log_unserializable_param_triggers_exception(project_client, fake_estimator_cls):\n project = project_client\n experiment = Experiment(domain.Experiment(project_name=project.name), project)\n estimator = fake_estimator_cls(params={\"unserializable\": b\"not serializable\"})\n\n base_logger = EstimatorLogger(estimator=estimator, experiment=experiment, step_name=\"vect\")\n\n with patch.object(Experiment, \"log_parameter\", side_effect=Exception(\"test\")):\n with pytest.warns(Warning):\n base_logger.log_parameters()\n", "repo_name": "capitalone/rubicon-ml", "sub_path": "tests/unit/sklearn/test_estimator_logger.py", "file_name": "test_estimator_logger.py", "file_ext": "py", "file_size_in_byte": 1392, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 105, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rubicon_ml.client.experiment.Experiment", "line_number": 12, "usage_type": "call"}, {"api_name": "rubicon_ml.domain.Experiment", "line_number": 12, "usage_type": "call"}, {"api_name": "rubicon_ml.domain", "line_number": 12, "usage_type": "name"}, {"api_name": "rubicon_ml.sklearn.estimator_logger.EstimatorLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 17, "usage_type": "call"}, {"api_name": "rubicon_ml.client.experiment.Experiment", "line_number": 17, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 17, "usage_type": "name"}, {"api_name": "rubicon_ml.client.experiment.Experiment", "line_number": 28, "usage_type": "call"}, {"api_name": "rubicon_ml.domain.Experiment", "line_number": 28, "usage_type": "call"}, {"api_name": "rubicon_ml.domain", "line_number": 28, "usage_type": "name"}, {"api_name": "rubicon_ml.sklearn.estimator_logger.EstimatorLogger", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.mock.patch.object", "line_number": 33, "usage_type": "call"}, {"api_name": "rubicon_ml.client.experiment.Experiment", "line_number": 33, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 33, "usage_type": "name"}, {"api_name": "pytest.warns", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "11348089179", "text": "from __future__ import print_function\nfrom collections import defaultdict\nimport os.path as osp\nimport numpy as np\nimport torch\nfrom sklearn.metrics import average_precision_score\nfrom torch.utils.data import DataLoader\nimport os\nfrom torch import nn\nfrom tqdm import tqdm\nfrom time import time\n#os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n#DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nDEVICE = torch.device(\"cuda\")\nprint(DEVICE)\n\n\ndef may_make_dir(path):\n if path in [None, '']:\n return\n if not osp.exists(path):\n os.makedirs(path)\n\ndef save_ckpt(modules_optims, ep, scores, 
ckpt_file):\n state_dicts = [m.state_dict() for m in modules_optims]\n ckpt = dict(state_dicts=state_dicts, ep=ep, scores=scores)\n may_make_dir(osp.dirname(osp.abspath(ckpt_file)))\n torch.save(ckpt, ckpt_file)\n\n\ndef load_ckpt(modules_optims, ckpt_file, load_to_cpu=True, verbose=True,strict=True):\n map_location = (lambda storage, loc: storage) if load_to_cpu else None\n ckpt = torch.load(ckpt_file, map_location=map_location)\n for m, sd in zip(modules_optims, ckpt['state_dicts']):\n m.load_state_dict(sd,strict=strict)\n if verbose:\n print('Resume from ckpt {}, \\nepoch {}, \\nscores {}'.format(\n ckpt_file, ckpt['ep'], ckpt['scores']))\n return ckpt['ep'], ckpt['scores']\n\n\ndef _unique_sample(ids_dict, num):\n mask = np.zeros(num, dtype=np.bool)\n for _, indices in ids_dict.items():\n i = np.random.choice(indices)\n mask[i] = True\n return mask\n\n\ndef cmc(distmat, query_ids=None, gallery_ids=None,\n query_cams=None, gallery_cams=None, topk=100,\n separate_camera_set=False,\n single_gallery_shot=False,\n first_match_break=False):\n m, n = distmat.shape\n # Fill up default values\n if query_ids is None:\n query_ids = np.arange(m)\n if gallery_ids is None:\n gallery_ids = np.arange(n)\n if query_cams is None:\n query_cams = np.zeros(m).astype(np.int32)\n if gallery_cams is None:\n gallery_cams = np.ones(n).astype(np.int32)\n # Ensure numpy array\n query_ids = np.asarray(query_ids)\n gallery_ids = np.asarray(gallery_ids)\n query_cams = np.asarray(query_cams)\n gallery_cams = np.asarray(gallery_cams)\n # Sort and find correct matches\n indices = np.argsort(distmat, axis=1)\n\n # print('gallery_ids', gallery_ids.shape )\n # print('query_ids[:, np.newaxis]', query_ids[:, np.newaxis].shape )\n # print('indices',indices.shape)\n\n matches = (gallery_ids[indices] == query_ids[:, np.newaxis])\n # Compute CMC for each query\n ret = np.zeros(topk)\n num_valid_queries = 0\n for i in range(m):\n # Filter out the same id and same camera\n valid = ((gallery_ids[indices[i]] != query_ids[i]) |\n (gallery_cams[indices[i]] != query_cams[i]))\n if separate_camera_set:\n # Filter out samples from same camera\n valid &= (gallery_cams[indices[i]] != query_cams[i])\n if not np.any(matches[i, valid]):\n continue\n if single_gallery_shot:\n repeat = 10\n gids = gallery_ids[indices[i][valid]]\n inds = np.where(valid)[0]\n ids_dict = defaultdict(list)\n for j, x in zip(inds, gids):\n ids_dict[x].append(j)\n else:\n repeat = 1\n for _ in range(repeat):\n if single_gallery_shot:\n # Randomly choose one instance for each id\n sampled = (valid & _unique_sample(ids_dict, len(valid)))\n index = np.nonzero(matches[i, sampled])[0]\n else:\n index = np.nonzero(matches[i, valid])[0]\n delta = 1. 
/ (len(index) * repeat)\n for j, k in enumerate(index):\n if k - j >= topk:\n break\n if first_match_break:\n ret[k - j] += 1\n break\n ret[k - j] += delta\n num_valid_queries += 1\n if num_valid_queries == 0:\n raise RuntimeError(\"No valid query\")\n return ret.cumsum() / num_valid_queries\n\n\ndef mean_ap(distmat, query_ids=None, gallery_ids=None,\n query_cams=None, gallery_cams=None):\n m, n = distmat.shape\n # Fill up default values\n if query_ids is None:\n query_ids = np.arange(m)\n if gallery_ids is None:\n gallery_ids = np.arange(n)\n if query_cams is None:\n query_cams = np.zeros(m).astype(np.int32)\n if gallery_cams is None:\n gallery_cams = np.ones(n).astype(np.int32)\n # Ensure numpy array\n query_ids = np.asarray(query_ids)\n gallery_ids = np.asarray(gallery_ids)\n query_cams = np.asarray(query_cams)\n gallery_cams = np.asarray(gallery_cams)\n # Sort and find correct matches\n indices = np.argsort(distmat, axis=1)\n matches = (gallery_ids[indices] == query_ids[:, np.newaxis])\n # Compute AP for each query\n aps = []\n for i in range(m):\n # Filter out the same id and same camera\n valid = ((gallery_ids[indices[i]] != query_ids[i]) |\n (gallery_cams[indices[i]] != query_cams[i]))\n y_true = matches[i, valid]\n y_score = -distmat[i][indices[i]][valid]\n if not np.any(y_true):\n continue\n aps.append(average_precision_score(y_true, y_score))\n if len(aps) == 0:\n raise RuntimeError(\"No valid query\")\n return np.mean(aps)\n\ndef creat_test_data_set_loader(test_path,data_set_class,test_transform ,batch_test):\n\n test_dataset = data_set_class(test_path, transform=test_transform)\n test_loader = DataLoader(test_dataset, batch_size=batch_test, shuffle=False)\n\n return test_dataset, test_loader\n\ndef creat_train_data_set_loader(train_path,data_set_class,RandomIdSampler,train_transform ,batch_train, batch_id,batch_image):\n train_dataset = data_set_class(train_path, transform=train_transform)\n train_loader_tri = DataLoader(train_dataset,\n sampler=RandomIdSampler(train_dataset, batch_image=batch_image),\n batch_size=batch_id * batch_image)\n\n train_loader_all = DataLoader(train_dataset, batch_size=batch_train, shuffle=True, drop_last=True)\n\n return train_dataset, train_loader_tri, train_loader_all\n\n\n", "repo_name": "Klitter/Pyramidal_Person_ReID", "sub_path": "__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 60, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 93, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 148, "usage_type": "call"}, {"api_name": "sklearn.metrics.average_precision_score", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "27077304657", "text": "import numpy as np\nimport sys\nimport json\nfrom tflite_runtime.interpreter import Interpreter\n\nx = sys.argv[1]\ndict_file = json.loads(x)\ndata = dict_file[\"feeds\"][0]\ndata[\"field5\"] = data[\"field5\"][:-4]\ninput_data = [float(data[\"field1\"]), float(data[\"field2\"]),\n float(data[\"field3\"]), float(data[\"field4\"]), float(data[\"field5\"])]\ninput_array = np.array(input_data, dtype = np.float32)\ninput_array = np.resize(input_array, (1,5))\ntflite_interpreter = Interpreter(model_path='converted_model.tflite')\n\ninput_details = tflite_interpreter.get_input_details()\noutput_details = tflite_interpreter.get_output_details()\ntflite_interpreter.allocate_tensors()\n\ntflite_interpreter.set_tensor(input_details[0]['index'], input_array)\ntflite_interpreter.invoke()\noutput_array = tflite_interpreter.get_tensor(output_details[0]['index'])\nresult = np.squeeze(output_array)\nprint(int(result > 0.6))\n", 
"repo_name": "ariawahyuw/raspi_tflite", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 884, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.resize", "line_number": 13, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter.Interpreter", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "12354197421", "text": "# -*- coding: utf-8 -*-\nimport scrapy\n\ncomune = \"Milano\"\n\nclass AddressesSpider(scrapy.Spider):\n name = \"addresses\"\n start_urls = [\n 'http://serviziweb.tabaccai.it/voucherinps/VistaAdesioni.aspx',\n ]\n\n def parse(self, response):\n formdata = {\n \"ctl00$ContentPlaceHolder1$tbCap\": \"\",\n \"ctl00$ContentPlaceHolder1$tbComune\": comune,\n }\n form_request = scrapy.FormRequest.from_response(response, formdata=formdata, callback=self.after_query)\n self.logger.info(\"form request: %s\", form_request.body)\n return form_request\n\n def after_query(self, response):\n requests = []\n requests.append(scrapy.FormRequest.from_response(response, callback=self.parse_tabaccai))\n pages = response.xpath(\"//table[@id='ctl00_ContentPlaceHolder1_GridView1']/tr[@class='pager']//a/text()\")\n for page in pages:\n formdata = {\n \"__EVENTTARGET\": \"ctl00$ContentPlaceHolder1$GridView1\",\n \"__EVENTARGUMENT\": \"Page$\" + page.extract()\n }\n self.logger.info(\"request page: %s\", page)\n requests.append(scrapy.FormRequest.from_response(response, formdata=formdata, callback=self.parse_tabaccai))\n self.logger.debug(\"requests: \" + str(map( (lambda form: form.body), requests )))\n return requests\n\n def parse_tabaccai(self, response):\n trs = response.xpath(\"//table[@id='ctl00_ContentPlaceHolder1_GridView1']/tr\")\n addresses = trs[1:-2]\n items = []\n for line in addresses:\n from tabaccai.items import Address\n address = Address()\n address['indirizzo'] = line.xpath(\"td[1]/text()\").extract()\n address['cap'] = line.xpath(\"td[2]/text()\").extract()\n address['comune'] = line.xpath(\"td[3]/text()\").extract()\n address['provincia'] = line.xpath(\"td[4]/text()\").extract()\n self.logger.info(\"indirizzo: %s\", address)\n items.append(address)\n return items\n", "repo_name": "antonkojin/voucher", "sub_path": "tabaccai/tabaccai/spiders/addresses.py", "file_name": "addresses.py", "file_ext": "py", "file_size_in_byte": 2027, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scrapy.Spider", "line_number": 6, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest.from_response", "line_number": 17, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest.from_response", "line_number": 23, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 23, "usage_type": "attribute"}, {"api_name": "scrapy.FormRequest.from_response", "line_number": 31, "usage_type": "call"}, {"api_name": "scrapy.FormRequest", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tabaccai.items.Address", "line_number": 41, 
"usage_type": "call"}]} +{"seq_id": "17760624155", "text": "import csv\nimport json\nimport os\nimport sys\nimport uuid\nfrom inspect import getfullargspec\nfrom math import ceil\n\nimport sqlalchemy\nfrom sqlalchemy import func, inspect, select\nfrom sqlalchemy.sql.expression import text\n\nfrom paths import MIGRATIONS_DIRECTORY, PEPYS_IMPORT_DIRECTORY\nfrom pepys_import.resolvers.command_line_input import create_menu\nfrom pepys_import.utils.sqlalchemy_utils import get_primary_key_for_table\nfrom pepys_import.utils.table_name_utils import table_name_to_class_name\nfrom pepys_import.utils.text_formatting_utils import (\n custom_print_formatted_text,\n format_error_message,\n)\n\nSTORED_PROC_PATH = os.path.join(PEPYS_IMPORT_DIRECTORY, \"database\", \"postgres_stored_procedures\")\n\n\ndef import_from_csv(data_store, path, files, change_id):\n for file in sorted(files):\n # split file into filename and extension\n table_name, _ = os.path.splitext(file)\n if table_name.lower() == \"synonyms\":\n import_synonyms(data_store, os.path.join(path, file), change_id)\n continue\n possible_method = \"add_to_\" + table_name.lower().replace(\" \", \"_\")\n method_to_call = getattr(data_store, possible_method, None)\n if method_to_call:\n # Get all arguments of the method, except the first argument which is 'self'\n arguments = getfullargspec(method_to_call).args[1:]\n possible_arguments = \",\".join(arguments)\n with open(os.path.join(path, file), \"r\") as file_object:\n reader = csv.reader(file_object)\n # extract header\n header = next(reader)\n if not set(header).issubset(set(arguments)):\n custom_print_formatted_text(\n format_error_message(\n f\"Headers and the arguments of DataStore.{possible_method}() don't match!\"\n f\"\\nPossible arguments: {possible_arguments}\"\n f\"\\nPlease check your CSV file.\"\n )\n )\n return\n for row_number, row in enumerate(reader):\n row_as_string = \"\".join(row).strip()\n if row_as_string == \"\":\n continue\n keyword_arguments = dict(zip(header, row))\n try:\n method_to_call(**keyword_arguments, change_id=change_id)\n except Exception as e:\n custom_print_formatted_text(\n format_error_message(f\"Error importing row {row} from {file}\")\n )\n custom_print_formatted_text(format_error_message(f\" Error was '{str(e)}'\"))\n else:\n custom_print_formatted_text(\n format_error_message(f\"Method({possible_method}) not found!\")\n )\n\n\ndef import_synonyms(data_store, filepath, change_id):\n with open(filepath, \"r\") as file_object:\n reader = csv.reader(file_object)\n # extract header\n header = next(reader)\n if not set(header).issubset({\"synonym\", \"table\", \"target_name\"}):\n custom_print_formatted_text(\n format_error_message(\n \"Headers of the Synonyms.csv file are wrong or missing!\"\n \"\\nNecessary arguments: synonym,table,target_name\"\n \"\\nPlease check your CSV file.\"\n )\n )\n return\n # For every row in the CSV\n for row in reader:\n row_as_string = \"\".join(row).strip()\n if row_as_string == \"\":\n continue\n\n values = dict(zip(header, row))\n\n # Search in the given table for the name\n class_name = table_name_to_class_name(values[\"table\"])\n\n try:\n db_class = getattr(data_store.db_classes, class_name)\n pri_key_column_name = db_class.__table__.primary_key.columns.values()[0].name\n except AttributeError:\n custom_print_formatted_text(format_error_message(f\"Error on row {row}\"))\n custom_print_formatted_text(\n format_error_message(f\" Invalid table name {values['table']}\")\n )\n continue\n\n # Try and find a name column to use\n 
possibilities = [\"name\", \"reference\"]\n\n name_col = None\n for poss in possibilities:\n try:\n name_col = getattr(db_class, poss)\n except AttributeError:\n continue\n\n if name_col is None:\n custom_print_formatted_text(format_error_message(f\"Error on row {row}\"))\n custom_print_formatted_text(\n format_error_message(f\" Cannot find name column for table {values['table']}\")\n )\n continue\n\n results = (\n data_store.session.query(db_class).filter(name_col == values[\"target_name\"]).all()\n )\n\n if len(results) == 0:\n # Nothing to link synonym to so give error\n custom_print_formatted_text(format_error_message(f\"Error on row {row}\"))\n custom_print_formatted_text(\n format_error_message(\n f\" Name '{values['target_name']}' is not found in table {values['table']}\"\n )\n )\n continue\n elif len(results) == 1:\n guid = getattr(results[0], pri_key_column_name)\n # Found one entry, so can create synonym\n data_store.add_to_synonyms(values[\"table\"], values[\"synonym\"], guid, change_id)\n elif len(results) > 1:\n if values[\"table\"] != \"Platforms\":\n custom_print_formatted_text(format_error_message(f\"Error on row {row}\"))\n custom_print_formatted_text(\n format_error_message(\n f\" Name '{values['target_name']}' occurs multiple times in table {values['table']}.\"\n f\" Asking user to resolve is only supported for Platforms table.\"\n )\n )\n continue\n\n results = sorted(results, key=lambda x: x.identifier)\n chosen_item = ask_user_for_synonym_link(data_store, results, values)\n\n if chosen_item is None:\n print(\"Skipping row\")\n continue\n else:\n guid = getattr(chosen_item, pri_key_column_name)\n data_store.add_to_synonyms(values[\"table\"], values[\"synonym\"], guid, change_id)\n\n\ndef ask_user_for_synonym_link(data_store, results, values):\n options = [\n f\"{result.name} / {result.identifier} / {result.nationality_name}\" for result in results\n ]\n\n options += [\"Skip this row\"]\n\n def is_valid(option): # pragma: no cover\n return option.lower() in [str(i) for i in range(1, len(options) + 1)] or option == \".\"\n\n choice = create_menu(\n f\"Choose which Platform to link synonym '{values['target_name']}'' to:\",\n options,\n validate_method=is_valid,\n )\n\n if choice == \".\":\n print(\"Quitting\")\n sys.exit(1)\n elif choice == str(len(options)):\n return None\n elif choice in [str(i) for i in range(1, len(options) + 1)]:\n return results[int(choice) - 1]\n\n\ndef is_schema_created(engine, db_type):\n \"\"\"Returns True if Pepys Tables are created, False otherwise.\"\"\"\n inspector = inspect(engine)\n if db_type == \"sqlite\":\n table_names = inspector.get_table_names()\n # SQLite table numbers vary by mod_spatialite. The version of mod_spatialiate\n # that is installed can vary by platform - so both numbers should be acceptable.\n if len(table_names) >= 77 and len(table_names) <= 79:\n return True\n else:\n table_names = inspector.get_table_names(schema=\"pepys\")\n if len(table_names) == 41:\n return True\n\n if len(table_names) == 0:\n message = \"Database tables are not found! 
(Hint: Did you initialise the DataStore?)\"\n else:\n message = \"Please run database migration to bring tables up to date.\"\n custom_print_formatted_text(format_error_message(message))\n return False\n\n\ndef create_spatial_tables_for_sqlite(engine):\n \"\"\"Create geometry_columns and spatial_ref_sys metadata table\"\"\"\n\n if not inspect(engine).has_table(\"spatial_ref_sys\"):\n with engine.begin() as connection:\n connection.execute(select(func.InitSpatialMetaData(1)))\n\n\ndef create_spatial_tables_for_postgres(engine):\n \"\"\"Create schema pepys and extension for PostGIS\"\"\"\n query = \"\"\"\n CREATE SCHEMA IF NOT EXISTS pepys;\n CREATE EXTENSION IF NOT EXISTS postgis;\n SET search_path = pepys,public;\n \"\"\"\n with engine.begin() as connection:\n connection.execute(text(query))\n\n\ndef create_stored_procedures_for_postgres(engine):\n stored_procedure_files = [\n os.path.join(STORED_PROC_PATH, \"dashboard_metadata.sql\"),\n os.path.join(STORED_PROC_PATH, \"dashboard_stats.sql\"),\n os.path.join(STORED_PROC_PATH, \"Comments_for.sql\"),\n os.path.join(STORED_PROC_PATH, \"Contacts_for.sql\"),\n os.path.join(STORED_PROC_PATH, \"Datafiles_for.sql\"),\n os.path.join(STORED_PROC_PATH, \"States_for.sql\"),\n ]\n\n with engine.begin() as connection:\n for filename in stored_procedure_files:\n with open(filename) as f:\n procedure_definition = f.read()\n connection.execute(text(procedure_definition))\n\n\ndef create_alembic_version_table(engine, db_type):\n with open(os.path.join(MIGRATIONS_DIRECTORY, \"latest_revisions.json\"), \"r\") as file:\n versions = json.load(file)\n if \"LATEST_POSTGRES_VERSION\" not in versions or \"LATEST_SQLITE_VERSION\" not in versions:\n custom_print_formatted_text(format_error_message(\"Latest revision IDs couldn't found!\"))\n return\n\n if db_type == \"sqlite\":\n # Try and get all entries from alembic_version table\n try:\n with engine.begin() as connection:\n table_contents = connection.execute(\n text(\"SELECT * from alembic_version;\")\n ).fetchall()\n\n if len(table_contents) == 0:\n # Table exists but no version number row, so stamp it:\n sql = \"INSERT INTO alembic_version (version_num) VALUES (:id)\"\n connection.execute(text(sql), {\"id\": versions[\"LATEST_SQLITE_VERSION\"]})\n if len(table_contents) == 1:\n if table_contents[0][0] == versions[\"LATEST_SQLITE_VERSION\"]:\n # Current version already stamped in table - so just continue\n print(\n \"Initialising database - alembic version in database matches latest version.\"\n )\n else:\n # The version in the database doesn't match the current version - so raise an error\n raise ValueError(\n f\"Database revision in alembic_version table ({table_contents[0][0]}) does not match latest revision ({versions['LATEST_SQLITE_VERSION']}).\"\n \"Please run database migration.\"\n )\n if len(table_contents) > 1:\n raise ValueError(\n \"Multiple rows detected in alembic_version table. Database potentially in inconsistent state.\"\n \"Migration functionality will not work. 
Please contact support.\"\n )\n except sqlalchemy.exc.OperationalError:\n with engine.begin() as connection:\n # Error running select, so table doesn't exist - create it and stamp the current version\n connection.execute(\n text(\n \"\"\"\n CREATE TABLE IF NOT EXISTS alembic_version\n (\n version_num VARCHAR(32) NOT NULL,\n CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num)\n );\n \"\"\"\n )\n )\n sql = \"INSERT INTO alembic_version (version_num) VALUES (:id)\"\n connection.execute(text(sql), {\"id\": versions[\"LATEST_SQLITE_VERSION\"]})\n else:\n # Try and get all entries from alembic_version table\n try:\n with engine.begin() as connection:\n table_contents = connection.execute(\n text(\"SELECT * from pepys.alembic_version;\")\n ).fetchall()\n\n if len(table_contents) == 0:\n # Table exists but no version number row, so stamp it:\n sql = \"INSERT INTO pepys.alembic_version (version_num) VALUES (:id)\"\n connection.execute(text(sql), {\"id\": versions[\"LATEST_POSTGRES_VERSION\"]})\n if len(table_contents) == 1:\n if table_contents[0][0] == versions[\"LATEST_POSTGRES_VERSION\"]:\n # Current version already stamped in table - so just continue\n print(\n \"Initialising database - alembic version in database matches latest version.\"\n )\n else:\n # The version in the database doesn't match the current version - so raise an error\n raise ValueError(\n f\"Database revision in alembic_version table ({table_contents[0][0]}) does not match latest revision ({versions['LATEST_POSTGRES_VERSION']}).\"\n \"Please run database migration.\"\n )\n if len(table_contents) > 1:\n raise ValueError(\n \"Multiple rows detected in alembic_version table. Database potentially in inconsistent state.\"\n \"Migration functionality will not work. Please contact support.\"\n )\n except (sqlalchemy.exc.OperationalError, sqlalchemy.exc.ProgrammingError):\n # Error running select, so table doesn't exist - create it and stamp the current version\n with engine.begin() as connection:\n connection.execute(\n text(\n \"\"\"\n CREATE TABLE IF NOT EXISTS pepys.alembic_version\n (\n version_num VARCHAR(32) NOT NULL,\n CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num)\n );\n \"\"\"\n )\n )\n sql = \"INSERT INTO pepys.alembic_version (version_num) VALUES (:id)\"\n connection.execute(text(sql), {\"id\": versions[\"LATEST_POSTGRES_VERSION\"]})\n\n\ndef cache_results_if_not_none(cache_attribute):\n def real_decorator(f):\n def helper(self, name):\n cache = eval(\"self.\" + cache_attribute)\n if name not in cache:\n result = f(self, name)\n if result:\n self.session.expunge(result)\n cache[name] = result\n return result\n else:\n return cache[name]\n\n return helper\n\n return real_decorator\n\n\ndef shorten_uuid(id): # pragma: no cover\n return str(id)[-6:]\n\n\nclass MissingDataException(Exception):\n pass\n\n\ndef lowercase_or_none(obj):\n if obj is None:\n return None\n else:\n return obj.lower()\n\n\ndef chunked_list(lst, size):\n \"\"\"Split a list into multiple chunks of length size.\n Returns a list containing sublists of length size.\n\n If the list doesn't divide by size exactly, then the\n last sublist will have a length < size.\n \"\"\"\n # Quick 'short-circuit' for a list less than size\n if len(lst) < size:\n return [lst]\n\n n_chunks = ceil(len(lst) / size)\n\n # We're returning a list containing lots of sublists\n # rather than yielding items as a generator\n # This is because we use a tqdm progress bar around this\n # function, and that needs to know the number of sublists\n # to be able to show a 
proper progress bar\n result = []\n\n for i in range(n_chunks):\n result.append(lst[i * size : (i + 1) * size])\n\n return result\n\n\ndef convert_edit_dict_columns(edit_dict, table_object):\n update_dict = {}\n # Convert the edit_dict we get from the GUI into a dict suitable for use in the update function\n # This involves converting any relationship columns into their ID column\n for col_name, new_value in edit_dict.items():\n attr_from_db_class = getattr(table_object, col_name)\n try:\n if isinstance(\n attr_from_db_class.prop, sqlalchemy.orm.relationships.RelationshipProperty\n ):\n local_column = list(attr_from_db_class.prop.local_columns)[0].key\n update_dict[local_column] = new_value\n else:\n update_dict[col_name] = new_value\n except Exception:\n update_dict[col_name] = new_value\n\n return update_dict\n\n\ndef convert_objects_to_ids(items, table_obj):\n if isinstance(items, list):\n new_id_list = []\n for value in items:\n if not isinstance(value, uuid.UUID):\n value = getattr(value, get_primary_key_for_table(table_obj))\n new_id_list.append(value)\n\n return new_id_list\n else:\n if not isinstance(items, uuid.UUID):\n value = getattr(items, get_primary_key_for_table(table_obj))\n else:\n value = items\n return value\n\n\ndef read_version_from_pepys_install(path):\n init_path = os.path.join(path, \"pepys_import\", \"__init__.py\")\n\n try:\n with open(init_path, \"r\") as f:\n for line in f:\n if \"__version__\" in line:\n splitted = line.split(\"=\")\n # Remove whitespace, double-quotes and single-quotes from either end\n version = splitted[1].strip().strip('\"').strip(\"'\")\n return version\n return None\n except Exception:\n print(f\"WARNING: Cannot read Pepys version from network master install at {path}\")\n return None\n", "repo_name": "debrief/pepys-import", "sub_path": "pepys_import/utils/data_store_utils.py", "file_name": "data_store_utils.py", "file_ext": "py", "file_size_in_byte": 18573, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "paths.PEPYS_IMPORT_DIRECTORY", "line_number": 22, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "inspect.getfullargspec", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 39, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 43, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 44, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 59, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 60, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 62, "usage_type": "call"}, {"api_name": 
"pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 62, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 64, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 65, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 71, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 75, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 76, "usage_type": "call"}, {"api_name": "pepys_import.utils.table_name_utils.table_name_to_class_name", "line_number": 92, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 98, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 98, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 99, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 100, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 115, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 115, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 116, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 117, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 127, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 127, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 128, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 129, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 140, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 140, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 141, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 142, "usage_type": "call"}, {"api_name": "pepys_import.resolvers.command_line_input.create_menu", "line_number": 170, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 178, "usage_type": "call"}, {"api_name": "sqlalchemy.inspect", "line_number": 187, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 203, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 203, "usage_type": "call"}, {"api_name": "sqlalchemy.inspect", "line_number": 210, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 212, "usage_type": "call"}, {"api_name": "sqlalchemy.func.InitSpatialMetaData", "line_number": 212, "usage_type": "call"}, {"api_name": 
"sqlalchemy.func", "line_number": 212, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 223, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 231, "usage_type": "call"}, {"api_name": "os.path", "line_number": 231, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path", "line_number": 232, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 244, "usage_type": "call"}, {"api_name": "paths.MIGRATIONS_DIRECTORY", "line_number": 244, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 244, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 245, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.custom_print_formatted_text", "line_number": 247, "usage_type": "call"}, {"api_name": "pepys_import.utils.text_formatting_utils.format_error_message", "line_number": 247, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 255, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 261, "usage_type": "call"}, {"api_name": "sqlalchemy.exc", "line_number": 279, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 283, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 294, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 300, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 306, "usage_type": "call"}, {"api_name": "sqlalchemy.exc", "line_number": 324, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 328, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.text", "line_number": 339, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 386, "usage_type": "call"}, {"api_name": "sqlalchemy.orm", "line_number": 409, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pepys_import.utils.sqlalchemy_utils.get_primary_key_for_table", "line_number": 426, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 431, "usage_type": "attribute"}, {"api_name": "pepys_import.utils.sqlalchemy_utils.get_primary_key_for_table", "line_number": 432, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 439, "usage_type": "call"}, {"api_name": "os.path", "line_number": 439, "usage_type": "attribute"}]} +{"seq_id": "22251840085", "text": "import logging\nimport sys\nimport configparser\nimport multiprocessing as mp\nimport database_utils\nfrom evaluation import evaluate_scenario\nfrom approaches.single_best_solver import SingleBestSolver\nfrom approaches.oracle import Oracle\nfrom approaches.survival_forests.surrogate import 
SurrogateSurvivalForest\nfrom approaches.survival_forests.auto_surrogate import SurrogateAutoSurvivalForest\nfrom baselines.per_algorithm_regressor import PerAlgorithmRegressor\nfrom baselines.multiclass_algorithm_selector import MultiClassAlgorithmSelector\nfrom baselines.sunny import SUNNY\nfrom baselines.snnap import SNNAP\nfrom baselines.isac import ISAC\nfrom baselines.satzilla11 import SATzilla11\nfrom baselines.satzilla07 import SATzilla07\nfrom sklearn.linear_model import Ridge\nfrom par_10_metric import Par10Metric\nfrom number_unsolved_instances import NumberUnsolvedInstances\n\n\nlogger = logging.getLogger(\"run\")\nlogger.addHandler(logging.StreamHandler())\n\n\ndef initialize_logging():\n logging.basicConfig(filename='logs/log_file.log', filemode='w',\n format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)\n\n\ndef load_configuration():\n config = configparser.ConfigParser()\n config.read_file(open('conf/experiment_configuration.cfg'))\n return config\n\n\ndef print_config(config: configparser.ConfigParser):\n for section in config.sections():\n logger.info(str(section) + \": \" + str(dict(config[section])))\n\n\ndef log_result(result):\n logger.info(\"Finished experiements for scenario: \" + result)\n\n\ndef create_approach(approach_names):\n approaches = list()\n for approach_name in approach_names:\n if approach_name == 'sbs':\n approaches.append(SingleBestSolver())\n if approach_name == 'oracle':\n approaches.append(Oracle())\n if approach_name == 'ExpectationSurvivalForest':\n approaches.append(SurrogateSurvivalForest(criterion='Expectation'))\n if approach_name == 'PolynomialSurvivalForest':\n approaches.append(SurrogateSurvivalForest(criterion='Polynomial'))\n if approach_name == 'GridSearchSurvivalForest':\n approaches.append(SurrogateSurvivalForest(criterion='GridSearch'))\n if approach_name == 'ExponentialSurvivalForest':\n approaches.append(SurrogateSurvivalForest(criterion='Exponential'))\n if approach_name == 'SurrogateAutoSurvivalForest':\n approaches.append(SurrogateAutoSurvivalForest())\n if approach_name == 'PAR10SurvivalForest':\n approaches.append(SurrogateSurvivalForest(criterion='PAR10'))\n if approach_name == 'per_algorithm_regressor':\n approaches.append(PerAlgorithmRegressor())\n if approach_name == 'imputed_per_algorithm_rf_regressor':\n approaches.append(PerAlgorithmRegressor(impute_censored=True))\n if approach_name == 'imputed_per_algorithm_ridge_regressor':\n approaches.append(PerAlgorithmRegressor(\n scikit_regressor=Ridge(alpha=1.0), impute_censored=True))\n if approach_name == 'multiclass_algorithm_selector':\n approaches.append(MultiClassAlgorithmSelector())\n if approach_name == 'sunny':\n approaches.append(SUNNY())\n if approach_name == 'snnap':\n approaches.append(SNNAP())\n if approach_name == 'satzilla-11':\n approaches.append(SATzilla11())\n if approach_name == 'satzilla-07':\n approaches.append(SATzilla07())\n if approach_name == 'isac':\n approaches.append(ISAC())\n return approaches\n\n\n#######################\n# MAIN #\n#######################\n\ninitialize_logging()\nconfig = load_configuration()\nlogger.info(\"Running experiments with config:\")\nprint_config(config)\n\n#fold = int(sys.argv[1])\n#logger.info(\"Running experiments for fold \" + str(fold))\n\ndb_handle, table_name = database_utils.initialize_mysql_db_and_table_name_from_config(\n config)\ndatabase_utils.create_table_if_not_exists(db_handle, table_name)\n\namount_of_cpus_to_use = int(config['EXPERIMENTS']['amount_of_cpus'])\npool = 
mp.Pool(amount_of_cpus_to_use)\n\n\nscenarios = config[\"EXPERIMENTS\"][\"scenarios\"].split(\",\")\napproach_names = config[\"EXPERIMENTS\"][\"approaches\"].split(\",\")\namount_of_scenario_training_instances = int(\n config[\"EXPERIMENTS\"][\"amount_of_training_scenario_instances\"])\ntune_hyperparameters = bool(int(config[\"EXPERIMENTS\"][\"tune_hyperparameters\"]))\n\nfor fold in range(1, 11):\n\n for scenario in scenarios:\n approaches = create_approach(approach_names)\n\n if len(approaches) < 1:\n logger.error(\"No approaches recognized!\")\n for approach in approaches:\n metrics = list()\n metrics.append(Par10Metric())\n if approach.get_name() != 'oracle':\n metrics.append(NumberUnsolvedInstances(False))\n metrics.append(NumberUnsolvedInstances(True))\n logger.info(\"Submitted pool task for approach \\\"\" +\n str(approach.get_name()) + \"\\\" on scenario: \" + scenario)\n pool.apply_async(evaluate_scenario, args=(scenario, approach, metrics,\n amount_of_scenario_training_instances, fold, config, tune_hyperparameters), callback=log_result)\n\n #evaluate_scenario(scenario, approach, metrics,\n # amount_of_scenario_training_instances, fold, config, tune_hyperparameters)\n print('Finished evaluation of fold')\n\npool.close()\npool.join()\nlogger.info(\"Finished all experiments.\")\n", "repo_name": "alexandertornede/algorithm_survival_analysis", "sub_path": "survival_tests/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 5614, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 24, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 29, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 33, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 38, "usage_type": "attribute"}, {"api_name": "approaches.single_best_solver", "line_number": 48, "usage_type": "name"}, {"api_name": "approaches.single_best_solver.append", "line_number": 51, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 51, "usage_type": "name"}, {"api_name": "approaches.single_best_solver.SingleBestSolver", "line_number": 51, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 53, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 53, "usage_type": "name"}, {"api_name": "approaches.oracle.Oracle", "line_number": 53, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 55, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 55, "usage_type": "name"}, {"api_name": "approaches.survival_forests.surrogate.SurrogateSurvivalForest", "line_number": 55, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 57, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 57, "usage_type": "name"}, {"api_name": "approaches.survival_forests.surrogate.SurrogateSurvivalForest", "line_number": 57, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 59, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 59, "usage_type": "name"}, {"api_name": 
"approaches.survival_forests.surrogate.SurrogateSurvivalForest", "line_number": 59, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 61, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 61, "usage_type": "name"}, {"api_name": "approaches.survival_forests.surrogate.SurrogateSurvivalForest", "line_number": 61, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 63, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 63, "usage_type": "name"}, {"api_name": "approaches.survival_forests.auto_surrogate.SurrogateAutoSurvivalForest", "line_number": 63, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 65, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 65, "usage_type": "name"}, {"api_name": "approaches.survival_forests.surrogate.SurrogateSurvivalForest", "line_number": 65, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 67, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 67, "usage_type": "name"}, {"api_name": "baselines.per_algorithm_regressor.PerAlgorithmRegressor", "line_number": 67, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 69, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 69, "usage_type": "name"}, {"api_name": "baselines.per_algorithm_regressor.PerAlgorithmRegressor", "line_number": 69, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 71, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 71, "usage_type": "name"}, {"api_name": "baselines.per_algorithm_regressor.PerAlgorithmRegressor", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 72, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 74, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 74, "usage_type": "name"}, {"api_name": "baselines.multiclass_algorithm_selector.MultiClassAlgorithmSelector", "line_number": 74, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 76, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 76, "usage_type": "name"}, {"api_name": "baselines.sunny.SUNNY", "line_number": 76, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 78, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 78, "usage_type": "name"}, {"api_name": "baselines.snnap.SNNAP", "line_number": 78, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 80, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 80, "usage_type": "name"}, {"api_name": "baselines.satzilla11.SATzilla11", "line_number": 80, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 82, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 82, "usage_type": "name"}, {"api_name": "baselines.satzilla07.SATzilla07", "line_number": 82, "usage_type": "call"}, {"api_name": "approaches.single_best_solver.append", "line_number": 84, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 84, 
"usage_type": "name"}, {"api_name": "baselines.isac.ISAC", "line_number": 84, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 85, "usage_type": "name"}, {"api_name": "database_utils.initialize_mysql_db_and_table_name_from_config", "line_number": 100, "usage_type": "call"}, {"api_name": "database_utils.create_table_if_not_exists", "line_number": 102, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 105, "usage_type": "call"}, {"api_name": "approaches.single_best_solver", "line_number": 117, "usage_type": "name"}, {"api_name": "approaches.single_best_solver", "line_number": 119, "usage_type": "argument"}, {"api_name": "approaches.single_best_solver", "line_number": 121, "usage_type": "name"}, {"api_name": "par_10_metric.Par10Metric", "line_number": 123, "usage_type": "call"}, {"api_name": "number_unsolved_instances.NumberUnsolvedInstances", "line_number": 125, "usage_type": "call"}, {"api_name": "number_unsolved_instances.NumberUnsolvedInstances", "line_number": 126, "usage_type": "call"}, {"api_name": "evaluation.evaluate_scenario", "line_number": 129, "usage_type": "argument"}]} +{"seq_id": "35036172514", "text": "import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\n\ndata = pd.read_csv('dataset/dataset.csv').sample(frac = 0.01)\nresult = pd.DataFrame()\nresult['WEATHER_DELAY'] = data.iloc[:,6]\ndata = data.iloc[:,0:6] \n\nx_train, x_test, y_train, y_test = train_test_split(data.values, result.values, test_size = 0.2) \n\nsvc = SVC()\nsvc.fit(x_train, y_train.ravel())\nprediction = svc.predict(x_test)\n\n\ndata_df = {\n 'test' : y_test.flatten(),\n 'pred' : prediction.flatten(),\n}\n\ndf = pd.DataFrame(data_df)\ndf.to_csv(\"result.csv\")\n\n\n\n\ncm = confusion_matrix(y_test, prediction)\nsum = 0\nfor i in range(cm.shape[0]):\n sum += cm[i][i]\n \naccuracy = sum/x_test.shape[0] \nprint(accuracy)\n", "repo_name": "nsssayom/flight_delay_predictor", "sub_path": "svc.py", "file_name": "svc.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "4727097831", "text": "import requests\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\n\r\nheaders = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}\r\n\r\nbase_url = 'https://www.qiushibaike.com/8hr/page/'\r\n\r\nfor num in range(1,5):\r\n\tprint('第{}页'.format(num))\r\n\r\n\tr = requests.get(base_url + str(num), headers = headers)\r\n\tcontent = r.text\r\n\tsoup = BeautifulSoup(content, 'lxml') \r\n\r\n\tdivs = soup.find_all(name = 'div' ,attrs={\"class\" : re.compile(r\"article block untagged mb15 typs_[A-z]{0,4}\")});\r\n\r\n\tfor div in divs:\r\n\t\tif div.find_all(class_ = 'thumb'):\r\n\t\t\tcontinue\r\n\t\tjoke = 
div.span.get_text()\r\n\t\tprint(joke)\r\n\t\tprint('--------------------')\r\n\t\t", "repo_name": "JieWHuang/Python_Crawler_Learning-documents", "sub_path": "01.py", "file_name": "01.py", "file_ext": "py", "file_size_in_byte": 704, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "26299068735", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom newspaper import Article\n\ndef get_summary_of_article(\n url:str,\n): \n try:\n article = Article(url)\n article.download()\n article.parse()\n article.nlp()\n return article.summary\n except Exception:\n return \"\"\n\ndef get_keyword_of_article(\n url:str,\n): \n try:\n article = Article(url)\n article.download()\n article.parse()\n article.nlp()\n return article.keywords\n except Exception:\n return []\n\ndef return_information(\n url: str,\n tag_article: str,\n tag_title: str,\n tag_link: str,\n tag_description: str,\n tag_image: str,\n tag_image_h: str,\n tag_image_w: str,\n tag_image_link: str,\n):\n website = requests.get(url)\n data = {}\n content = website.content\n soup = BeautifulSoup(content, \"xml\")\n items = soup.find_all(tag_article)\n for count, item in enumerate(items):\n\n try:\n title = item.find(tag_title).text\n except AttributeError:\n title = None\n\n try:\n link = item.find(tag_link).text\n except AttributeError:\n link = None\n\n try:\n description = item.find(tag_description).text\n except AttributeError:\n description = None\n\n images = {}\n for count_image, image in enumerate(item.find_all(tag_image)):\n try:\n h = image.get(tag_image_h)\n w = image.get(tag_image_w)\n if h is None or w is None:\n key = count_image\n else:\n key = f\"{h}x{w}\"\n link_image = image.get(tag_image_link)\n images[key] = link_image\n except Exception:\n pass\n\n data[f\"{count}\"] = {\n \"link\": link,\n \"images\": images,\n \"title\": title,\n \"description\": description,\n \"summary\":get_summary_of_article(link),\n \"keyword\":get_keyword_of_article(link)\n }\n\n return data\n", "repo_name": "SamirPS/News-Scrapper", "sub_path": "newsscrapper/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "newspaper.Article", "line_number": 9, "usage_type": "call"}, {"api_name": "newspaper.Article", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "30201223060", "text": "import os\nimport ctypes\nimport flet as ft\nfrom typing import Type\nfrom utils.card import Card\nfrom flet_mvc import FletController\nclass HomeController(FletController):\n def get_name_user(self) -> str:\n size = ctypes.pointer(ctypes.c_ulong(0))\n ctypes.windll.secur32.GetUserNameExW(3, None, size)\n buffer = ctypes.create_unicode_buffer(size.contents.value)\n ctypes.windll.secur32.GetUserNameExW(3, buffer, size)\n full_name = buffer.value\n return full_name\n\n def responsive_cards(self,responsive_row:ft.ResponsiveRow) -> None:\n cards : list = self.model.home_card_data()\n if len(cards) == 0:\n cards : list = self.model.home_card_default()\n for card in cards:\n 
#-------------------------------------------------------------------------|\n            # DO NOT TAKE THIS CODE INTO ACCOUNT\n            # if hasattr(controller,card.get('card_route')): \n            #     try:\n            #         card_route = getattr(controller,card.get('card_route'))\n            #     except Exception as e:\n            #         card_route = self.error_method\n            # else:\n            #     card_route = self.error_method\n            #-----------------------------------------------------------------------|\n            responsive_row.controls.append(\n                Card(\n                    page=self.page,\n                    title=card.get('title'),\n                    description=card.get('description'),\n                    icon=card.get('icon'),\n                    icon_color=card.get('color_icon'),\n                    route=card.get('route'),\n                    disabled=card.get('disabled'),\n                ).build(self.route_card)\n            )\n\n    def route_card(self,e: ft.ControlEvent) -> None:\n        if os.path.exists(os.environ.get('userprofile')+\"\\\\Desktop\\\\Configs\"):\n            self.page.go(f'/{e.control.data}')\n            self._update()\n        else:\n            self.error_directory()\n\n    def error_directory(self) -> None:\n        error_directory = ft.AlertDialog(\n            title=ft.Text(\"Directory error\"),\n            content=ft.Text(\"The 'Configs' folder was not found on the desktop\"),\n        )\n        self.page.dialog = error_directory\n        error_directory.open = True\n        self._update()\n\n    def _update(self) -> None:\n        self.page.update()\n", "repo_name": "TryNeo/DeskNeo", "sub_path": "controllers/home.py", "file_name": "home.py", "file_ext": "py", "file_size_in_byte": 2386, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flet_mvc.FletController", "line_number": 7, "usage_type": "name"}, {"api_name": "ctypes.pointer", "line_number": 9, "usage_type": "call"}, {"api_name": "ctypes.c_ulong", "line_number": 9, "usage_type": "call"}, {"api_name": "ctypes.windll.secur32.GetUserNameExW", "line_number": 10, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ctypes.create_unicode_buffer", "line_number": 11, "usage_type": "call"}, {"api_name": "ctypes.windll.secur32.GetUserNameExW", "line_number": 12, "usage_type": "call"}, {"api_name": "ctypes.windll", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flet.ResponsiveRow", "line_number": 16, "usage_type": "attribute"}, {"api_name": "utils.card.Card", "line_number": 32, "usage_type": "call"}, {"api_name": "flet.ControlEvent", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 44, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flet.AlertDialog", "line_number": 51, "usage_type": "call"}, {"api_name": "flet.Text", "line_number": 52, "usage_type": "call"}, {"api_name": "flet.Text", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "14730548980", "text": "\nfrom tkinter import Frame\nimport cv2 ,random,socket,base64,imutils,time\n\n\nclass UDP_Streamer():\n    def __init__(self) -> None:\n        self.BUFF_SIZE = 65535\n        self.server_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n        self.server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_RCVBUF,self.BUFF_SIZE)\n        self.host_name = socket.gethostname()\n        self.host_ip = socket.gethostbyname(self.host_name) #192.168.1.102# \n        self.client_add=[]\n        self.client_add.append( (\"127.0.0.1\",5051) )\n        print(\"print Host IP\",self.host_ip)\n        self.port = 9999\n        self.img_size= (400,600)\n        self.socket_address = 
(self.host_ip,self.port)\n\n def set_frame_size(self,size):\n self.img_size=size\n\n def set_client_add(self,client_add):\n self.client_add.append( client_add )\n\n def send_frame(self,frame):\n try:\n self.encode_frame(frame)\n self.create_packet()\n self.send_buffer()\n except Exception as e:\n print(\"send frame\",e)\n \n def encode_frame(self,frame):\n self.alpha = 1.2 # Contrast control (1.0-3.0)\n self.beta = 0 # Brightness control (0-100)\n self.full_frame=frame\n self.small_frame=cv2.resize(self.full_frame,self.img_size)\n self.frame = cv2.convertScaleAbs(self.small_frame, alpha=self.alpha, beta=self.beta)\n #self.frame = imutils.resize(self.frame,width=self.width)\n encoded,self.buffer = cv2.imencode('.jpg',self.frame,[cv2.IMWRITE_JPEG_QUALITY,80])\n self.buffer = base64.b64encode(self.buffer)\n \n def create_packet(self):\n header=format(0x010203)\n tail=format(0x0405060)\n header=header.encode('utf-8')\n tail=tail.encode('utf-8')\n self.packet= header+self.buffer+tail\n \n def send_buffer(self):\n print(\"packet len \",len(self.packet)) \n total=0\n for y in range(0,len(self.packet),60000):\n chunk=self.packet[y:y+60000]\n #print(udp_buffer)\n total=total+len(chunk)\n for cl in self.client_add:\n try:\n self.server_socket.sendto(chunk,cl)\n except Exception as e:\n print(\"Error in send frame to \",cl,e)\n\n time.sleep(0.01)\n\nif __name__==\"__main__\":\n\n img=cv2.imread(\"test.jpg\")\n \n streamer=UDP_Streamer()\n streamer.set_frame_size((600,400))\n #streamer.encode_frame(img)\n #streamer.create_packet()\n \n vid_file=\"traffic.mp4\"\n vid=cv2.VideoCapture(vid_file)\n\n while True:\n ret,frame=vid.read()\n if ret:\n streamer.send_frame(frame)\n cv2.imshow(\"streamer\",frame)\n cv2.waitKey(1)\n \n #streamer.send_buffer()\n #streamer.send_frame(img)\n #time.sleep(1)\n \n", "repo_name": "Alkasabi/Video-Streamer", "sub_path": "UDP_Server.py", "file_name": "UDP_Server.py", "file_ext": "py", "file_size_in_byte": 2853, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "socket.socket", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 9, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 9, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 10, "usage_type": "attribute"}, {"api_name": "socket.SO_RCVBUF", "line_number": 10, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "42573528478", "text": "from copy import deepcopy\n\nfrom ordered_set import OrderedSet\n\nfrom ..mixin import IdentifiedMixIn, PropertyContainer\nfrom .tree import Tree\n\n\nclass 
Label(PropertyContainer, IdentifiedMixIn):\n \"\"\"A |Label| class which acts as a |Tree| node and stores a name.\n\n The :attr:`name` is assumed to be the |Label| true content and its :func:`hash` value will reflect that.\n\n The |Label| acts as the core body of any |Tree| creation and modification. In fact, |Label| are what defines\n **actual** trees of which the |Tree| class acts as **views**.\n\n To define a tree, simply links two |Label| together:\n\n .. code-block:: python\n\n >>> label_a = Label('a')\n >>> label_b = Label('b')\n >>> label_a.add(label_b) # We have a tree !\n >>> print(Tree(label_a))\n a\n ╰── b\n >>> label_c = Label('c')\n >>> label_c.attach(label_a)\n >>> print(Tree(label_a))\n a\n ├── b\n ╰── c\n\n .. hint::\n\n Note that a single |Label| is a valid |Tree| in itself with a single element. More generally, any |Label|\n defines its own |Tree|, which is called the |Label|'s `clade`_.\n\n Because |Tree| are dynamic in nature, any attach between two |Label| can be undone with the :meth:`detach` method,\n *e.g.*:\n\n .. code-block:: python\n\n >>> label_a.detach(label_c)\n >>> print(Tree(label_a))\n a\n ╰── b\n >>> label_b.detach(label_a)\n >>> print(Tree(label_a))\n a\n\n Args:\n name (str): The |Label| name. This is the actual value that is \"tagged\" on |Record|.\n id (str): Optional. Default to a random *UUID4*. An id to store along the instance.\n parent (|Label|): Optional. Default to ``None``. A |Label| to be registered as self's parent.\n children (Iterable[|Label|]): Optional. Default to ``{}``. An iterable of |Label| to be registered as\n self's children.\n **properties (Any): Additional properties to store alongside the |Label|.\n\n .. _clade: https://en.wikipedia.org/wiki/Clade\n\n Attributes:\n properties (dict): Properties provided as kwargs in the constructor.\n\n \"\"\"\n\n __slots__ = '_parent', '_children', '_name', '_hash', '_depth', '_ancestors', '_descendants'\n\n def __new__(cls, name, *args, **kwargs):\n \"\"\"Create a new hashable |Label| instance.\"\"\"\n label = super(Label, cls).__new__(cls)\n label._name = name\n label._hash = None\n return label\n\n def __init__(self, name, id=None, parent=None, children=None, **properties):\n super(Label, self).__init__(id=id, **properties)\n\n self._children = OrderedSet()\n self._parent = None\n self._depth = 0\n self._ancestors = ()\n self._descendants = {}\n\n # Setup attributes\n self._name = name\n\n # Setup hash\n self._hash = None\n\n # Setup Label position in Label tree\n self.parent = parent\n self.children = children\n\n @property\n def name(self):\n \"\"\"str: The |Label| name. 
This is the actual value that is \"tagged\" on |Record|.\n\n Note:\n It is a *read-only* property because a |Label| is assumed to be immutable.\n\n \"\"\"\n return self._name\n\n @property\n def id(self):\n \"\"\"str: An identifier unique for all |Label| instance *independent* on content.\"\"\"\n return self._id\n\n @property\n def parent(self):\n \"\"\"|Label|: A |Label| registered as self's parent or ``None`` if self is a tree root.\n\n Using the property's *setter* is equivalent to a call of :meth:`attach`.\n\n \"\"\"\n return self._parent\n\n @parent.setter\n def parent(self, label):\n if label is None:\n return\n if self._parent is not None:\n self.detach(self._parent)\n self.attach(label)\n\n @property\n def children(self):\n \"\"\"list: A list of |Label| which are registered as self's children.\n\n Using the property's *setter* is equivalent to a joint call of :meth:`detach(self.children) `\n and :meth:`add`.\n\n \"\"\"\n return self._children\n\n @children.setter\n def children(self, labels):\n if labels is None or not labels:\n return\n\n self.detach(*self._children)\n self.add(*labels)\n\n @property\n def ancestors(self):\n \"\"\"(|Label|, ): All of self's ancestors up to the root, going upward from :attr:`parent`.\"\"\"\n return self._ancestors\n\n @property\n def descendants(self): # noqa: D401\n \"\"\"dict: All of self's descendants as a flattened dictionary of the form :code:`{name: label}`.\"\"\"\n return self._descendants\n\n @property\n def depth(self):\n \"\"\"int: The |label| absolute depth in the global implicit label tree.\"\"\"\n return self._depth\n\n def __getnewargs__(self):\n \"\"\"Pass name and identifier as :meth:`~object.__new__` arguments to pickle.\"\"\"\n return self._name,\n\n def __setstate__(self, state):\n \"\"\"Set attributes values from a dictionary of all slotted and in dictionary attributes.\"\"\"\n for key, value in state.items():\n if key == '_hash':\n value = None\n setattr(self, key, value)\n\n def __deepcopy__(self, memo=None):\n \"\"\"Construct a deep copy of a :class:`SlottedDict`.\"\"\"\n memo = {} if memo is None else memo\n\n # Make semi-empty Label\n cls = self.__class__\n result = cls.__new__(cls, deepcopy(self._name, memo))\n result._properties = deepcopy(self._properties, memo)\n result._id = deepcopy(self._id, memo)\n result._name = deepcopy(self._name, memo)\n result._hash = None\n result._depth = deepcopy(self._depth, memo)\n result._children = OrderedSet()\n result._parent = None\n result._ancestors = ()\n result._descendants = {}\n\n # Update memo with semi-empty Label\n memo[id(self)] = result\n\n # Recursively copy Label tree\n result._children = deepcopy(self._children, memo)\n result._descendants = deepcopy(self._descendants, memo)\n result._parent = deepcopy(self._parent, memo)\n result._ancestors = deepcopy(self._ancestors, memo)\n\n return result\n\n def attach(self, label):\n \"\"\"Attach a parent |Label| to self.\n\n Args:\n label (|Label|): A |Label| to register as self's parent.\n\n \"\"\"\n if label is None or label == self.parent:\n return\n\n label.add(self)\n\n # If for some reason (most probably name collision) nothing happened in label realm we'd better back off too...\n if self.id not in {_label.id for _label in label.children}: # True id match to avoid name collision here\n return\n\n self._parent = label\n self.update_ancestry()\n\n def add(self, *labels):\n \"\"\"Attach a child or a group of children |Label| to self.\n\n Args:\n *labels (|Label|): A |Label| to register as self's child.\n\n \"\"\"\n for label in 
labels:\n if label in self._children:\n continue\n\n if label == self:\n raise ValueError('Invalid tree: adding {} to {}\\'s tree is impossible.'.format(label, self))\n\n self_root = self._ancestors[-1] if self._ancestors else self\n label_root = label.ancestors[-1] if label.ancestors else label\n overlap = dict({self_root.name: self_root}, **self_root.descendants).keys() \\\n & dict({label_root.name: label_root}, **label_root.descendants).keys()\n\n if overlap:\n raise ValueError('Invalid tree: Overlapping tree {} found in:\\n'\n '{}\\n{}.'.format(overlap,\n Tree(self_root),\n Tree(label_root)))\n\n self._children.add(label)\n self.update_descent()\n\n label.attach(self)\n\n def detach(self, *labels):\n \"\"\"Detach one or a group of |Label| as self's parent or child.\n\n If a |Label| was never attached to self to begin with, it is silently ignored.\n\n Args:\n *labels (|Label|): A |Label| to be detached as either self's parent or self's child.\n\n \"\"\"\n for label in labels:\n if label == self._parent:\n # If it is the parent we force remove it (by-passing the property which would lead to recursion)\n self._parent = None\n self.update_ancestry()\n label.detach(self)\n elif label in self._children:\n # If it is a child we force remove it (by-passing the property which would lead to recursion)\n self._children.remove(label)\n self.update_descent()\n label.detach(self)\n # If it is nothing we silently ignore it (thus ending the recursion)\n\n def __repr__(self):\n \"\"\"Pythonic representation of the object.\"\"\"\n return '{}(name={})'.format(self.__class__.__name__, self.name)\n\n def __str__(self):\n \"\"\"Return the |Label| :attr:`name`.\"\"\"\n return self.name\n\n def __hash__(self):\n \"\"\"Return the python hash of the |Label| :attr:`name`.\"\"\"\n if self._hash is None:\n self._hash = hash(self._name)\n return self._hash\n\n def __eq__(self, other):\n \"\"\"Return whether two |Label| have the same name and id.\n\n Args:\n other (|Label|): Another |Label| to compare itself to.\n\n Returns:\n bool: ``True`` if ``other`` is a |Label| and have the same name as self, or is a :class:`str` which equals\n :attr:`name`.\n\n \"\"\"\n try:\n return self.name == other.name\n except AttributeError:\n return self.name == other\n\n def __ne__(self, other):\n \"\"\"Return whether two |Label| do not have the same name and id.\n\n Args:\n other (|Label|): Another |Label| to compare itself to.\n\n Returns:\n bool: ``True`` if ``other`` is not a |Label| or do not have the same name as self, or is a :class:`str`\n which does not equal :attr:`name`.\n\n \"\"\"\n return not self == other\n\n def last_common_ancestor(self, other):\n \"\"\"Return the *most downward* |Label| encountered in self and other :attr:`ancestors`.\n\n Args:\n other (|Label|): Another |Label| to compare ancestors with.\n\n Returns:\n |Label|: The `last common parent`_, or ``None`` if both |Label| share no family.\n\n\n .. 
_last common parent: https://en.wikipedia.org/wiki/Most_recent_common_ancestor\n\n \"\"\"\n family = OrderedSet((self, ) + self._ancestors).intersection(OrderedSet((other, ) + other.ancestors))\n if family:\n family = sorted(family,\n key=lambda x: ((self, ) + self._ancestors).index(x))\n return family[0]\n return None\n\n def clade(self, other):\n \"\"\"Return the common `clade`_ enclosing both self and other.\n\n Args:\n other (|Label|): Another |Label| to compare ancestors with.\n\n Returns:\n |Tree|: A |Tree| rooted in self and other :meth:`last_common_ancestor`, effectively representing\n their common `clade`_, or ``None`` if both |Label| share no family.\n\n .. _clade: https://en.wikipedia.org/wiki/Clade\n\n \"\"\"\n last_common_ancestor = self.last_common_ancestor(other)\n if last_common_ancestor is None:\n return None\n\n return Tree(last_common_ancestor, type='clade', enclosed=(self, other))\n\n def update_ancestry(self):\n \"\"\"Update :attr:`ancestors` attribute alongside the label's `clade`_.\n\n .. _clade: https://en.wikipedia.org/wiki/Clade\n\n \"\"\"\n if self.parent is None:\n self._ancestors = ()\n else:\n self._ancestors = (self.parent,) + self.parent.ancestors\n self._depth = len(self._ancestors)\n for child in self._children:\n child.update_ancestry()\n\n def update_descent(self):\n \"\"\"Update :attr:`descendants` attribute alongside the label's ancestry.\"\"\"\n self._descendants = {}\n for child in self._children:\n self._descendants[child.name] = child\n self._descendants.update(child.descendants)\n if self._parent is not None:\n self._parent.update_descent()\n", "repo_name": "airbusgeo/playground-plums", "sub_path": "plums/commons/data/taxonomy/label.py", "file_name": "label.py", "file_ext": "py", "file_size_in_byte": 12532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mixin.PropertyContainer", "line_number": 9, "usage_type": "name"}, {"api_name": "mixin.IdentifiedMixIn", "line_number": 9, "usage_type": "name"}, {"api_name": "ordered_set.OrderedSet", "line_number": 79, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 177, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 178, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 179, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 180, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 182, "usage_type": "call"}, {"api_name": "ordered_set.OrderedSet", "line_number": 183, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 192, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 193, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 194, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 195, "usage_type": "call"}, {"api_name": "tree.Tree", "line_number": 240, "usage_type": "call"}, {"api_name": "tree.Tree", "line_number": 241, "usage_type": "call"}, {"api_name": "ordered_set.OrderedSet", "line_number": 326, "usage_type": "call"}, {"api_name": "tree.Tree", "line_number": 350, "usage_type": "call"}]} +{"seq_id": "2459729307", "text": "import numpy as np\nimport pandas as pd\nfrom functools import reduce\nimport seaborn as sns\nfrom matplotlib import pyplot\n\ndef hap_load_and_process(url_or_path_to_csv_file, rename_dict,final_list):\n\n # Method Chain 1 (Load data and deal with missing data)\n\n df1 = (\n pd.read_csv(url_or_path_to_csv_file)\n .rename(columns=rename_dict)\n 
#.dropna()\n # etc...\n )\n\n # Method Chain 2 (Create new columns, drop others, and do processing)\n\n df2 = (\n df1\n #.assign(status=lambda x: np.where((x.period > 2014), 1, 0))\n .sort_values(\"country\", ascending=True)\n .reset_index(drop=True)\n .loc[:, final_list]\n )\n\n # Make sure to return the latest dataframe\n\n return df2 \n\ndef ind_load_and_process(url_or_path_to_csv_file, ind):\n\n # Method Chain 1 (Load data and deal with missing data)\n\n df1 = (\n pd.read_csv(url_or_path_to_csv_file)\n .rename(columns={\"Location\":\"country\",\"Period\":\"period\",\"Value\":ind})\n #.dropna()\n # etc...\n )\n\n # Method Chain 2 (Create new columns, drop others, and do processing)\n\n df2 = (\n df1\n .assign(status=lambda x: np.where((x.period > 2014), 1, 0))\n .sort_values(['country','period'],ascending=[True, True], ignore_index=True)\n .reset_index(drop=True)\n .loc[:, [\"country\", \"period\", ind,'status']]\n )\n\n # Make sure to return the latest dataframe\n\n return df2 \n\ndef merge_hap(data_frames):\n \n merged= reduce(lambda left,right: pd.merge(left,right,on=['country'],\n how='inner'), data_frames)\n return merged\n \ndef filter_years(df):\n #df['status'] = df.apply(keep_or_discard,axis='columns')\n df=df.loc[df['status'] == 1]\n df=df.drop(columns=['status'])\n return df\n\ndef edit_data(dataFrame):\n \n numbs=list(dataFrame[list(dataFrame.columns)[2]])\n for string in numbs:\n loc = string.find(' ')\n numbs[numbs.index(string)]=string[0:loc].replace(',','')\n\n return numbs\n\ndef clean_ind(df):\n \n df=filter_years(df)\n df[[list(df.columns)[2]]]=edit_data(df)\n \n return df\n\n\nH21RawData=pd.read_csv(\"../data/raw/world-happiness-report-2021.csv\")\nH19RawData=pd.read_csv('../data/raw/2019.csv')\nH18RawData=pd.read_csv('../data/raw/2018.csv')\nH17RawData=pd.read_csv('../data/raw/2017.csv')\nH16RawData=pd.read_csv('../data/raw/2016.csv')\nH15RawData=pd.read_csv('../data/raw/2015.csv')\nLivCostRawData=pd.read_csv('../data/raw/cost_of_living 2020.csv')\nGDPRawData=pd.read_csv('../data/raw/gdp.csv')\nH20RawData=pd.read_csv('../data/raw/world-happiness-report.csv')\n\n\ndef load_process(path_to_csv):\n# Method Chain 1\n df1=(\n pd.read_csv(path_to_csv)\n .drop(['Family','Freedom'],axis=1)\n .dropna(subset=['Happiness Score'])\n )\n# Method Chain 2 \n df2=(\n df1\n .sort_values('Country')\n .rename(columns={'Country':'Country_Sorted'})\n .reset_index\n \n )\n \n return df2\n \n\ndef process_df(DataFrame):\n df=(DataFrame\n .drop(['Family','Freedom'],axis=1)\n .dropna(subset=['Happiness Score'])\n .reset_index(drop=True)\n .groupby('Region')['Happiness Rank']\n .max()\n .sort_values()\n .to_frame()\n )\n return df\n\n\ndef select_col (df):\n df=df[['Country','Happiness Rank','Happiness Score','Economy (GDP per Capita)','Freedom','Trust (Government Corruption)','Generosity']]\n return (df)\n\n\n\n", "repo_name": "ubco-W2020ST2-data301/project-group16-data-301-project", "sub_path": "analysis/scripts/project_functions.py", "file_name": "project_functions.py", "file_ext": "py", "file_size_in_byte": 3537, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 47, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 59, "usage_type": "call"}, 
{"api_name": "pandas.read_csv", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "19256156437", "text": "# nltk data available at C:\\Users\\marcs\\AppData\\Roaming\\nltk_data\n\n# Sioux Valley Self Government Agreement ATRIS Parser\n# Marc St. Pierre 1/3/2023\n\nimport string\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = requests.get('https://www.rcaanc-cirnac.gc.ca/eng/1385741084467/1551118616967')\nsoup = BeautifulSoup(url.content, 'html.parser')\ndbtitle = 'SVprovisionsdb.csv'\nagreement = 'Sioux Valley Dakota Nation Governance Agreement and Tripartite Governance Agreement'\nyear = '2013'\npart = ''\nsection = ''\nroman = ['i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x']\nalpha = list(string.ascii_lowercase)\nprevEnumType = 'lst-spcd' \nintIndex = 0\nalphaIndex = 0\nromanIndex = 0\nprovNum = 0\ndata = []\n\nfor child in soup.h1.next_elements:\n if child.name == 'h2':\n part = child.get_text()\n section = ''\n if child.name == 'h3':\n section = child.get_text()\n if child.name in ['h4', 'h5', 'h6']:\n alphaIndex = 0\n romanIndex = 0\n intIndex = 0\n prevEnumType = 'lst-spcd'\n try:\n int(child.get_text()[0])\n provNum = (child.get_text().split(' ', 1)[0]).strip('\\n')\n text = \" \".join(child.get_text().split(' ')[1:])\n data.append([provNum, part, section, text])\n except ValueError:\n data.append([provNum, part, section, child.get_text()])\n except IndexError:\n data.append([provNum, part, section, child.get_text()])\n if child.name == 'p':\n alphaIndex = 0\n romanIndex = 0\n intIndex = 0\n prevEnumType = 'lst-spcd'\n data.append([provNum, part, section, child.get_text()])\n if child.name == 'li':\n if 'class' in child.parent.attrs:\n enumType = child.parent.attrs['class'][0]\n if enumType == 'lst-spcd' and prevEnumType == 'lst-spcd':\n intIndex += 1\n data.append([str(provNum) + '(' + str(intIndex) + ')', part, section, child.get_text()])\n elif enumType == 'lst-lwr-alph' and prevEnumType == 'lst-spcd':\n alphaIndex = 0\n data.append([str(provNum) + '(' + alpha[alphaIndex] + ')', part, section, child.get_text()])\n elif enumType == 'lst-lwr-alph' and prevEnumType == 'lst-lwr-alph':\n alphaIndex += 1\n data.append([str(provNum) + '(' + alpha[alphaIndex] + ')', part, section, child.get_text()])\n elif enumType == 'lst-lwr-rmn' and prevEnumType == 'lst-lwr-alph':\n romanIndex = 0\n data.append([str(provNum) + '(' + alpha[alphaIndex] + ')' + '(' + roman[romanIndex] + ')', part, section, child.get_text()])\n elif enumType == 'lst-lwr-rmn' and prevEnumType == 'lst-lwr-rmn':\n romanIndex += 1\n data.append([str(provNum) + '(' + alpha[alphaIndex] + ')' + '(' + roman[romanIndex] + ')', part, section, child.get_text()])\n elif enumType == 'lst-lwr-alph' and prevEnumType == 'lst-lwr-rmn':\n alphaIndex += 1\n data.append([str(provNum) + '(' + alpha[alphaIndex], part, section, child.get_text()])\n 
elif enumType == 'lst-spcd' and prevEnumType == 'lst-lwr-alph':\n intIndex += 1\n data.append([str(provNum), part, section, child.get_text()])\n elif enumType == 'lst-spcd' and prevEnumType == 'lst-lwr-rmn':\n intIndex += 1\n data.append([str(provNum), part, section, child.get_text()])\n else:\n data.append([provNum, part, section, child.get_text()])\n prevEnumType = enumType\n\nwith open(dbtitle, 'w', newline='') as csvfile:\n provisionswriter = csv.writer(csvfile)\n provisionswriter.writerow(['agreement', 'year', 'provisionNum', 'part', 'section', 'provisionText'])\n for i in data:\n row = [agreement, year] + i\n provisionswriter.writerow(row)\n csvfile.close()\n\n\n# for i in range(100):\n# print(data[i])\n\n# for i in data:\n# print(i)\n\n\n\n", "repo_name": "mkstp/comparative-provisions-generator", "sub_path": "parsers/Old_Parsers/siouxValleyParser.py", "file_name": "siouxValleyParser.py", "file_ext": "py", "file_size_in_byte": 3926, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 19, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "19988121580", "text": "\nimport subprocess\nimport json\nimport time\n\nduration = 100 # seconds\nsleep_time = 0.4 # seconds\n\nmeas_duration = 0\nmeas_list = []\nt0 = time.time()\nwhile meas_duration < duration:\n t = time.time_ns()\n res = subprocess.check_output([\"curl\",\"-s\", \"http://10.42.3.1:50000\"])\n res = res.decode('utf-8').strip()\n print(res)\n res = json.loads(res)\n res['timestamp'] = t\n meas_list.append(res)\n time.sleep(sleep_time)\n meas_duration = time.time()-t0\n\nprint(meas_duration)\n\nfor elem in meas_list:\n elem['RSRP'] = elem.pop('Signal Strength')\n elem['RSRQ'] = elem.pop('Signal Quality')\n new_RSRP = float(elem['RSRP'].split()[0])\n elem['RSRP'] = new_RSRP\n new_RSRQ = float(elem['RSRQ'].split()[0])\n elem['RSRQ'] = new_RSRQ\n\nwith open(\"/tmp/results/ntw_meas_data.json\", \"w\") as outfile:\n json.dump(meas_list, outfile)\n\n\n\n \n\n", "repo_name": "nflores99/measurements-service", "sub_path": "ntw-meas/ntw-meas.py", "file_name": "ntw-meas.py", "file_ext": "py", "file_size_in_byte": 862, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time_ns", "line_number": 13, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 14, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "40557206606", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nfrom matplotlib import animation\nfrom scipy.sparse import csr_matrix\n\nfolder = r'C:\\Users\\thoma\\Documents\\MEGAsync\\Cours\\Maths\\10_1_ALN\\Projet\\gif'\n\nR, tmax = 0.065, 60. 
# en mètres, en secondes\nNx = 1_000 # 35, 1000 stable | 35, 978 instable\nD = 98.8e-6 # Diffusivité thermique de l'aluminium\nTmax = 80 # °C\nTamb = 20 # °C\n\ndx = R/(Nx+1)\ndt = dx**2/(2*D)\nNt = int(tmax/dt)\n\nbeta = D*dt/dx**2\nwhile beta >= 1/2:\n Nt += 1\n dt = tmax/(Nt + 1)\n beta = D * dt / dx ** 2\n\nNt = 2_811_756 # Nt -= 13\ndt = tmax/(Nt + 1)\nbeta = D * dt / dx ** 2\nprint(beta, Nx, Nt)\n\n# Matrice Euler explicite\nM = np.array([[0. for _ in range(Nx+2)] for _ in range(Nx+2)])\n\nM[0][0], M[Nx+1][Nx+1] = 1, 1 # Condition aux bornes\nfor i in range(1, Nx+1):\n M[i][i] = 1-2.*beta\n\nfor i in range(1, Nx+1):\n M[i][i+1] = beta\n\nfor i in range(Nx):\n M[i+1][i] = beta\n\nsparceM = csr_matrix(M)\n# print(M)\n\n# On pose les valeurs de la fonction f en les x_i.\nB = np.transpose(np.array([Tamb for _ in range(Nx+2)]))\nB[0] = Tmax\n\n# print(B)\n\n# Préparation de l'affichage graphique\nx_i = [i * dx * 100 for i in range(Nx + 2)] # Positions des points à l'intérieur de l'intervalle\n# print(x_i)\nit = np.linspace(start=0., stop=1., num=10_000 + 1, endpoint=True)\n\n# Itérations\n\nfig = plt.figure()\nline, = plt.plot([], [])\nplt.xlim(0, R*100)\nplt.ylim(Tamb, Tmax)\n#plt.xlim(2.0, 2.1)\n#plt.ylim(58.5, 63.5)\nplt.xlabel('Position (cm)')\nplt.ylabel('Temperature (°C)')\n\nfor i in range(Nt):\n X = sparceM @ B\n B = X.copy()\n #if i % 50_000 == 0:\n # plt.plot(x_i, X, 'b')\nplt.plot(x_i, X, 'b,', label=\"Solution approchée\")\nplt.plot(x_i, [Tmax-(Tmax-Tamb)*x/(R*100) for x in x_i], \"k\", label=\"Solution exacte\")\nplt.legend()\nplt.show()\n\n'''\ndef animate(i):\n global M, B\n X = sparceM @ B # X = np.linalg.solve(M, B)\n # print(X)\n\n if i % 1_000 == 999:\n plt.plot(x_i, X, 'b')\n # line.set_data(x_i, X)\n B = X.copy()\n\n # filename = folder+'/{:0>9}.png'.format(i)\n\n # save frame\n # plt.savefig(filename)\n\n return line,\n\n\nani = animation.FuncAnimation(fig, animate, frames=Nt, blit=True, interval=.5, repeat=False)\n\n# time.sleep(5)\nplt.show()\n'''", "repo_name": "UnderscorePoY/L3_Python", "sub_path": "AlgebreLineaireNumerique/Projet_ALN/main_anim.py", "file_name": "main_anim.py", "file_ext": "py", "file_size_in_byte": 2231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "9735407078", "text": "from weaviate import Client\n\nimport weaviate\nimport json\nimport sys\n\ndef initializeClient(weaviateUrl: str) -> Client:\n return Client(weaviateUrl)\n\n\ndef createSchemas(client: Client, schemaPath: str):\n with open(schemaPath, \"r\") as schemaFile:\n schemaClasses = json.load(schemaFile)\n schemaClassNames = (schemaClass[\"class\"] for schemaClass in schemaClasses[\"classes\"])\n schemas = client.schema.get()\n existingSchemaClassNames = (schemaClass[\"class\"] for schemaClass in schemas[\"classes\"])\n\n duplicateExists = False\n\n for existingClass in schemas[\"classes\"]:\n for classToCreate in schemaClasses[\"classes\"]:\n if existingClass[\"class\"] == classToCreate[\"class\"]:\n duplicateExists = True\n break\n\n if schemas[\"classes\"] is not None and not duplicateExists:\n print(\"Creating\", \",\".join(schemaClassNames), \"schema(s)\")\n client.schema.create(schemaPath)\n else:\n print(\"Trying to create classes:\", \",\".join(schemaClassNames))\n print(\"These classes already exist:\", \",\".join(existingSchemaClassNames))\n print(\"Skipped creating any new schema\")\n\n\ntry:\n weaviateServerAddress = sys.argv[1].strip()\nexcept IndexError:\n weaviateServerAddress = None\n\ntry:\n weaviateSchema = sys.argv[2].strip()\nexcept IndexError:\n weaviateSchema = None\n\n\n\nif weaviateServerAddress is None or weaviateServerAddress == \"\" or weaviateSchema is None or weaviateSchema == \"\":\n print(\"Missing argument. Usage:\")\n print(\"python3 weaviate-setup.py \")\n exit(0)\n\n\nprint(f\"Using weaviate server: {weaviateServerAddress}\")\nprint(f\"Using weaviate schema from: {weaviateSchema}\")\n\nclient = initializeClient(weaviateUrl = weaviateServerAddress)\n\ncreateSchemas(client, schemaPath = weaviateSchema)\n\n", "repo_name": "arconsis/vector-search", "sub_path": "ImageSearchCoreService/assets/weaviate-setup.py", "file_name": "weaviate-setup.py", "file_ext": "py", "file_size_in_byte": 1864, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "weaviate.Client", "line_number": 8, "usage_type": "call"}, {"api_name": "weaviate.Client", "line_number": 7, "usage_type": "name"}, {"api_name": "weaviate.Client", "line_number": 11, "usage_type": "name"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "4649332739", "text": "from __future__ import absolute_import\n\nimport decimal\n\nfrom django.forms.fields import DecimalField\nfrom django.utils.translation import gettext_lazy as _\n\nfrom fobi.base import FormFieldPlugin, get_theme\nfrom fobi.widgets import NumberInput\n\nfrom . 
import UID\nfrom .forms import DecimalInputForm\n\n__title__ = \"fobi.contrib.plugins.form_elements.fields.decimal.base\"\n__author__ = \"Artur Barseghyan \"\n__copyright__ = \"2014-2019 Artur Barseghyan\"\n__license__ = \"GPL 2.0/LGPL 2.1\"\n__all__ = (\"DecimalInputPlugin\",)\n\ntheme = get_theme(request=None, as_instance=True)\n\n\nclass DecimalInputPlugin(FormFieldPlugin):\n \"\"\"Decimal input plugin.\"\"\"\n\n uid = UID\n name = _(\"Decimal\")\n group = _(\"Fields\")\n form = DecimalInputForm\n\n def get_form_field_instances(\n self, request=None, form_entry=None, form_element_entries=None, **kwargs\n ):\n \"\"\"Get form field instances.\"\"\"\n widget_attrs = {\n \"class\": theme.form_element_html_class,\n \"type\": \"number\",\n \"placeholder\": self.data.placeholder,\n }\n field_kwargs = {\n \"label\": self.data.label,\n \"help_text\": self.data.help_text,\n \"required\": self.data.required,\n }\n\n if self.data.initial is not None:\n data_initial = decimal.Decimal(str(self.data.initial))\n field_kwargs.update({\"initial\": data_initial})\n\n if self.data.max_value is not None:\n data_max_value = decimal.Decimal(str(self.data.max_value))\n field_kwargs[\"max_value\"] = data_max_value\n widget_attrs[\"max\"] = data_max_value\n\n if self.data.min_value is not None:\n data_min_value = decimal.Decimal(str(self.data.min_value))\n field_kwargs[\"min_value\"] = data_min_value\n widget_attrs[\"min\"] = data_min_value\n\n if self.data.max_digits is not None:\n data_max_digits = int(self.data.max_digits)\n field_kwargs[\"max_digits\"] = data_max_digits\n\n if self.data.decimal_places is not None:\n data_decimal_places = int(self.data.decimal_places)\n field_kwargs[\"decimal_places\"] = data_decimal_places\n\n field_kwargs[\"widget\"] = NumberInput(attrs=widget_attrs)\n\n return [(self.data.name, DecimalField, field_kwargs)]\n", "repo_name": "barseghyanartur/django-fobi", "sub_path": "src/fobi/contrib/plugins/form_elements/fields/decimal/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 2329, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 474, "dataset": "github-code", "pt": "50", "api": [{"api_name": "fobi.base.get_theme", "line_number": 20, "usage_type": "call"}, {"api_name": "fobi.base.FormFieldPlugin", "line_number": 23, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 27, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "forms.DecimalInputForm", "line_number": 29, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 47, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 51, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 56, "usage_type": "call"}, {"api_name": "fobi.widgets.NumberInput", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms.fields.DecimalField", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "36520003393", "text": "from flask import Flask, request\nimport sqlite3\nfrom dotenv import load_dotenv\nimport requests\nfrom twilio.twiml.messaging_response import MessagingResponse\n\napp = Flask(__name__)\n\n\ndef get_db_connection():\n conn = sqlite3.connect('database.db')\n conn.row_factory = sqlite3.Row\n return conn\n\n@app.route('/bot', methods=['POST'])\ndef bot():\n incoming_msg = request.values.get('Body', '').lower()\n phone = request.values.get('WaId')\n name = request.values.get('ProfileName')\n resp = 
MessagingResponse()\n msg = resp.message()\n responded = False\n if 'menu' in incoming_msg:\n conn = get_db_connection()\n menu_items = conn.execute('select id, item_name, image_url from menu').fetchall()\n message = \"Hi \"+name+\", todays menu \\n\"\n for item in menu_items:\n message = message +str(item['id'])+\" \"+ item['item_name']+\" \\n\"\n message = message + \" Please select your choice\" \n msg.body(message)\n msg.media(\"https://i.imgur.com/jG0KQx5.png\")\n conn.close()\n responded = True\n if 'selection' in incoming_msg:\n conn = get_db_connection()\n menu_items = conn.execute('select menu.item_name, menu.image_url, user.id,user_selection.menu_id from user, user_selection, menu where user.phone='+phone+' and user_selection.user_id=user.id and menu.id=user_selection.menu_id').fetchall()\n if menu_items:\n message = \"Your selection is \"+menu_items[0]['item_name']\n menu_image = menu_items[0]['image_url']\n msg.body(message)\n if menu_image:\n msg.media(menu_image)\n else:\n msg.body(\"Please choose a dish from the menu.\")\n msg.media(\"https://i.imgur.com/jG0KQx5.png\")\n conn.close()\n responded = True\n\n if 'choose' in incoming_msg:\n conn = get_db_connection()\n item_id = incoming_msg.split(' ')[1]\n conn.execute('INSERT OR REPLACE INTO user(phone) values (?)', [int(phone)])\n conn.execute('INSERT OR REPLACE INTO user_selection (user_id, menu_id) select id as user_id, '+item_id+' as menu_id from user where phone='+phone+'')\n menu_items = conn.execute('select menu.item_name, menu.image_url, user.id,user_selection.menu_id from user, user_selection, menu where phone='+phone+' and user_selection.user_id=user.id and menu.id=user_selection.menu_id').fetchall()\n message = 'Congrats '+name+\"!!\\n You have selected \\n\"+menu_items[0]['item_name']+\" for your meal.\"\n menu_image = menu_items[0]['image_url']\n msg.body(message)\n if menu_image:\n msg.media(menu_image)\n conn.commit()\n conn.close()\n responded = True\n\n if not responded:\n msg.body('Hi '+name+' please choose \\n 1. menu to see menu \\n 2. selection to see your selection \\n 3. 
choose to select item.')\n return str(resp)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0')\n", "repo_name": "vmuneeb/whatsapp-bot-demo", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2941, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request.values.get", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "twilio.twiml.messaging_response.MessagingResponse", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "11806682951", "text": "from flask import Flask, render_template, request, session, redirect, url_for, flash\n\napp = Flask(__name__)\napp.config.from_object(\"config\")\n\n\nfrom controllers import user as userController\nfrom controllers import post as postController\n\n@app.route(\"/login\")\ndef login():\n return render_template(\"login.html\")\n\n\n@app.route(\"/loginSubmit\", methods=[\"POST\"])\ndef loginSubmit():\n isLoginSuccessful, message = userController.login(request.form[\"username\"], request.form[\"password\"])\n flash(message)\n if isLoginSuccessful:\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/register\")\ndef register():\n return render_template(\"register.html\")\n\n\n@app.route(\"/registrationSubmit\", methods=[\"POST\"])\ndef registrationSubmit():\n isRegistrationSuccesful, message = userController.register(\n request.form[\"username\"],\n request.form[\"password\"],\n request.form[\"fullName\"]\n )\n flash(message)\n if isRegistrationSuccesful:\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"register\"))\n\n\n@app.route(\"/logout\")\ndef logout():\n userController.logout()\n flash(\"Succesfully logged out.. 
!\")\n return redirect(url_for(\"login\"))\n\n\n@app.route(\"/home\")\ndef home():\n allPosts = postController.findAll()\n loggedInUserInfo = userController.getLoggedInUserInfo()\n\n return render_template(\"home.html\", posts=allPosts, loggedInUserInfo=loggedInUserInfo)\n\n@app.route(\"/newPostSubmit\", methods=[\"POST\"])\ndef newPostSubmit():\n isNewPostCreated, message = postController.add(request.form[\"content\"])\n flash(message)\n return redirect(url_for(\"home\"))\n\n\nif __name__ == \"__main__\":\n app.run()\n", "repo_name": "nitish-kr/twitterClone", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1721, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "controllers.user.login", "line_number": 17, "usage_type": "call"}, {"api_name": "controllers.user", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}, {"api_name": "controllers.user.register", "line_number": 32, "usage_type": "call"}, {"api_name": "controllers.user", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 41, "usage_type": "call"}, {"api_name": "controllers.user.logout", "line_number": 46, "usage_type": "call"}, {"api_name": "controllers.user", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 48, "usage_type": "call"}, {"api_name": "controllers.post.findAll", "line_number": 53, "usage_type": "call"}, {"api_name": "controllers.post", "line_number": 53, "usage_type": "name"}, {"api_name": "controllers.user.getLoggedInUserInfo", "line_number": 54, "usage_type": "call"}, {"api_name": "controllers.user", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "controllers.post.add", "line_number": 60, "usage_type": "call"}, {"api_name": "controllers.post", 
"line_number": 60, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "23640891446", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreate motion plots similar to the ones created by BrainVoyager, using the *3DMC.sdm files\r\n\"\"\"\r\n__date__ = \"25-01-2023\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport numpy as np\r\nimport bvbabel\r\nimport os.path\r\nimport sys\r\nfrom PyQt5.QtWidgets import QFileDialog, QApplication\r\n\r\napp = QApplication(sys.argv)\r\n\r\n# change default plotting properties\r\nmpl.rcParams['axes.labelsize'] = 'small'\r\nmpl.rcParams['xtick.labelsize'] = 'small'\r\nmpl.rcParams['ytick.labelsize'] = 'small'\r\nmpl.rcParams['axes.titlesize'] = 'medium'\r\n\r\n# set plotdisp to True if you would like to see the plots while running the script\r\nplotdisp = False\r\n\r\nmotionfiles, _ = QFileDialog.getOpenFileNames(None, 'Select *3DMC.sdm files', 'H:/AppleGame/derivatives/workflow_id-3_type-1_name-func-preprocessing/', 'Motion Files (*3DMC.sdm)')\r\n\r\n\r\nfor f in range(len(motionfiles)):\r\n sdm_header, sdm_data = bvbabel.sdm.read_sdm(motionfiles[f])\r\n\r\n motion = np.empty((0, sdm_header['NrOfDataPoints']))\r\n for pred in range(len(sdm_data)):\r\n motion = np.vstack([motion, sdm_data[pred]['ValuesOfPredictor']])\r\n\r\n # Plot the Motion\r\n plt.style.use('dark_background')\r\n fig_motion = plt.figure('Motion', figsize=(10, 8))\r\n ax_motion = fig_motion.add_subplot(111)\r\n plt.subplots_adjust(right=0.75)\r\n ax_motion.set_ylim(np.floor(np.amin(motion)).astype(int), np.ceil(np.amax(motion)).astype(int))\r\n ax_motion.set_yticks(range(np.floor(np.amin(motion)).astype(int), np.ceil(np.amax(motion)).astype(int) + 1))\r\n ax_motion.grid(axis='y')\r\n ax_motion.spines['right'].set_visible(False)\r\n ax_motion.spines['top'].set_visible(False)\r\n\r\n ax_motion.set_title('Rigid Body Motion Parameters - 3 Translations, 3 Rotations')\r\n\r\n # plot the motion\r\n plot_transx, = ax_motion.plot(motion[0, :], linewidth=1, color=np.array(sdm_data[0]['ColorOfPredictor']) / 255,\r\n label=sdm_data[0]['NameOfPredictor'])\r\n plot_transy, = ax_motion.plot(motion[1, :], linewidth=1, color=np.array(sdm_data[1]['ColorOfPredictor']) / 255,\r\n label=sdm_data[1]['NameOfPredictor'])\r\n plot_transz, = ax_motion.plot(motion[2, :], linewidth=1, color=np.array(sdm_data[2]['ColorOfPredictor']) / 255,\r\n label=sdm_data[2]['NameOfPredictor'])\r\n plot_transx, = ax_motion.plot(motion[3, :], linewidth=1, color=np.array(sdm_data[3]['ColorOfPredictor']) / 255,\r\n label=sdm_data[3]['NameOfPredictor'])\r\n plot_transy, = ax_motion.plot(motion[4, :], linewidth=1, color=np.array(sdm_data[4]['ColorOfPredictor']) / 255,\r\n label=sdm_data[4]['NameOfPredictor'])\r\n plot_transz, = ax_motion.plot(motion[5, :], linewidth=1, color=np.array(sdm_data[5]['ColorOfPredictor']) / 255,\r\n label=sdm_data[5]['NameOfPredictor'])\r\n\r\n handles, labels = ax_motion.get_legend_handles_labels()\r\n\r\n fig_motion.legend(handles=handles, loc=\"center right\", frameon=False, framealpha=1, fontsize='small')\r\n\r\n fig_motion.savefig((motionfiles[f].split('.')[0] + '_MotionPlot.png'), dpi=600, format='png')\r\n if plotdisp == True:\r\n 
plt.show()\r\n plt.clf()\r\n\r\n # subtract first timepoint from all 6 timeseries, so that they start at 0\r\n motion_run = motion[:, 0:] - motion[:, 0, None]\r\n\r\n print('')\r\n print('Maximum movement per run:')\r\n print('Filename:', motionfiles[f].split('/')[-1])\r\n print('Maximum Motion with Respect to Reference Run:', np.max(np.absolute(motion)), ',',\r\n sdm_data[np.argmax(np.amax(np.absolute(motion), 1))]['NameOfPredictor'])\r\n print('Maximum Motion within the Run:', np.max(np.absolute(motion_run)), ',',\r\n sdm_data[np.argmax(np.amax(np.absolute(motion_run), 1))]['NameOfPredictor'])\r\n print('Maximum Motion Range within the Run:', np.max(np.amax(motion, 1) - np.amin(motion, 1)), ',',\r\n sdm_data[np.argmax(np.amax(motion, 1) - np.amin(motion, 1))]['NameOfPredictor'])\r\n\r\n# Restore the default plotting parameters after script is finished\r\nmpl.rcParams.update(mpl.rcParamsDefault)", "repo_name": "petitro/Grid-Cells", "sub_path": "4.2_GetPlots.py", "file_name": "4.2_GetPlots.py", "file_ext": "py", "file_size_in_byte": 4174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 20, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileNames", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 26, "usage_type": "name"}, {"api_name": "bvbabel.sdm.read_sdm", "line_number": 30, "usage_type": "call"}, {"api_name": "bvbabel.sdm", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.floor", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.max", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.rcParams.update", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 86, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParamsDefault", "line_number": 86, "usage_type": "attribute"}]} +{"seq_id": "519588236", "text": "import asyncio\nfrom bleak import BleakClient\n\nclass BleClient:\n address = ''\n write_characteristic_id = ''\n read_characteristic_id = ''\n loop = None\n\n def __init__(self, address, write_characteristic_id, read_characteristic_id):\n self.address = address\n self.write_characteristic_id = write_characteristic_id\n self.read_characteristic_id = read_characteristic_id\n self.loop = asyncio.get_event_loop()\n\n def read(self, commands):\n return self.loop.run_until_complete(self.__read(commands))\n\n def write(self, commands, values):\n return self.loop.run_until_complete(self.__write(commands, values))\n\n def __create_read_command(self, c):\n r_command = bytearray([c])\n r_command.append(0x00) # 長さ0\n return r_command\n\n def __create_write_command(self, c, v):\n wc = c + 0x80 # write command\n w_command = bytearray([wc])\n w_command.extend(bytearray([len(v)]))\n w_command.extend(v)\n return w_command\n\n async def __read(self, commands):\n async with BleakClient(self.address) as client:\n if not client.is_connected:\n return None\n ret = []\n for c in commands:\n r_command = self.__create_read_command(c)\n await client.write_gatt_char(self.write_characteristic_id, r_command)\n value = await client.read_gatt_char(self.read_characteristic_id)\n ret.append(value[2:])\n return ret\n\n async def __write(self, commands, values):\n async with BleakClient(self.address) as client:\n if not client.is_connected:\n return None\n ret = []\n compare = []\n for i, c in enumerate(commands):\n # write\n v = values[i]\n w_command = self.__create_write_command(c, v)\n await client.write_gatt_char(self.write_characteristic_id, w_command)\n\n # read\n r_command = 
self.__create_read_command(c)\n await client.write_gatt_char(self.write_characteristic_id, r_command)\n value = await client.read_gatt_char(self.read_characteristic_id)\n v2 = value[2:]\n ret.append(v2)\n if v == v2:\n compare.append(True)\n else:\n compare.append(False)\n return ret, compare\n\n", "repo_name": "yasunori/bleclient", "sub_path": "bleclient.py", "file_name": "bleclient.py", "file_ext": "py", "file_size_in_byte": 2463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asyncio.get_event_loop", "line_number": 14, "usage_type": "call"}, {"api_name": "bleak.BleakClient", "line_number": 35, "usage_type": "call"}, {"api_name": "bleak.BleakClient", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "29431105688", "text": "#!/usr/bin/env Python\n\nimport ldap\nimport getConfig\n\n\ndef ldapConn(base_ou, search_filter, attr=None):\n connect = ldap.initialize('ldap://{0}:{1}'.format(getConfig.ldap_server,\n getConfig.ldap_port))\n connect.bind_s(getConfig.ldapcred, getConfig.ldappass)\n try:\n if attr is None:\n result = connect.search_s(base_ou,\n ldap.SCOPE_SUBTREE,\n search_filter)\n else:\n result = connect.search_s(base_ou,\n ldap.SCOPE_SUBTREE,\n search_filter,\n attr)\n connect.unbind_s()\n except ldap.LDAPError as e:\n connect.unbind_s()\n return e\n else:\n return result\n", "repo_name": "aldevar/openldapi", "sub_path": "ldapConn.py", "file_name": "ldapConn.py", "file_ext": "py", "file_size_in_byte": 858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "ldap.initialize", "line_number": 8, "usage_type": "call"}, {"api_name": "getConfig.ldap_server", "line_number": 8, "usage_type": "attribute"}, {"api_name": "getConfig.ldap_port", "line_number": 9, "usage_type": "attribute"}, {"api_name": "getConfig.ldapcred", "line_number": 10, "usage_type": "attribute"}, {"api_name": "getConfig.ldappass", "line_number": 10, "usage_type": "attribute"}, {"api_name": "ldap.SCOPE_SUBTREE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ldap.SCOPE_SUBTREE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "ldap.LDAPError", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "23080451356", "text": "import sys\nimport os\nimport yaml\nimport argparse\nimport subprocess\n\nfrom benchmark_utils import PaddleInferBenchmark\n\n\"\"\"\n{'CPU_UTILIZATION': 0.8, 'MAX_GPU_MEMORY': 0, 'GPU_UTILIZATION': '0 %', 'DAG': {'50': 670.256, '60': 670.256, '70': 670.765, '80': 671.23, '90': 687.546, '95': 687.546, '99': 687.546, 'avg': 670.755625, 'qps': 0.8, 'query_count': 8, 'succ': 1.0}, 'demo': {'midp': 669.484375, 'postp': 0.184875, 'prep': 1.001875}}\n\"\"\"\n\n\nclass LogHandler(object):\n def __init__(self):\n self.fstr = \"\"\n\n def print(self):\n print(self.fstr)\n\n def dump(self, filename):\n with open(filename, 'w') as fout:\n fout.write(self.fstr)\n\n def append(self, new_str):\n self.fstr += new_str + \"\\n\"\n\n\ndef handle_benchmark(benchmark_config, benchmark_raw, indentifier):\n model_info = {\n 'model_name': benchmark_config[\"model_name\"],\n 'precision': benchmark_config[\"precision\"],\n }\n data_info = {\n 'batch_size': benchmark_config[\"batch_size\"],\n 'shape': benchmark_config[\"input_shape\"],\n 'data_num': int(benchmark_raw[\"Total count\"]),\n }\n perf_info = {\n 'client_mode' : benchmark_raw[\"client_mode\"],\n 'server_mode' : benchmark_raw[\"server_mode\"],\n 'thread_num' : 
benchmark_raw[\"thread_num\"],\n 'preprocess_time_s': \"\",\n 'inference_time_ms': float(benchmark_raw[\"mean\"][0:-2]), # *** ms\n 'median(ms)': float(benchmark_raw[\"median\"][0:-2]),\n 'postprocess_time_s': \"\",\n 'total_time_s': float(benchmark_raw[\"Total cost\"][0:-1]),\n 'each_time_s': float(benchmark_raw[\"Each thread cost\"][0:-2]),\n 'inference_time_ms_80': float(benchmark_raw[\"80_percent\"][0:-2]), # *** ms\n 'inference_time_ms_90': float(benchmark_raw[\"90_percent\"][0:-2]), # *** ms\n 'inference_time_ms_99': float(benchmark_raw[\"99_percent\"][0:-2]), # *** ms\n 'qps': benchmark_raw[\"AVG_QPS\"]\n }\n resource_info = {\n 'cpu_rss_mb': \"\",\n 'cpu_vms_mb': \"\",\n 'cpu_shared_mb': \"\",\n 'cpu_dirty_mb': \"\",\n 'cpu_util': benchmark_raw[\"CPU_UTILIZATION\"],\n 'gpu_rss_mb': \"\",\n 'gpu_util': f'{float(benchmark_raw[\"GPU_UTILIZATION\"].split(\" \")[0]):.2f}',\n 'gpu_mem': benchmark_raw[\"MAX_GPU_MEMORY\"],\n 'gpu_mem_util': int(benchmark_raw[\"MAX_GPU_MEMORY\"].split(\" \")[0]) / total_memory * 100\n }\n\n server_log = PaddleInferBenchmark(\n benchmark_config, model_info, data_info, perf_info, resource_info, indentifier)\n server_log(indentifier)\n\n\ndef parse_args(): # pylint: disable=doc-string-missing\n parser = argparse.ArgumentParser(\"serve\")\n parser.add_argument(\n \"--benchmark_cfg\",\n type=str,\n required=True,\n help=\"benchmark config yaml. including general info, model info, data info, conf info\"\n )\n parser.add_argument(\n \"--benchmark_log\",\n type=str,\n required=True,\n help=\"benchmark log, generated by a web service or pipeline.\")\n parser.add_argument(\n \"--output\",\n type=str,\n default=\"std_benchmark.log\",\n help=\"the output filename, default std_benchmark.log\")\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n benchmark_cfg_filename = args.benchmark_cfg\n sub = subprocess.Popen(\"nvidia-smi --id=0 --query-gpu=memory.total --format=csv,noheader\", encoding=\"utf-8\",\n shell=True, stdout=subprocess.PIPE)\n memory_str = sub.stdout.read() or \"16160 Mib\"\n total_memory = int(memory_str.split(' ')[0])\n f = open(benchmark_cfg_filename, 'r')\n benchmark_config = yaml.load(f, yaml.FullLoader)\n f.close()\n benchmark_log_filename = args.benchmark_log\n f = open(benchmark_log_filename, 'r')\n lines = f.readlines()\n lines.append(\"\\n\")\n line_no = 0\n while line_no < len(lines):\n if len(lines[line_no]) > 5 and lines[line_no].startswith(\"#---\"):\n iden = lines[line_no][5: -5]\n line_no += 1\n line_count = lines[line_no:].index(\"\\n\")\n sub_log = lines[line_no: line_no + line_count]\n sub_dict = yaml.safe_load(\"\".join(sub_log))\n client_mode = iden.split(\" \")[8]\n server_mode = iden.split(\" \")[10]\n sub_dict[\"client_mode\"] = client_mode\n sub_dict[\"server_mode\"] = server_mode\n handle_benchmark(benchmark_config, sub_dict, iden)\n line_no += line_count\n else:\n line_no += 1\n", "repo_name": "ZhangYulongg/ServingAPI", "sub_path": "benchmark/parse_profile.py", "file_name": "parse_profile.py", "file_ext": "py", "file_size_in_byte": 4504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "benchmark_utils.PaddleInferBenchmark", "line_number": 66, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 72, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 95, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 96, "usage_type": "attribute"}, {"api_name": 
"yaml.load", "line_number": 100, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 100, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 113, "usage_type": "call"}]} +{"seq_id": "9678664336", "text": "import sys\nimport os\nimport pygame\nimport math\nfrom menu import GameState,resource_path\nfile_path = os.path.dirname(__file__)\n\nif getattr(sys, 'assets', False):\n os.chdir(sys._MEIPASS)\n\n# constants\nsize = width, height = 1040, 720 # pixels\nkm_to_pixel = 0.0008 # real world units to computer screen units\nsun_diameter = 695508*2 # km\nearth_diameter = 6378.1*2 # km\nmoon_diameter = 1738.1*2 # km\nlunar_distance = 384400 # km\nzoom_size = 75 # window for seeing phases\nWHITE = (255, 255, 255) # colors\nBLACK = (0, 0, 0)\nphase_dict = { 0:'(5) Full',\n 1:'(6) Waning Gibbous',\n 2:'(7) Third Quarter',\n 3:'(8) Waning Crescent',\n 4:'(1) New',\n 5:'(2) Waxing Crescent',\n 6:'(3) First Quarter', \n 7:'(4) Waxing Gibbous' }\n\ndef earth_moon(screen):\n ### LOAD SURFACES ###\n\n # background surface\n background = pygame.image.load(resource_path(\"assets/earth_moon/space.jpg\"))\n background = pygame.transform.scale(background, size)\n background = background.convert()\n\n # earth surface\n earth = pygame.image.load(resource_path(\"assets/earth_moon/earth_from_above_shadow.png\"))\n earth = pygame.transform.scale(earth, (int(earth_diameter*km_to_pixel), int(earth_diameter*km_to_pixel)))\n earth = earth.convert()\n earth_rect = earth.get_rect()\n earth_rect.centerx, earth_rect.centery = width/2, height/2\n\n # tiny moon surface\n moon = pygame.image.load(resource_path(\"assets/earth_moon/moon_from_above.png\"))\n moon = pygame.transform.scale(moon, (int(moon_diameter*km_to_pixel), int(moon_diameter*km_to_pixel)))\n moon = moon.convert()\n moon_rect = moon.get_rect()\n moon_rect.centerx, moon_rect.centery = earth_rect.centerx, earth_rect.centery-int(lunar_distance*km_to_pixel)\n\n # arrow (pointing to tiny moon) surface\n arrow = pygame.image.load(resource_path(\"assets/earth_moon/arrow.png\"))\n arrow = pygame.transform.scale(arrow, (30, 10))\n arrow_rect = arrow.get_rect()\n arrow_rect.centerx, arrow_rect.centery = arrow_rect.centerx, arrow_rect.centery-arrow.get_height()/2\n\n # zoom window suface\n zoom = pygame.Rect(arrow_rect.centerx + arrow_rect.width + 2, arrow_rect.centery - zoom_size/2 + 2, zoom_size-4, zoom_size-4)\n zoom_border = pygame.Rect(arrow_rect.centerx + arrow_rect.width, arrow_rect.centery - zoom_size/2, zoom_size, zoom_size)\n\n # zoomed moon surface\n moon_zoomed = pygame.image.load(resource_path(\"assets/earth_moon/moon_from_above_shadow.png\"))\n moon_zoomed = pygame.transform.scale(moon_zoomed, (int(zoom_size*0.8), int(zoom_size*0.8)))\n moon_zoomed.convert()\n moon_zoomed_rect = moon_zoomed.get_rect()\n moon_zoomed_rect.center = zoom.center\n\n # phase surfaces\n phase_border = pygame.Rect(earth_rect.centerx + 5*earth_rect.width, earth_rect.centery - zoom_size/2, zoom_size, zoom_size)\n phases = []\n for i in range(1, 8+1):\n phase = pygame.image.load(resource_path(\"assets/earth_moon/phase_{}.png\").format(i))\n phase = pygame.transform.scale(phase, (int(zoom_size*0.8),int(zoom_size*0.8)))\n phase.convert()\n phase_rect = phase.get_rect()\n phase_rect.center = phase_border.center\n phases.append((phase, phase_rect))\n phase_i = -1\n\n # zig-zag (separating sun from earth-moon system) surface\n zig_zag = pygame.image.load(resource_path(\"assets/earth_moon/zig-zag.png\"))\n zig_zag = pygame.transform.scale(zig_zag, (50, 
height))\n zig_zag.convert()\n zig_zag_rect = zig_zag.get_rect()\n zig_zag_rect.center = (150, height/2)\n\n # sun surface\n sun = pygame.image.load(resource_path(\"assets/earth_moon/sun.png\"))\n sun = pygame.transform.scale(sun, (int(sun_diameter*km_to_pixel), int(sun_diameter*km_to_pixel)))\n sun.convert()\n sun_rect = sun.get_rect()\n sun_rect.center = (-450, height/2)\n\n # text surfaces\n font = pygame.font.SysFont('Sans', 18)\n title = font.render('Lunar Phases:', True, WHITE)\n phase_title = font.render('Phase seen on Earth:', True, WHITE)\n zoom_title = font.render('Lunar surface:', True, WHITE)\n sun_title = font.render('The Sun:', True, WHITE)\n story = []\n with open(resource_path('assets/earth_moon/story.txt')) as file:\n story = [font.render(line.rstrip('\\n'), True, WHITE) for line in file]\n story_i = 0\n progress = font.render('{} of {}'.format(story_i+1, len(story)), True, WHITE)\n stats = []\n stats.append('1 pixel = '+f'{int(1/km_to_pixel):,}'+' km')\n stats.append('Sun\\'s diameter = '+f'{sun_diameter:,}'+' km')\n stats.append('Earth\\'s diameter = '+f'{earth_diameter:,}'+' km')\n stats.append('Moon\\'s diameter = '+f'{moon_diameter:,}'+' km')\n stats.append('Earth <-> Moon distance = '+f'{lunar_distance:,}'+' km')\n stats = [font.render(stat, True, WHITE) for stat in stats]\n guidance = font.render('ESC: exit.', True, WHITE)\n \n ### BEGIN ANIMATION ###\n\n # variables\n angle = 0\n speed = 20\n next_tick = 500\n radius = int(lunar_distance*km_to_pixel)\n\n # method that moves moon along circular path\n def move_coords(angle, radius):\n theta = math.radians(angle)\n return (earth_rect.centerx + radius * math.cos(theta), \n earth_rect.centery + radius * math.sin(theta))\n\n # animation\n while True:\n # events\n for event in pygame.event.get():\n if event.type == pygame.QUIT: \n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n # Was it the Escape key? 
If so, stop the loop.\n                    return GameState.NEWGAME\n                elif event.key == pygame.K_LEFT:\n                    # move backward in story\n                    if story_i > 0:\n                        story_i -= 1\n                        progress = font.render('{} of {}'.format(story_i+1, len(story)), True, WHITE)\n                elif event.key == pygame.K_RIGHT:\n                    # move forward in story\n                    if story_i < len(story)-1:\n                        story_i += 1\n                        progress = font.render('{} of {}'.format(story_i+1, len(story)), True, WHITE)\n\n        # move moon along orbit\n        ticks = pygame.time.get_ticks() \n        if ticks > next_tick:\n            next_tick += speed\n            angle -= 1\n            angle %= 360\n            phase_i = len(phases) - (angle//45+1)\n        moon_rect.center = move_coords(angle, radius)\n        arrow_rect.center = moon_rect.left + arrow_rect.width, moon_rect.top\n        zoom_border.center = arrow_rect.centerx + 2.5*arrow_rect.width, arrow_rect.centery\n        zoom.center = zoom_border.center\n        moon_zoomed_rect.center = zoom.center\n\n        # determine current phase of moon\n        phase_label = font.render(phase_dict[phase_i], True, WHITE)\n\n        # display everything\n        screen.blit(background, (0, 0))\n        screen.blit(title, (width//2 - title.get_rect().width//2, title.get_rect().height))\n        screen.blit(earth, earth_rect)\n        pygame.draw.rect(screen, WHITE, phase_border, 2)\n        screen.blit(*phases[phase_i])\n        screen.blit(moon, moon_rect)\n        screen.blit(arrow, arrow_rect)\n        pygame.draw.rect(screen, WHITE, zoom_border, 2)\n        pygame.draw.rect(screen, BLACK, zoom, 0)\n        screen.blit(moon_zoomed, moon_zoomed_rect)\n        screen.blit(phase_title, (phase_border.centerx - phase_title.get_rect().width//2, \n                                  phase_border.centery - phase_border.height))\n        screen.blit(phase_label, (phase_border.centerx - phase_label.get_rect().width//2, \n                                  phase_border.centery + phase_border.height))\n        screen.blit(zoom_title, (zoom_border.centerx - zoom_title.get_rect().width//2, \n                                 zoom_border.centery - zoom_border.height))\n        screen.blit(zig_zag, zig_zag_rect)\n        screen.blit(sun, sun_rect)\n        screen.blit(sun_title, (10, sun_title.get_rect().height))\n        screen.blit(story[story_i], ((width-story[story_i].get_rect().width)//2, height-2*story[story_i].get_rect().height))\n        for stat_i, stat in enumerate(stats):\n            screen.blit(stat, ((width-stat.get_rect().width-10, (stat_i+1)*stat.get_rect().height)))\n        screen.blit(progress, ((width-progress.get_rect().width-10, height-2*progress.get_rect().height)))\n        screen.blit(guidance, (10, height-2*guidance.get_rect().height))\n\n        # update display\n        pygame.display.update()", "repo_name": "lessleyH/Astro1_Moon", "sub_path": "phases.py", "file_name": "phases.py", "file_ext": "py", "file_size_in_byte": 8593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "sys._MEIPASS", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 34, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 39, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 39, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 39,
"usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 46, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 53, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 54, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 63, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 64, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 73, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 82, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 89, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.transform.scale", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 96, "usage_type": "attribute"}, {"api_name": "menu.resource_path", "line_number": 102, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 125, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 126, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 133, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 135, "usage_type": "attribute"}, 
{"api_name": "pygame.K_ESCAPE", "line_number": 136, "usage_type": "attribute"}, {"api_name": "menu.GameState.NEWGAME", "line_number": 138, "usage_type": "attribute"}, {"api_name": "menu.GameState", "line_number": 138, "usage_type": "name"}, {"api_name": "pygame.K_LEFT", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.time.get_ticks", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 170, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 170, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 175, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 193, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 193, "usage_type": "attribute"}]} +{"seq_id": "4736300320", "text": "from collections import namedtuple\n\nimport bitstruct\n\nfrom .errors import Error\n\nFrameId = namedtuple('FrameId',\n [\n 'priority',\n 'reserved',\n 'data_page',\n 'pdu_format',\n 'pdu_specific',\n 'source_address',\n ])\n\n\nPGN = namedtuple('PGN',\n [\n 'reserved',\n 'data_page',\n 'pdu_format',\n 'pdu_specific'\n ])\n\n\ndef is_pdu_format_1(pdu_format):\n return (pdu_format < 240)\n\n\ndef frame_id_pack(priority,\n reserved,\n data_page,\n pdu_format,\n pdu_specific,\n source_address):\n \"\"\"Pack given values as a frame id and return it as an integer.\n\n \"\"\"\n\n try:\n packed = bitstruct.pack('u3u1u1u8u8u8',\n priority,\n reserved,\n data_page,\n pdu_format,\n pdu_specific,\n source_address)\n except bitstruct.Error:\n if priority > 7:\n raise Error(f'Expected priority 0..7, but got {priority}.') from None\n elif reserved > 1:\n raise Error(f'Expected reserved 0..1, but got {reserved}.') from None\n elif data_page > 1:\n raise Error(f'Expected data page 0..1, but got {data_page}.') from None\n elif pdu_format > 255:\n raise Error(f'Expected PDU format 0..255, but got {pdu_format}.') from None\n elif pdu_specific > 255:\n raise Error(f'Expected PDU specific 0..255, but got {pdu_specific}.') from None\n elif source_address > 255:\n raise Error(f'Expected source address 0..255, but got {source_address}.') from None\n else:\n raise Error('Internal error.') from None\n\n return bitstruct.unpack('u29', packed)[0]\n\n\ndef frame_id_unpack(frame_id):\n \"\"\"Unpack given frame id and return a tuple of priority, reserved,\n data page, PDU format, PDU specific and source address.\n\n \"\"\"\n\n try:\n packed = bitstruct.pack('u29', frame_id)\n except bitstruct.Error:\n raise Error(f'Expected a frame id 0..0x1fffffff, but got {hex(frame_id)}.') from None\n\n return FrameId(*bitstruct.unpack('u3u1u1u8u8u8', packed))\n\n\ndef pgn_pack(reserved, data_page, pdu_format, pdu_specific=0):\n \"\"\"Pack given values as a parameter group number (PGN) and return it\n as an integer.\n\n \"\"\"\n\n if pdu_format < 240 and pdu_specific != 0:\n raise Error(\n 'Expected PDU specific 0 when PDU format is 0..239, but got {}.'.format(\n pdu_specific))\n\n try:\n packed = bitstruct.pack('u1u1u8u8',\n reserved,\n data_page,\n pdu_format,\n pdu_specific)\n except bitstruct.Error:\n if reserved > 1:\n raise Error(f'Expected 
reserved 0..1, but got {reserved}.') from None\n elif data_page > 1:\n raise Error(f'Expected data page 0..1, but got {data_page}.') from None\n elif pdu_format > 255:\n raise Error(f'Expected PDU format 0..255, but got {pdu_format}.') from None\n elif pdu_specific > 255:\n raise Error(f'Expected PDU specific 0..255, but got {pdu_specific}.') from None\n else:\n raise Error('Internal error.') from None\n\n return bitstruct.unpack('u18', packed)[0]\n\n\ndef pgn_unpack(pgn):\n \"\"\"Unpack given parameter group number (PGN) and return a tuple of\n Reserved, Data Page, PDU Format and PDU Specific.\n\n \"\"\"\n\n try:\n packed = bitstruct.pack('u18', pgn)\n except bitstruct.Error:\n raise Error(f'Expected a parameter group number 0..0x3ffff, but got {hex(pgn)}.') from None\n\n return PGN(*bitstruct.unpack('u1u1u8u8', packed))\n\n\ndef pgn_from_frame_id(frame_id):\n \"\"\"Get the parameter group number (PGN) from given frame id.\n\n \"\"\"\n\n unpacked = frame_id_unpack(frame_id)\n\n if unpacked.pdu_format < 240:\n pdu_specific = 0\n else:\n pdu_specific = unpacked.pdu_specific\n\n return pgn_pack(unpacked.reserved,\n unpacked.data_page,\n unpacked.pdu_format,\n pdu_specific)\n", "repo_name": "cantools/cantools", "sub_path": "src/cantools/j1939.py", "file_name": "j1939.py", "file_ext": "py", "file_size_in_byte": 4509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1613, "dataset": "github-code", "pt": "50", "api": [{"api_name": "collections.namedtuple", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 18, "usage_type": "call"}, {"api_name": "bitstruct.pack", "line_number": 42, "usage_type": "call"}, {"api_name": "bitstruct.Error", "line_number": 49, "usage_type": "attribute"}, {"api_name": "errors.Error", "line_number": 51, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 53, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 55, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 57, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 59, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 61, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 63, "usage_type": "call"}, {"api_name": "bitstruct.unpack", "line_number": 65, "usage_type": "call"}, {"api_name": "bitstruct.pack", "line_number": 75, "usage_type": "call"}, {"api_name": "bitstruct.Error", "line_number": 76, "usage_type": "attribute"}, {"api_name": "errors.Error", "line_number": 77, "usage_type": "call"}, {"api_name": "bitstruct.unpack", "line_number": 79, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 89, "usage_type": "call"}, {"api_name": "bitstruct.pack", "line_number": 94, "usage_type": "call"}, {"api_name": "bitstruct.Error", "line_number": 99, "usage_type": "attribute"}, {"api_name": "errors.Error", "line_number": 101, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 103, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 105, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 107, "usage_type": "call"}, {"api_name": "errors.Error", "line_number": 109, "usage_type": "call"}, {"api_name": "bitstruct.unpack", "line_number": 111, "usage_type": "call"}, {"api_name": "bitstruct.pack", "line_number": 121, "usage_type": "call"}, {"api_name": "bitstruct.Error", "line_number": 122, "usage_type": "attribute"}, {"api_name": "errors.Error", "line_number": 123, "usage_type": "call"}, {"api_name": 
"bitstruct.unpack", "line_number": 125, "usage_type": "call"}]} +{"seq_id": "22725532864", "text": "from tqdm import tqdm\r\nimport pandas as pd\r\nimport json\r\n\r\ncrimes = pd.read_csv('data\\cartodb-query.csv')\r\ndata = crimes[[\"dispatch_date\", \"dispatch_time\",\"location_block\", \"text_general_code\", \"point_x\", \"point_y\"]]\r\n\r\npolice_stations = pd.read_csv('data\\Police Station w coordinates.csv').reindex(columns=[\"OBJECTID\", \"DISTRICT_NUMBER\",\r\n \"LOCATION\", \"TELEPHONE_NUMBER\",\r\n \"RULEID\", \"Longitude\", \"Latitude\"])\r\n# Calculating the Manhattan distance\r\ndef manhattan_distance(x1, y1, x2, y2):\r\n return abs(x1 - x2) + abs(y1 - y2)\r\n\r\ndistances =[]\r\n# Calculating distance to nearest police station for each crime\r\nfor index, row in tqdm(data.iterrows(), total=data.shape[0], desc=\"Processing\", ascii=False):\r\n crime_x = row['point_x']\r\n crime_y = row['point_y']\r\n min_distance = float('inf')\r\n for _, station in police_stations.iterrows():\r\n station_x = station['Longitude']\r\n station_y = station['Latitude']\r\n distance = manhattan_distance(crime_x, crime_y, station_x, station_y)\r\n min_distance = min(min_distance, distance)\r\n distances.append(min_distance)\r\nwith open('data\\min_distances.json', 'w') as f:\r\n json.dump({\"min_distances\":distances}, f)\r\n", "repo_name": "drexelai/crime-ml", "sub_path": "getPoliceStationDistances.py", "file_name": "getPoliceStationDistances.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 17, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "2413369764", "text": "from flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef homepage():\n return \"Welcome to dis homepage\"\n\n@app.route(\"/bmi//\")\ndef bmi(weight, height):\n bmi = weight/((height/100)**2)\n if bmi < 16:\n return \"Serverly underweight\"\n elif 16 <= bmi < 18.5:\n return \"Underweight\"\n elif 18.5 <= bmi < 25:\n return \"Normal\"\n elif 25 <= bmi < 30:\n return \"Overweight\"\n elif 30 <= bmi:\n return \"Obese\"\n\n\nif __name__ == \"__main__\":\n app.run(debug = True)", "repo_name": "hogitayden/nguyenhoanggiang-fundametal-c4e22", "sub_path": "web1/homework/way1_ex1_ser.py", "file_name": "way1_ex1_ser.py", "file_ext": "py", "file_size_in_byte": 535, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "30251983358", "text": "from django import forms\n# from django.contrib.auth import get_user_model\nfrom leads.models import User\n# User = get_user_model()\n\n\nclass AgentModelForm(forms.ModelForm):\n class Meta:\n model = User\n fields = (\n 'email',\n 'username',\n 'first_name',\n 'last_name'\n )\n\n\n# class LeadModelForm(forms.ModelForm):\n# class Meta:\n# model = Lead\n# fields = (\n# 'first_name',\n# 'last_name',\n# 'age',\n# 'agent',\n# 'description',\n# 'phone_number',\n# 'email',\n# )\n", "repo_name": "om4rkh4lid/django-leads-project", "sub_path": "agents/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 629, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, 
"dataset": "github-code", "pt": "50", "api": [{"api_name": "django.forms.ModelForm", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 7, "usage_type": "name"}, {"api_name": "leads.models.User", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "73674392156", "text": "import os\nfrom flask import Flask\nfrom config import Config\nfrom .filters import (\n valueformat,\n fromtimestamp,\n uptimeformat,\n totaluptimeformat\n)\n\n\ndef create_app(test_config=None):\n app = Flask(__name__)\n\n app.config.from_object(Config)\n\n app.jinja_env.filters['valueformat'] = valueformat\n app.jinja_env.filters['fromtimestamp'] = fromtimestamp\n app.jinja_env.filters['uptimeformat'] = uptimeformat\n app.jinja_env.filters['totaluptimeformat'] = totaluptimeformat\n\n if test_config is not None:\n app.config.from_mapping(test_config)\n\n from . import main, search\n app.register_blueprint(main.bp)\n app.register_blueprint(search.bp)\n\n from . import db\n db.init_app(app)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n return app\n", "repo_name": "dreamer20/pc_status", "sub_path": "pc_status/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 15, "usage_type": "argument"}, {"api_name": "filters.valueformat", "line_number": 17, "usage_type": "name"}, {"api_name": "filters.fromtimestamp", "line_number": 18, "usage_type": "name"}, {"api_name": "filters.uptimeformat", "line_number": 19, "usage_type": "name"}, {"api_name": "filters.totaluptimeformat", "line_number": 20, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "25105273531", "text": "#!/usr/bin/env python\n\nimport bot_token\n\nfrom telegram.ext import Updater, CommandHandler, CallbackContext\nfrom telegram import Update, bot, ParseMode\n\nfrom time import time\n\n# used to block spam\nusers = dict()\n\n\ndef is_banned(user: str) -> bool:\n with open(\"ban.log\", \"r\") as f:\n if user in f.readlines():\n return True\n return False\n\n\ndef is_admin(user: str) -> bool:\n with open(\"admin.log\", \"r\") as f:\n if user in f.readlines():\n return True\n return False\n\n\ndef start(update: Update, unused: CallbackContext):\n if is_banned(str(update.message.chat.username) + \",\" + str(update.message.chat.id)):\n return\n with open(\"users.log\", \"r+\") as users_log:\n content = users_log.readlines()\n if not str(update.message.chat.username) + \",\" + str(update.message.chat.id) in content:\n users_log.write(str(update.message.chat.username) + \",\" + str(update.message.chat.id))\n users_log.flush()\n update.message.reply_text(\"Benvenuto al Webinar di Sicurezza Informatica.\\n\" +\n \"Il bot sarà attivo solo durante i webinar.\\n\" +\n \"Per scoprire tutte le funzionalità, digita il comando /help\", parse_mode=ParseMode.MARKDOWN)\n\n\ndef bot_help(update: Update, unused: CallbackContext):\n update.message.reply_text(\"/start Avvio del bot\\n\" +\n \"/ask Chiedici qualcosa privatamente, specificando se vuoi una risposta in privato\\n\" +\n \"/info Link utili\\n\" +\n \"/beer Contribuisci offrendoci una birra\\n\" +\n \"/today Argomenti del giorno\\n\", parse_mode=ParseMode.MARKDOWN)\n\n\ndef ask(update: Update, unused: CallbackContext):\n if 
is_banned(str(update.message.chat.username) + \",\" + str(update.message.chat.id)):\n return\n if update.message.chat.username in users.keys() and time() - users[update.message.chat.username] <= 30:\n update.message.reply_text(\"Errore, puoi fare una domanda ogni 30 secondi\", parse_mode=ParseMode.MARKDOWN)\n return\n users[update.message.chat.username] = time()\n update.message.reply_text(\"La tua domanda è stata notificata, presto riceverai una risposta\",\n parse_mode=ParseMode.MARKDOWN)\n print(update.message.chat.username, \"ha chiesto:\", update.message.text)\n with open(\"admin.log\", \"r\") as admin_log:\n for user in admin_log.readlines():\n user_id = user.split(\",\")[1]\n bot.Bot(bot_token.TOKEN).send_message(chat_id=user_id,\n text=\"Se devi bannare l'utente \" + update.message.chat.username +\n \"puoi usare /ban \" + update.message.chat.username,\n parse_mode=ParseMode.MARKDOWN)\n\n\ndef info(update: Update, unused: CallbackContext):\n # gruppo informatica, contatti e link (cartella/materiale/...)\n update.message.reply_text(\"Siamo due studenti universitari che, dopo numerose richieste, abbiamo deciso di\" +\n \"creare questa serie di incontri per farvi conoscere il mondo delle CTF e della \" +\n \"CyberSecurity\\n\" +\n \"Se vuoi contattarci in privato e non durante i meet, scrivici a\\n\" +\n # \"e-mail\\n\\tharlockofficial.github@gmail.com\\n\\ts01spacecowboy@gmail.com\\nte\"+\n \"```telegram```\\n\\t@HarlockOfficial\\n\\t@SpaceCowboyS01\", parse_mode=ParseMode.MARKDOWN)\n\n\ndef beer(update: Update, unused: CallbackContext):\n # paypal link\n update.message.reply_text(\"Se ti dovessero piacere i webinar, sentiti libero di offrirci una birra!\\n\" +\n \"https://www.paypal.me/eserciziinformatica\", parse_mode=ParseMode.MARKDOWN)\n\n\ndef today(update: Update, unused: CallbackContext):\n update.message.reply_text(\"\", parse_mode=ParseMode.MARKDOWN)\n pass\n\n\ndef ban(update: Update, unused: CallbackContext):\n if not is_admin(str(update.message.chat.username) + \",\" + str(update.message.chat.id)):\n return\n user_to_ban = update.message.text\n with open(\"users.log\", \"r\") as users_log:\n users_list = users_log.readlines()\n for i in range(len(users_list)):\n if user_to_ban == users_list[i].split(\",\")[0]:\n with open(\"ban.log\", \"a\") as ban_log:\n ban_log.write(users_list[i])\n ban_log.flush()\n break\n\n\ndef main():\n updater = Updater(bot_token.TOKEN, use_context=True)\n dp = updater.dispatcher\n\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", bot_help))\n dp.add_handler(CommandHandler(\"ask\", ask))\n dp.add_handler(CommandHandler(\"info\", info))\n dp.add_handler(CommandHandler(\"beer\", beer))\n dp.add_handler(CommandHandler(\"today\", today))\n\n # admin function\n dp.add_handler(CommandHandler(\"ban\", ban))\n\n # Invia Notifica a tutti i loggati (chiunque ha fatto start)\n with open(\"users.log\", \"r\") as f:\n user_list = f.readlines()\n for user in user_list:\n user_id = user.split(\",\")[1]\n bot.Bot(bot_token.TOKEN).send_message(chat_id=user_id, text=\"Il meet inizierà a breve\", parse_mode=ParseMode.MARKDOWN)\n\n print(\"Bot Started\")\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "HarlockOfficial/Programmazione-Informatica-CyberSecurity-Bot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": 
[{"api_name": "telegram.Update", "line_number": 28, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 28, "usage_type": "name"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 38, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 41, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 41, "usage_type": "name"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 46, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 46, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 49, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 49, "usage_type": "name"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 53, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 53, "usage_type": "name"}, {"api_name": "time.time", "line_number": 55, "usage_type": "call"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 57, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 57, "usage_type": "name"}, {"api_name": "telegram.bot.Bot", "line_number": 62, "usage_type": "call"}, {"api_name": "telegram.bot", "line_number": 62, "usage_type": "name"}, {"api_name": "bot_token.TOKEN", "line_number": 62, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 65, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 65, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 68, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 68, "usage_type": "name"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 75, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 78, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 78, "usage_type": "name"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 81, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 81, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 84, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 84, "usage_type": "name"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 85, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 85, "usage_type": "name"}, {"api_name": "telegram.Update", "line_number": 89, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackContext", "line_number": 89, "usage_type": "name"}, {"api_name": "telegram.ext.Updater", "line_number": 104, "usage_type": "call"}, {"api_name": "bot_token.TOKEN", "line_number": 104, "usage_type": "attribute"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 107, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 108, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 109, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 110, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 111, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 112, "usage_type": "call"}, {"api_name": 
"telegram.ext.CommandHandler", "line_number": 115, "usage_type": "call"}, {"api_name": "telegram.bot.Bot", "line_number": 122, "usage_type": "call"}, {"api_name": "telegram.bot", "line_number": 122, "usage_type": "name"}, {"api_name": "bot_token.TOKEN", "line_number": 122, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode.MARKDOWN", "line_number": 122, "usage_type": "attribute"}, {"api_name": "telegram.ParseMode", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "70941275355", "text": "import requests\n\nfrom tqdm import tqdm\n\nimport numpy as np\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\nimport re\n\nimport os\nfrom io import StringIO\n\nimport numpy as np\n\nfrom astroquery.mast import Observations\nfrom astroquery.simbad import Simbad\n\nfrom . import calculations as c\n\nclass Target:\n \"\"\"\n Performance of web reconnaisanse on an interesting target.\n \n Attributes:\n :aliases: (list of strings) names by which the target is known in other catalogs.\n :input_name: (str) name of target you're interested in.\n :webb_approved: (bool) whether or not the target has been included in approved webb program.\n :hst_approved: (bool) whether or not the target has been included in a public HST program.\n :webb_proposal_link: (list of strings) if there are associated JWST proposals, these\n are the associated URLs.\n :webb_proposal_names: (list of strings) if there are associated JWST proposals, these\n are the associated proposal names.\n :hst_data: (dict) keys are HST proposals, vals are links to associated data producs.\n :exoplanet_archive_data: (dict)\n :arxiv_links: (list) list to PDFs of arxiv papers that have self.input_name or self.aliases in \n their abstracts\n \n Methods\n --------\n :__init__: initializes.\n :scrape_all: master run method.\n :find_aliases: finds aliases.\n :search_webb_site: manual scraping, not preferred.\n \n \n \"\"\"\n \n def __init__(self, input_name):\n # instantiates object\n self.input_name = input_name\n self.aliases = []\n self.webb_approved = None\n self.hst_approved = None\n self.webb_proposal_link = []\n self.webb_proposal_names = []\n self.hst_data = {}\n self.exoplanet_archive_data = {}\n self.arxiv_links = []\n self.planet_properties = None\n \n def scrape_all(self):\n \"\"\"\n The preferred scraping method.\n This calls all other main scraping methods.\n \"\"\"\n self.find_aliases()\n self.scrape_arxiv()\n self.scrape_webb_MAST()\n self.scrape_HST()\n self.scrape_planet_properties()\n \n def scrape_planet_properties(self):\n \"\"\"\n Uses exo_MAST to get planet properties for this target.\n\n \"\"\"\n # First, we need to find the \"canonical name\" of the planet — this is how the parameters\n # can be subsequently searched.\n planet_name = self.input_name\n planet_request = requests.get(\n f'https://exo.mast.stsci.edu/api/v0.1/exoplanets/identifiers/?name={planet_name}')\n \n no_nulls = planet_request.text.replace('null', 'np.nan')\n correct_trues = no_nulls.replace('true', 'True')\n correct_bools = correct_trues.replace('false', 'False')\n planet_names = eval(correct_bools)\n canonical_name = planet_names['canonicalName']\n\n properties_request = requests.get(\n f'https://exo.mast.stsci.edu/api/v0.1/exoplanets/{str.lower(canonical_name)}/properties')\n \n no_nulls = properties_request.text.replace('null', 'np.nan')\n correct_trues = no_nulls.replace('true', 'True')\n correct_bools = correct_trues.replace('false', 'False')\n planet_properties = eval(correct_bools)[0]\n self.planet_properties = 
planet_properties\n    \n    \n    def run_all_calculations(self, verbose=False):\n        \"\"\"\n        Calculates the TSM and ESM (Kempton+ 18) for this target, using known planet\n        properties.\n        \"\"\"\n        if not self.planet_properties:\n            self.scrape_planet_properties()\n        TSM = c.TSM(self.planet_properties, verbose=verbose)\n        ESM = c.ESM(self.planet_properties, verbose=verbose)\n        self.TSM, self.ESM = TSM, ESM\n    \n    \n    def search_webb_site(self, URL):\n        \"\"\"\n        Checks whether self has been approved via GTO or ERS. Needs debugging as missing above targets still.\n        Adds any links to webb_proposal_link. changed webb_approved. not validated to ERS.\n        \"\"\"\n        if not self.aliases:\n            print('Not checking aliases.')\n        page = requests.get(URL)\n\n        soup = BeautifulSoup(page.content, 'html.parser')\n\n        all_targets = []\n\n        gto_pages = []\n        for link in soup.find_all('a'):\n            if link.has_attr('href'):\n                str_begin = '/jwst/observing-programs/program-information?id='\n                if link.attrs['href'][:48] == str_begin:\n                    gto_page = 'http://www.stsci.edu/' + link.attrs['href'] # give better name\n                    gto_pages.append(gto_page)\n\n        for gto_page in tqdm(gto_pages, position=0, leave=True):\n            ID = gto_page[-4:]\n            pdf_link = f'http://www.stsci.edu/jwst/phase2-public/{ID}.pdf'\n            urlretrieve(pdf_link, \"tmp.pdf\")\n            text = convert_pdf_to_txt(\"tmp.pdf\")\n            start = text.find(\"Science Target\") + len(\"Science Target\")\n            end = text.find(\"ABSTRACT\")\n            target_table = text[start:end]\n            targets = list(set(re.findall(r\"\"\"\\(\\d\\)\\ (\\w+)\"\"\", target_table)))\n            # targets += list(set(re.findall(r\"\"\"\\(\\d\\)(\\w+)\"\"\", target_table)))\n            targets += list(set(re.findall(r\"\"\"\\(\\d\\)\\ (\\w+-\\w+)\"\"\", target_table)))\n            targets += list(set(re.findall(r\"\"\"\\(\\d\\)\\ (\\w+-\\w+-\\w+)\"\"\", target_table))) # for HAT-P-35, for example\n            targets += list(set(re.findall(r\"\"\"\\(\\d\\)\\ (\\w+ \\w+)\"\"\", target_table)))\n            in_targets = [a for a in self.aliases if a in targets]\n            if len(in_targets) > 0 and self.input_name in targets:\n                self.webb_approved = True\n                self.webb_proposal_link.append(pdf_link)\n            os.remove('tmp.pdf')\n        if self.webb_approved is None: # has not been changed to True\n            self.webb_approved = False\n        return\n    \n    def search_GTO(self):\n        \"\"\"\n        Manually scrapes the JWST GTO page.\n        \"\"\"\n        URL = 'http://www.stsci.edu/jwst/observing-programs/approved-gto-programs'\n        self.search_webb_site(URL)\n    \n    def search_ERS(self):\n        \"\"\"\n        Manually scrapes the JWST ERS page.\n        \"\"\"\n        URL = 'http://www.stsci.edu/jwst/observing-programs/approved-ers-programs'\n        self.search_webb_site(URL)\n    \n    def search_webb(self):\n        \"\"\"\n        Manually scrapes both the JWST ERS and GTO pages.\n        \"\"\"\n        self.search_GTO()\n        self.search_ERS()\n    \n    def find_aliases(self):\n        \"\"\"\n        Uses astroquery and Simbad to find any aliases of input_name; these are then \n        put into the self.aliases list.\n        \"\"\"\n        try:\n            self.aliases = list(Simbad.query_objectids(self.input_name)['ID'])\n        except TypeError as e:\n            if str(e) == \"\"\"'NoneType' object is not subscriptable\"\"\":\n                print(f'SIMBAD could not resolve {self.input_name}.
Attempting to scrape ExoFOP.')\n if self.input_name[:3] != 'TIC':\n print(f'Could not scrape {self.input_name}; please try again after changing input_name to a TICID.')\n else:\n self.scrape_exoFOP_aliases(self.input_name[5:])\n \n \n def scrape_HST(self):\n \"\"\"\n Checks MAST for the target's relevant HST proposals/data.\n Modifies hst_approved: if there are observations, sets it to True; otherwise False.\n Appends links to relevant HST data to hst_data.\n \"\"\"\n obs = Observations.query_object(self.input_name, radius=\".02 deg\") # should work. waliases\n HST_obs = obs[obs['obs_collection']=='HST']\n if len(HST_obs) > 0:\n self.hst_approved = True\n for ob in HST_obs:\n self.hst_data[ob['obs_title']] = ob['dataURL']\n if self.hst_approved is None:\n self.hst_approved = False\n \n def scrape_webb_MAST(self):\n \"\"\"\n Checks MAST for the target's relevant JWST proposals/data.\n Modifies webb_approved: if there are relevant proposals, sets it to True; otherwise False.\n Appends the names of these proposals to webb_proposal_names.\n \"\"\"\n obs = Observations.query_object(self.input_name, radius=\".02 deg\") # should work. waliases\n JWST_obs = obs[obs['obs_collection']=='JWST']\n if len(JWST_obs) > 0:\n self.webb_approved = True\n for ob in JWST_obs:\n self.webb_proposal_names.append(ob['obs_title'])\n if self.webb_approved is None:\n self.webb_approved = False\n return\n \n \n def scrape_arxiv(self, progress=False):\n \"\"\"\n Searches through arXiv abstracts for the target.\n Appends links of relevant arXiv pdfs to arxiv_links.\n If progress=True, outputs a tqdm progress bar.\n \"\"\"\n if self.aliases:\n for alias in tqdm(self.aliases, position=0, leave=True, desc='Scraping arXiv'):\n query_URL = f'https://arxiv.org/search/astro-ph?query={alias}&searchtype=abstract&abstracts=show&order=-announced_date_first&size=50'\n page = requests.get(query_URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n for link in soup.find_all('a'):\n try:\n paper = link.get('href')\n if 'pdf'in paper and paper not in self.arxiv_links:\n self.arxiv_links.append(paper)\n except TypeError:\n continue\n else: # I'm sure there's a more elegant way to do this!\n query_URL = f'https://arxiv.org/search/astro-ph?query={self.input_name}&searchtype=abstract&abstracts=show&order=-announced_date_first&size=50'\n page = requests.get(query_URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n for link in soup.find_all('a'):\n try:\n paper = link.get('href')\n if 'pdf'in paper and paper not in self.arxiv_links:\n self.arxiv_links.append(paper)\n except TypeError:\n continue\n \n \n def scrape_exoplanet_archive(self):\n if not self.aliases:\n print('Not checking aliases.')\n raise NotImplementedError\n \n \n def scrape_exoFOP_aliases(self, ticid):\n \"\"\"\n This manually scrapes exoFOP for aliases, given a TICID.\n \"\"\"\n URL = f'https://exofop.ipac.caltech.edu/tess/target.php?id={ticid}'\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n aliases = soup.find_all('table')[7].td.string.split(', ')\n aliases_formatted = [a if a[0] != ' ' else a[1:] for a in aliases]\n aliases_to_add = [a for a in \\\n tqdm(aliases_formatted, leave=True, position=0, desc='Checking aliases to add') \\\n if a not in self.aliases]\n self.aliases += aliases_to_add\n \n \n ", "repo_name": "arjunsavel/webb_scraping", "sub_path": "webb_scraping/target.py", "file_name": "target.py", "file_ext": "py", "file_size_in_byte": 11233, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, 
"dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 119, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 121, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 133, "usage_type": "call"}, {"api_name": "urllib.request.urlretrieve", "line_number": 136, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 141, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 143, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 144, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 145, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 150, "usage_type": "call"}, {"api_name": "astroquery.simbad.Simbad.query_objectids", "line_number": 182, "usage_type": "call"}, {"api_name": "astroquery.simbad.Simbad", "line_number": 182, "usage_type": "name"}, {"api_name": "astroquery.mast.Observations.query_object", "line_number": 198, "usage_type": "call"}, {"api_name": "astroquery.mast.Observations", "line_number": 198, "usage_type": "name"}, {"api_name": "astroquery.mast.Observations.query_object", "line_number": 213, "usage_type": "call"}, {"api_name": "astroquery.mast.Observations", "line_number": 213, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 231, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 233, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 234, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 244, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 245, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 266, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 267, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "15328613794", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 28 14:37:12 2012\r\n\r\n@author: IntelligentSystems\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom itertools import groupby\r\n\r\ndef make_perturbations(offset,num_trials,p,max_consecutive):\r\n num_perturbations = p * num_trials\r\n trials = np.zeros(num_trials-offset)\r\n trials[0:num_perturbations] = 1\r\n valid = False\r\n while not valid:\r\n np.random.shuffle(trials)\r\n consecutive = max([len(list(x)) for k,x in groupby(trials)])\r\n valid = consecutive <= max_consecutive\r\n trials = np.concatenate((np.zeros(offset), trials))\r\n return trials", "repo_name": "kampff-lab/shuttling-analysis", "sub_path": "make_perturbations.py", "file_name": "make_perturbations.py", "file_ext": "py", "file_size_in_byte": 607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "itertools.groupby", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "12135234169", "text": "#!/usr/bin/env python\nimport sys\nfrom itertools import cycle\n\ninput_file = sys.argv[1]\n\nwith open(input_file) as file:\n input_values = 
[int(line.strip('\\n')) for line in file.readlines()]\n\n# Part 1\n\n\ndef part1():\n return sum(input_values)\n\n# Part 2\n\n\ndef part2():\n reached_freqs = set()\n freq = 0\n for value in cycle(input_values):\n freq += value\n if freq not in reached_freqs:\n reached_freqs.add(freq)\n else:\n return freq\n\n\nprint(\" Part 1 : {}\".format(part1()))\nprint(\" Part 2 : {}\".format(part2()))\n", "repo_name": "xmadsen/adventofcode", "sub_path": "2018/solution/day1.py", "file_name": "day1.py", "file_ext": "py", "file_size_in_byte": 565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "itertools.cycle", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "9964648885", "text": "import hashlib\nimport os\nimport string\nfrom src.helpers.logging import SubParserLogger\nfrom pefile import PE, SECTION_CHARACTERISTICS, retrieve_flags\nfrom typing import Any\nfrom src.helpers import Command\n\nclass PEParser(Command):\n \"\"\"\n PE Parser: Collects information using the pefile library \n\n Attributes\n ----------\n - name: str\n MD5 of the sample\n\n - path: str\n Path to the samples location\n\n - logger: SubParserLogger\n SubParserLogger Object for output to console and file \n\n Methods\n -------\n - convert_char(self, char): char\n Converts a char into a readable char\n\n - convert_to_printable(self, s): str\n Converts chars into a readable string\n\n - check_verinfo(self, pe): str\n Returns the examined PE's version information\n\n - execute(self): dict\n Collects PE Information and is executed by the command invoker\n\n - information(self): dict\n Parser information for compatible file types\n \"\"\"\n def __init__(self, md5: str = None, path: os.path = None) -> None:\n \"\"\"\n Constructor for pe parser\n\n Parameters\n ----------\n - md5: str\n MD5 hash of the sample\n \n - logger: SubParserLogger\n SubParserLogger Object for output to console and file\n\n - path: str\n Full path to the location of the sample\n \"\"\"\n super().__init__()\n self.name = md5\n self.path = path\n self.logger = SubParserLogger()\n \n \n def information(self):\n \"\"\"\n Compatiblity information for parser\n\n Returns\n -------\n Dictionary\n \"\"\"\n return {\n \"name\": \"PEParser\",\n \"file_magic\": {\n \"short_type\": \"pe\",\n \"other_types\": [\n \"pe32\",\n \"ms-dos\",\n \"application/x-dosexec\"\n ]\n }\n }\n\n # region Convert Char\n def convert_char(self, char):\n \"\"\" \n Converts a char into a readable char\n\n Returns\n -------\n Char\n \"\"\"\n if (char in string.ascii_letters or\n char in string.digits or\n char in string.punctuation or\n char in string.whitespace):\n return char\n else:\n return r'\\x%02x' % ord(char)\n # endregion\n\n # region Convert To Printable\n def convert_to_printable(self, s):\n \"\"\"\n Converts chars into a readable string\n\n Returns\n -------\n String\n \"\"\"\n return ''.join([self.convert_char(c) for c in s])\n # endregion \n\n # region Check Version Info\n def check_verinfo(self, pe):\n \"\"\"\n Returns the examined PE's version information\n\n Returns\n -------\n String\n \"\"\"\n ret = []\n\n if hasattr(pe, 'VS_VERSIONINFO'):\n if hasattr(pe, 'FileInfo'):\n for entry in pe.FileInfo:\n if hasattr(entry, 'StringTable'):\n for st_entry in entry.StringTable:\n for str_entry in st_entry.entries.items():\n ret.append(self.convert_to_printable(str_entry[0]) +\n ': ' +\n self.convert_to_printable(str_entry[1]))\n 
elif hasattr(entry, 'Var'):\n                        for var_entry in entry.Var:\n                            if hasattr(var_entry, 'entry'):\n                                ret.append(self.convert_to_printable(var_entry.entry.keys()[0]) + ': ' + var_entry.entry.values()[0])\n        return '\\n'.join(ret)\n    # endregion\n\n    # region Execute ( For Command Object )\n    def execute(self) -> Any:\n        \"\"\"\n        Collects PE Information and is executed by the command invoker\n\n        Returns\n        -------\n        Dictionary\n        \"\"\"\n\n        _pe = PE(self.path)\n\n        with open(self.path,\"rb\") as sample:\n            _data = {\n                \"md5\": hashlib.md5((contents := sample.read())).hexdigest(),  # read the file once and reuse the bytes\n                \"sha256\": hashlib.sha256(contents).hexdigest(),  # a second sample.read() here would hash b''\n                \"number_sections\": _pe.FILE_HEADER.NumberOfSections,\n                \"entry_point\": hex(_pe.OPTIONAL_HEADER.AddressOfEntryPoint),\n                \"image_base\": hex(_pe.OPTIONAL_HEADER.ImageBase),\n                \"pe_signature\": hex(_pe.NT_HEADERS.Signature),\n                \"sections\": None,\n            }\n\n        # region PE file metadata\n\n        _file_version = self.check_verinfo(_pe)\n        _data[\"file_version\"] = (\"none\" if _file_version == \"\" else _file_version)\n\n        # endregion\n\n        # region Section Collection\n        sections = []\n        for section in _pe.sections:\n            section_data = {}\n            section_read = False\n            section_write = False\n            section_execute = False\n            contains_code = False\n            contains_init = False\n\n            for permission in sorted(\n                retrieve_flags(SECTION_CHARACTERISTICS, \"IMAGE_SCN_\")\n            ):\n                if getattr(section, permission[0]):\n                    if permission[0] == \"IMAGE_SCN_MEM_READ\":\n                        section_read = True\n                    if permission[0] == \"IMAGE_SCN_MEM_WRITE\":\n                        section_write = True\n                    if permission[0] == \"IMAGE_SCN_MEM_EXECUTE\":\n                        section_execute = True\n                    if permission[0] == \"IMAGE_SCN_CNT_CODE\":\n                        contains_code = True\n                    if permission[0] == \"IMAGE_SCN_CNT_INITIALIZED_DATA\":\n                        contains_init = True\n\n            section_data = {\n                \"name\": (section.Name.decode(\"utf-8\").replace(\"\\x00\", \"\") if not section.Name is None else \"\"),\n                \"virtual_address\": hex(section.VirtualAddress),\n                \"virtual_size\": hex(section.Misc_VirtualSize),\n                \"section_raw_size\": hex(section.SizeOfRawData),\n                \"read\": section_read,\n                \"write\": section_write,\n                \"execute\": section_execute,\n                \"contains_code\": contains_code,\n                \"contains_init\": contains_init,\n                \"entropy\": section.get_entropy(),\n            }\n\n            sections.append(section_data)\n\n        _data[\"sections\"] = sections\n        # endregion\n\n        # region Attr/Import Collection\n        cnt = 0\n\n        imports = {}\n        if hasattr(_pe, \"DIRECTORY_ENTRY_IMPORT\"):\n            for entry in _pe.DIRECTORY_ENTRY_IMPORT:\n                library_name = (entry.dll.decode(\"utf-8\").replace(\"\\x00\", \"\") if not entry.dll is None else \"\")\n                functions = {}\n\n                for func in entry.imports:\n                    imp_name = func.name\n                    if not imp_name:\n                        imp_name = str(func.ordinal)\n                    else:\n                        imp_name = (imp_name.decode(\"utf-8\").replace(\"\\x00\", \"\") if not imp_name is None else \"\")\n                    \n                    functions[imp_name] = hex(func.address)\n\n                imports[library_name] = [functions]\n\n\n        _data[\"imports\"] = imports\n\n        _data[\"total_imports\"] = sum(len(v[0]) for v in imports.values())  # number of imported functions across all libraries\n\n        cnt = 0\n        export_data = []\n        if hasattr(_pe, \"DIRECTORY_ENTRY_EXPORT\"):\n            for entry in _pe.DIRECTORY_ENTRY_EXPORT.symbols:\n                exp_name = (entry.name.decode(\"utf-8\") if not entry.name is None else \"\")\n\n                if not exp_name:\n                    exp_name = str(entry.ordinal)\n                else:\n                    exp_name = exp_name.replace(\"\\x00\", \"\")\n\n                export_data.append({exp_name: hex(entry.address)})\n                cnt = cnt + 1\n\n        _data[\"exports\"] = export_data\n\n        _data[\"total_exports\"] = cnt\n        # endregion\n        \n        return {\"parser\" : \"PEParser\", \"data\" : _data}\n    # endregion\n", "repo_name": "jstrosch/subparse", 
"sub_path": "parser/src/parsers/PEParser.py", "file_name": "PEParser.py", "file_ext": "py", "file_size_in_byte": 8199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 49, "dataset": "github-code", "pt": "50", "api": [{"api_name": "src.helpers.Command", "line_number": 9, "usage_type": "name"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "src.helpers.logging.SubParserLogger", "line_number": 59, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 91, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 92, "usage_type": "attribute"}, {"api_name": "string.punctuation", "line_number": 93, "usage_type": "attribute"}, {"api_name": "string.whitespace", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pefile.PE", "line_number": 149, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 153, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 154, "usage_type": "call"}, {"api_name": "pefile.retrieve_flags", "line_number": 180, "usage_type": "call"}, {"api_name": "pefile.SECTION_CHARACTERISTICS", "line_number": 180, "usage_type": "argument"}, {"api_name": "typing.Any", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "36664650267", "text": "import psycopg2\nimport traceback\nfrom backend.DBManager import DBManager\nfrom backend.PedidoProduto import PedidoProduto\n\nclass PedidoProdutoDAO:\n\n def listar_todos(self) -> list:\n \"retorna todos os pedido_produtos\"\n\n pedido_produto_list = []\n try:\n connection = DBManager.connect_with_database()\n\n cursor = connection.cursor()\n cursor.execute(\"SELECT pedido_id, produto_id, quantidade FROM pedido_produto\")\n\n rows_in_table = cursor.fetchall()\n for row in rows_in_table:\n pp = PedidoProduto()\n pp.pedido_id = row[0]\n pp.produto_id = row[1]\n pp.quantidade = row[2]\n\n pedido_produto_list.append(pp)\n\n except (Exception, psycopg2.Error) as error:\n traceback.print_exc()\n finally:\n if connection:\n cursor.close()\n connection.close()\n return pedido_produto_list\n\n\n def listar(self, _pedido_id, _produto_id) -> PedidoProduto:\n \"retorna uma linha de pedido_produto. Param: pedido_id e produto_id\"\n\n pedido_produto = None\n\n try:\n connection = DBManager.connect_with_database()\n\n cursor = connection.cursor()\n cursor.execute(f\"SELECT pedido_id, produto_id, quantidade FROM pedido_produto WHERE pedido_id = {_pedido_id } AND produto_id = {_produto_id}\")\n\n row = cursor.fetchone()\n\n if row is not None and len(row) > 0:\n pedido_produto = PedidoProduto()\n pedido_produto.pedido_id = row[0]\n pedido_produto.produto_id = row[1]\n pedido_produto.quantidade = row[2]\n\n\n except (Exception, psycopg2.Error) as error:\n traceback.print_exc()\n finally:\n if connection:\n cursor.close()\n connection.close()\n return pedido_produto\n \n\n def adicionar(self, _pedido_id, _produto_id, _quantidade) -> bool:\n \"Adiciona um novo pedido_produto no banco de dados. 
params: pedido_id, produto_id e quantidade\"\n\n success = False\n try:\n connection = DBManager.connect_with_database()\n\n cursor = connection.cursor()\n cursor.execute(f\"INSERT INTO pedido_produto ( pedido_id, produto_id, quantidade) VALUES ({_pedido_id}, {_produto_id}, {_quantidade})\")\n \n connection.commit()\n\n if cursor.rowcount == 1:\n success = True\n\n except (Exception, psycopg2.Error) as error:\n traceback.print_exc()\n finally:\n if connection:\n cursor.close()\n connection.close()\n return success\n \n\n def atualizar(self, _pedido_id, _produto_id, _quantidade) -> bool:\n \"Atualiza a quantidade de produtos de um estoque no banco de dados. params: estoque.id, produto.id e quantidade\"\n\n success = False\n try:\n connection = DBManager.connect_with_database()\n\n cursor = connection.cursor()\n cursor.execute(f\"UPDATE pedido_produto SET quantidade = {_quantidade} WHERE pedido_id = {_pedido_id } AND produto_id = {_produto_id}\")\n connection.commit()\n if cursor.rowcount == 1:\n success = True\n except (Exception, psycopg2.Error) as error:\n traceback.print_exc()\n finally:\n if connection:\n cursor.close()\n connection.close()\n return success\n\n\n def remover(self, _pedido_id, _produto_id) -> bool:\n \"Remove um produto em um pedido. params: pedido.id e produto.id\"\n\n success = False\n try:\n connection = DBManager.connect_with_database()\n\n cursor = connection.cursor()\n cursor.execute(f\"DELETE FROM pedido_produto WHERE pedido_id = {_pedido_id} AND produto_id = {_produto_id}\")\n connection.commit()\n if cursor.rowcount == 1:\n success = True\n except (Exception, psycopg2.Error) as error:\n traceback.print_exc()\n finally:\n if connection:\n cursor.close()\n connection.close()\n return success", "repo_name": "paulohgs/wofi", "sub_path": "backend/PedidoProdutoDAO.py", "file_name": "PedidoProdutoDAO.py", "file_ext": "py", "file_size_in_byte": 4263, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 13, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 13, "usage_type": "name"}, {"api_name": "backend.PedidoProduto.PedidoProduto", "line_number": 20, "usage_type": "call"}, {"api_name": "psycopg2.Error", "line_number": 27, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 28, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 42, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 42, "usage_type": "name"}, {"api_name": "backend.PedidoProduto.PedidoProduto", "line_number": 50, "usage_type": "call"}, {"api_name": "psycopg2.Error", "line_number": 56, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 57, "usage_type": "call"}, {"api_name": "backend.PedidoProduto.PedidoProduto", "line_number": 36, "usage_type": "name"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 70, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 70, "usage_type": "name"}, {"api_name": "psycopg2.Error", "line_number": 80, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 81, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 94, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 94, "usage_type": "name"}, 
{"api_name": "psycopg2.Error", "line_number": 101, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 102, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager.connect_with_database", "line_number": 115, "usage_type": "call"}, {"api_name": "backend.DBManager.DBManager", "line_number": 115, "usage_type": "name"}, {"api_name": "psycopg2.Error", "line_number": 122, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "27069664866", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.post_list, name='posts'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^post/(?P[-\\w]+)/publish/$', views.post_publish, name='post_publish'),\n url(r'^post/(?P[-\\w]+)/edit/$', views.post_edit, name='post_edit'),\n url(r'^post/(?P[-\\w]+)/remove/$', views.post_remove, name='post_remove'),\n url(r'^post/(?P[-\\w]+)/restore/$', views.post_restore, name='post_restore'),\n url(r'^post/(?P[-\\w]+)/delete/$', views.post_delete, name='post_delete'),\n url(r'^post/(?P[-\\w]+)/(?P\\d+)/$', views.update_reaction, name='update_reaction'),\n url(r'^post/(?P[-\\w]+)/$', views.post_view, name='post_view'),\n url(r'^drafts/$', views.post_draft_list, name='post_draft_list'),\n url(r'^deleted/$', views.post_deleted_list, name='post_deleted_list'),\n url(r'^category/new/$', views.category_new, name='category_new'),\n url(r'^category/(?P[-\\w]+)/edit/$', views.category_edit, name='category_edit'),\n url(r'^comment/(?P\\d+)/approve/$', views.comment_approve, name='comment_approve'),\n url(r'^comment/(?P\\d+)/remove/$', views.comment_remove, name='comment_remove'),\n url(r'^comment/(?P[-\\w]+)/edit-comment/(?P\\d+)/$', views.comment_edit, name='comment_edit'),\n url(r'^category/(?P[-\\w]+)/$', views.change_category, name='change_category'),\n url(r'^category/(?P\\d+)/delete/$', views.category_delete, name='category_delete'),\n]", "repo_name": "Ebonsignori/archived-ebonsignori.com", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1543, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": 
"django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "19926108935", "text": "import time\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n## Constants.\n\n\nLAST_YEAR = 2022\nURL_ROOT = \"https://www.baseball-reference.com\"\nBOLD_START = \"\\033[1m\"\nBOLD_END = \"\\033[0m\"\n\n\n## Main.\n\n\ndef main():\n matchups = get_todays_matchups()\n print(f\"\\n\\t{len(matchups)} matchups\\n\")\n\n team_batting_1st_inning_runs_per_inning = (\n get_1st_inning_runs_per_inning_for_batting_teams(LAST_YEAR)\n )\n\n for matchup_idx, matchup in enumerate(matchups):\n team_abbrev = matchup[\"team_abbrev\"]\n pitcher_player_id = matchup[\"pitcher_player_id\"]\n pitcher_name = matchup[\"pitcher_name\"]\n\n team_batting_runs_per_inning = team_batting_1st_inning_runs_per_inning[\n team_abbrev\n ]\n pitcher_runs_per_inning = get_1st_inning_runs_per_inning_for_pitcher(\n pitcher_player_id, LAST_YEAR\n )\n\n print(\n f\"\\t{matchup_idx + 1}.\"\n f\" {team_abbrev} batting\"\n f\" ({BOLD_START}{display_number(team_batting_runs_per_inning)}{BOLD_END} runs per inning)\"\n f\" vs.\"\n f\" {pitcher_name} pitching\"\n f\" ({BOLD_START}{display_number(pitcher_runs_per_inning)}{BOLD_END} runs per inning)\"\n )\n\n print(\"\\nPress Enter to exit...\")\n\n input()\n\n\n## Helpers.\n\n\nlast_get = -1\n\n\ndef rate_limited_get(*args, **kwargs):\n \"\"\"\n This function is for abiding by the website's rules and avoiding getting blocked\n (https://www.sports-reference.com/bot-traffic.html).\n \"\"\"\n global last_get\n while time.time() - last_get < 3.1:\n time.sleep(0.1)\n\n results = requests.get(*args, **kwargs)\n\n last_get = time.time()\n\n return results\n\n\ndef display_number(x):\n if x is None:\n return \"N/A\"\n else:\n return \"{:.2f}\".format(x)\n\n\ndef get_todays_matchups():\n game_previews_url = f\"{URL_ROOT}/previews\"\n fetch_res = rate_limited_get(game_previews_url)\n if fetch_res.status_code != 200:\n print(f\"Unable to get today's matchups at {game_previews_url}\")\n return\n\n raw_html = fetch_res.content.decode().replace(\"4 bits, flags-->4 bits)\r\n # if dup = 0 then qos = 0\r\n # calculate remaining length\r\n packet_type = {\r\n # \"Reserved\": '0000', # we use only 4 bits, so the rest 4\r\n # are the length of the packet... 
cannot be null\r\n \"CONNECT\": '00010000',\r\n \"CONNACK\": '00100000',\r\n \"PUBLISH_DUP0_Q0_R0\": '00110000',\r\n \"PUBLISH_DUP0_Q3_R1\": '00110111',\r\n \"PUBLISH_DUP0_Q3_R0\": '00110110',\r\n \"PUBLISH_DUP0_Q2_R1\": '00110101',\r\n \"PUBLISH_DUP0_Q2_R0\": '00110100',\r\n \"PUBLISH_DUP0_Q1_R1\": '00110011',\r\n \"PUBLISH_DUP0_Q1_R0\": '00110010',\r\n \"PUBLISH_DUP0_Q0_R1\": '00110001',\r\n # \"PUBLISH_DUP1_Q0_R0\": '00111000',\r\n \"PUBLISH_DUP1_Q1_R0\": '00111010',\r\n \"PUBLISH_DUP1_Q2_R0\": '00111100',\r\n \"PUBLISH_DUP1_Q3_R0\": '00111110',\r\n # \"PUBLISH_DUP1_Q0_R1\": '00111001',\r\n \"PUBLISH_DUP1_Q1_R1\": '00111011',\r\n \"PUBLISH_DUP1_Q2_R1\": '00111101',\r\n \"PUBLISH_DUP1_Q3_R1\": '00111111',\r\n \"PUBACK\": '01000000',\r\n \"PUBREC\": '01010000',\r\n \"PUBREL\": '01100010',\r\n \"PUBCOMP\": '01110000',\r\n \"SUBSCRIBE\": '10000010',\r\n \"SUBACK\": '10010000',\r\n \"UNSUBSCRIBE\": '10100010',\r\n \"UNSUBACK\": '10110000',\r\n \"PINGREQ\": '11000000',\r\n \"PINGRESP\": '11010000',\r\n \"DISCONNECT\": '11100000'\r\n }\r\n bits_to_hex = [create_hex_from_bits(value) for value in packet_type.values()]\r\n return bits_to_hex\r\n\r\n\r\n# REFERENCES: https://www.rabbitmq.com/resources/specs/amqp0-9-1.pdf\r\ndef is_amqp(payload, current_packet, type_protocol): # type_protocol = TCP/UDP\r\n \"\"\"Decides whether the packet is amqp or not\"\"\"\r\n answer = \"no\"\r\n if 'AMQP' in str(current_packet.layers):\r\n answer = \"yes\"\r\n return answer\r\n if \"AMQP\" in payload: # initiates the connection (first amqp packet)\r\n answer = \"yes\"\r\n return answer\r\n else:\r\n if len(payload) >= 8: # Header, having a fixed size (8 byte);\r\n # frame end is always %xce\r\n end_code = \"\\\\xce\"\r\n index = payload.find(end_code)\r\n right_index = len(payload) - 5\r\n if index == right_index:\r\n answer = \"yes\"\r\n return answer\r\n # also packets from given ports\r\n src_port = current_packet[type_protocol].srcport\r\n dst_port = current_packet[type_protocol].dstport\r\n if (src_port == 5671) or (dst_port == 5671) or (src_port == 5672) or (dst_port == 5672):\r\n answer = \"yes\"\r\n return answer\r\n\r\n return answer\r\n\r\n\r\ndef create_coap_first_byte():\r\n # version is always 01 = coap version 1\r\n ver = '01'\r\n my_types = {\"confirmable\": '00', \"Non-cofirmable\": '01', \"Acknowledgement\": '10', \"Reset\": '11'}\r\n token_length = {0: '0000', 1: '0001', 2: '0010', 3: '0011', 4: '0100', 5: '0101', 6: '0110', 7: '0111',\r\n 8: '1000'}\r\n first_byte = []\r\n for protocol_type, token in product(my_types.values(), token_length.values()): # removed nested loop\r\n combination = ver + protocol_type + token\r\n hex_combination = create_hex_from_bits(combination)\r\n if hex_combination not in first_byte:\r\n # create possible combinations\r\n first_byte.append(hex_combination)\r\n return first_byte\r\n\r\n\r\ndef is_coap(payload, current_packet, type_protocol, first_byte):\r\n # first byte is the list with the possible combinations for a coap byte\r\n answer = \"no\"\r\n if 'COAP' in str(current_packet.layers):\r\n answer = \"yes\"\r\n return answer\r\n if len(payload) >= 4: # fixed header 4 bytes\r\n for byte in first_byte:\r\n if byte in payload[:7]:\r\n answer = \"yes\"\r\n return answer\r\n\r\n src_port = current_packet[type_protocol].srcport\r\n dst_port = current_packet[type_protocol].dstport\r\n if (src_port == 5683) or (dst_port == 5683):\r\n answer = \"yes\"\r\n return answer\r\n\r\n return answer\r\n\r\n\r\ndef is_mqtt(payload, list_with_codes, current_packet, 
type_protocol):\r\n \"\"\"decide if the packet uses mqtt or not\"\"\"\r\n answer = \"no\"\r\n if 'MQTT' in str(current_packet.layers):\r\n answer = \"yes\"\r\n return answer\r\n if \"MQTT\" in payload:\r\n answer = \"yes\"\r\n return answer\r\n\r\n for mqtt_code in list_with_codes: # e.g xe0\r\n exact_code = \"\\\\\" + mqtt_code + \"\\\\\" # e.g \\xe0\\\r\n if exact_code in payload[:7]:\r\n answer = \"yes\"\r\n return answer\r\n\r\n # also packets from given ports\r\n src_port = current_packet[type_protocol].srcport\r\n dst_port = current_packet[type_protocol].dstport\r\n if (src_port == 1883) or (dst_port == 1883) or (src_port == 8883) or (dst_port == 8883):\r\n answer = \"yes\"\r\n return answer\r\n\r\n return answer\r\n\r\n\r\ndef if_file_exists_delete_it():\r\n \"\"\"because write_all method appends on file, we need to delete it if was previously existed\r\n in order to avoid a wrong output\"\"\"\r\n if os.path.exists(\"all.csv\"):\r\n print(\"file deleted\")\r\n os.remove(\"all.csv\")\r\n\r\n\r\ndef is_malicious_dataset():\r\n \"\"\"check if the packets come from honeypot or not\"\"\"\r\n value = input(\"Are the packets from the honeypot? (yes/no answer)\")\r\n if value.lower() == \"yes\":\r\n print(\"malicious dataset\")\r\n return True\r\n else:\r\n print(\"the dataset is not malicious\")\r\n return False\r\n\r\n\r\ndef write_headers():\r\n \"\"\"will write only the headers\"\"\"\r\n if os.path.exists(\"all.csv\"):\r\n print(\"output already exists\")\r\n else:\r\n fileName = 'all.csv'\r\n with open(fileName, 'w', newline='') as csv_file:\r\n csv_file = csv.writer(csv_file, delimiter=',')\r\n csv_file.writerow(\r\n ['app_protocol', 'transport_protocol', 'layer', 'mac_src', 'mac_dst', 'src_ip', 'dst_ip',\r\n 'src_port', 'dst_port', 'pkt_size', 'is_encrypted', 'payload size', 'payload_ratio', 'previous_ratio',\r\n 'packet_time_diff', 'payload', 'p_date', 'p_time', 'flag'])\r\n\r\n\r\ndef write_all(app_protocol, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip,\r\n src_port, dst_port, pkt_size, is_encrypted, payload_size, payload_ratio, previous_ratio,\r\n packet_time_diff, payload, p_date, p_hour,flag):\r\n \"\"\"creates a file with all packets\"\"\"\r\n fileName = 'all.csv'\r\n # append mode\r\n with open(fileName, 'a', newline='') as csv_file: # automatically close the file\r\n csv_file = csv.writer(csv_file, delimiter=',')\r\n\r\n csv_file.writerow([app_protocol, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip,\r\n src_port, dst_port, pkt_size, is_encrypted, payload_size, payload_ratio, previous_ratio,\r\n packet_time_diff, payload, p_date, p_hour, flag])\r\n\r\n # References: https://stackoverflow.com/questions/4959741/python-print-mac-address-out-of-6-byte-string\r\n\r\n\r\ndef prettify(mac_string):\r\n \"\"\"convert byte format to hex format, about mac\"\"\"\r\n return ':'.join('%02x' % ord(b) for b in mac_string)\r\n\r\n\r\ndef extract_characteristics_from_packet(pkt, previous_packet_time, previous_packet_size, payload):\r\n \"\"\" this method extracts the most important characteristics of the packets, probably will be used in Ml\"\"\"\r\n # 1st vital characteristic is packet length\r\n pkt_size = len(pkt)\r\n # 2nd vital characteristic is whether the packet is encrypted\r\n is_encrypted = 0\r\n layer_level = 0\r\n searching_layers = True # e.g. 
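The packet_type table above encodes each MQTT fixed-header first byte as a 4-bit control packet type (high nibble) plus DUP/QoS/RETAIN flag bits (low nibble). Decoding a captured byte goes the other way; a small sketch:

def decode_mqtt_first_byte(byte_val):
    # High nibble: control packet type; low nibble: DUP (1 bit),
    # QoS (2 bits), RETAIN (1 bit).
    packet_type = (byte_val >> 4) & 0x0F
    dup = (byte_val >> 3) & 0x01
    qos = (byte_val >> 1) & 0x03
    retain = byte_val & 0x01
    return packet_type, dup, qos, retain

# 0x32 == 0b00110010 -> (3, 0, 1, 0): PUBLISH, DUP=0, QoS=1, RETAIN=0,
# matching the "PUBLISH_DUP0_Q1_R0" entry in the table above.
print(decode_mqtt_first_byte(0x32))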
Ethernet, Ip, Icmp, Raw\r\n while searching_layers:\r\n layer = pkt.layers\r\n if layer is not None:\r\n if 'SSL' in layer: # encryption protocols\r\n is_encrypted = 1 # encrypted packet\r\n else:\r\n searching_layers = False\r\n layer_level += 1 # check next layer\r\n # 3rd vital characteristic is the payload size\r\n # 4rth vital characteristic is the payload ratio\r\n payload_size = len(payload)\r\n payload_ratio = (payload_size / pkt_size) * 100\r\n # 5th vital characteristic is previous packet ratio\r\n # defaults to 0 for the first packet of the session\r\n if previous_packet_size != 0:\r\n previous_ratio = (pkt_size / previous_packet_size) * 100\r\n else:\r\n previous_ratio = 1\r\n previous_packet_size = pkt_size\r\n # 6th vital characteristic is time difference\r\n if previous_packet_time != 0:\r\n packet_time_diff = pkt.sniff_time - previous_packet_time\r\n else:\r\n packet_time_diff = 0\r\n previous_packet_time = pkt.sniff_time\r\n\r\n # 7th vital characteristic is payload content\r\n # convert to hex type\r\n payload_fix_format_type = binascii.hexlify(bytes(payload))\r\n\r\n return pkt_size, is_encrypted, payload_size, payload_ratio, previous_packet_time, \\\r\n previous_packet_size, previous_ratio, packet_time_diff, payload_fix_format_type\r\n\r\n\r\ndef find_first_layer_protocol(pkt):\r\n \"\"\"get mac address src and dst and first layer\"\"\"\r\n try:\r\n mac_src_in_bytes = str(pkt.eth.src)\r\n mac_src = prettify(mac_src_in_bytes)\r\n mac_dst_in_bytes = str(pkt.eth.dst)\r\n mac_dst = prettify(mac_dst_in_bytes)\r\n layer = \"Ethernet\"\r\n except AttributeError:\r\n mac_src = \"\"\r\n mac_dst = \"\"\r\n layer = \"CookedLinux\"\r\n\r\n return mac_src, mac_dst, layer\r\n\r\n\r\ndef get_date_and_time(pkt):\r\n \"\"\"get the date and the time of the packet\"\"\"\r\n pkt_time = int(float(pkt.sniff_timestamp))\r\n p_date = datetime.fromtimestamp(pkt_time).strftime('%Y-%m-%d ') # format: 2020-10-08\r\n p_hour = datetime.fromtimestamp(pkt_time).strftime('%H:%M:%S') # format: 22:40:41\r\n return p_date, p_hour\r\n\r\n\r\ndef get_ip_addresses(pkt):\r\n \"\"\"get src and dst mac\"\"\"\r\n # get ip addresses\r\n src_ip = pkt[\"IP\"].src\r\n dst_ip = pkt[\"IP\"].dst\r\n return src_ip, dst_ip\r\n\r\n\r\ndef get_ports(pkt, type_protocol):\r\n \"\"\"get ports: dst and src\"\"\"\r\n src_port = pkt[type_protocol].srcport\r\n dst_port = pkt[type_protocol].dstport\r\n print(\"Port: \" + src_port)\r\n return src_port, dst_port\r\n\r\n\r\ndef store_data(app_protocol, pkt, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip, src_port, dst_port, p_date,\r\n p_hour, payload, flag):\r\n global previous_packet_size\r\n global previous_packet_time\r\n\r\n pkt_size, is_encrypted, payload_size, payload_ratio, previous_packet_time, \\\r\n previous_packet_size, previous_ratio, packet_time_diff, payload = extract_characteristics_from_packet(\r\n pkt, previous_packet_time, previous_packet_size, payload)\r\n\r\n write_all(app_protocol, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip,\r\n src_port, dst_port, pkt_size, is_encrypted, payload_size, payload_ratio, previous_ratio,\r\n packet_time_diff, payload, p_date, p_hour, flag)\r\n\r\n\r\ndef pcap_pkt_reader():\r\n \"\"\"extracts the basic information of the packets, only for the 3 basic IoT protocols\"\"\"\r\n file_name = 'save.pcap'\r\n # if this file is not in the system...\r\n if not os.path.isfile(file_name):\r\n print('\"{}\" does not exist'.format(file_name), file=sys.stderr)\r\n sys.exit(-1)\r\n # if the file is in the system...\r\n else:\r\n 
print('file found!')\r\n # packets_list = rdpcap(file_name)\r\n\r\n tcp_counter = 0\r\n udp_counter = 0\r\n mqtt_counter = 0\r\n coap_counter = 0\r\n amqp_counter = 0\r\n\r\n # initialise list with mqtt codes\r\n list_with_mqtt_codes = create_mqtt_first_byte()\r\n list_with_coap_codes = create_coap_first_byte()\r\n\r\n # counter = 0\r\n pkt_id = 0\r\n # if_file_exists_delete_it() # delete the previous output file\r\n write_headers() # initialise the file\r\n flag = is_malicious_dataset()\r\n try:\r\n packets = pyshark.FileCapture(file_name)\r\n print(\"read packets\")\r\n for pkt in packets:\r\n pkt_id += 1\r\n print(pkt_id)\r\n # date and time of the packet\r\n p_date, p_hour = get_date_and_time(pkt)\r\n\r\n if ('HTTP') not in str(pkt.layers):\r\n # TCP CASE\r\n if \"TCP\" in pkt.transport_layer:\r\n try:\r\n type_protocol = \"TCP\"\r\n payload = pkt.tcp.payload\r\n payload = bytes.fromhex(payload.replace(\":\", \"\"))\r\n except AttributeError:\r\n payload = \"\"\r\n continue\r\n if \"UDP\" in pkt.transport_layer:\r\n try:\r\n type_protocol = \"UDP\"\r\n print(type_protocol)\r\n if 'COAP' in str(pkt.layers):\r\n print(\"COAP\")\r\n payload = pkt.data.data\r\n payload = bytes.fromhex(payload.replace(\":\", \"\"))\r\n except AttributeError:\r\n payload = \"\"\r\n continue\r\n\r\n # get mac addresses and first layer\r\n mac_src, mac_dst, layer = find_first_layer_protocol(pkt)\r\n\r\n # get ip addresses\r\n src_ip, dst_ip = get_ip_addresses(pkt)\r\n\r\n # get port numbers\r\n src_port, dst_port = get_ports(pkt, type_protocol)\r\n\r\n # check for IoΤ protocols\r\n # search tcp payload in order to find the application layer level\r\n\r\n str_payload = str(payload)\r\n # print(\"Payload: \"+str_payload)\r\n if str_payload: # not b'' TCP payload is zero so there is no header fro the application layer\r\n\r\n # capture AMQP protocol:\r\n amqp_answer = is_amqp(str_payload, pkt, type_protocol)\r\n if amqp_answer == \"yes\":\r\n amqp_counter += 1\r\n app_protocol = \"AMQP\"\r\n # store the information about amqp protocol\r\n store_data(app_protocol, pkt, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip, src_port,\r\n dst_port, p_date, p_hour, payload,flag)\r\n else:\r\n # capture mqtt protocol\r\n mqtt_answer = is_mqtt(str_payload, list_with_mqtt_codes, pkt, type_protocol)\r\n if mqtt_answer == \"yes\":\r\n mqtt_counter += 1\r\n app_protocol = \"MQTT\"\r\n # store the information about mqtt protocol\r\n store_data(app_protocol, pkt, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip,\r\n src_port, dst_port, p_date, p_hour, payload,flag)\r\n else:\r\n # capture Coap protocol:\r\n coap_answer = is_coap(str_payload, pkt, type_protocol, list_with_coap_codes)\r\n if coap_answer == \"yes\":\r\n coap_counter += 1\r\n app_protocol = \"COAP\"\r\n store_data(app_protocol, pkt, type_protocol, layer, mac_src, mac_dst, src_ip, dst_ip,\r\n src_port, dst_port, p_date, p_hour, payload, flag)\r\n finally:\r\n packets.close()\r\n\r\n print(\"end\")\r\n # print(f\"we have {udp_counter} udp packets.\")\r\n # print(f\"we have {mqtt_counter} MQTT packets.\")\r\n # print(f\"we have {coap_counter} CoAp packets.\")\r\n # print(f\"we have {amqp_counter} AMQP packets.\")\r\n\r\n\r\npcap_pkt_reader()\r\n", "repo_name": "MarilenaKokkini/IoT-Packets-Classification-Thesis-Aueb", "sub_path": "final_packet_reader.py", "file_name": "final_packet_reader.py", "file_ext": "py", "file_size_in_byte": 16451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": 
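pcap_pkt_reader above drives everything through pyshark.FileCapture. Stripped of the protocol heuristics, the read-and-close pattern it relies on is just this (file name as in the script):

import pyshark

capture = pyshark.FileCapture('save.pcap')
try:
    for pkt in capture:
        # transport_layer is None for packets without a TCP/UDP layer.
        if pkt.transport_layer == 'TCP':
            print(pkt.sniff_time, pkt.tcp.srcport, pkt.tcp.dstport)
finally:
    capture.close()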
[{"api_name": "itertools.product", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path", "line_number": 177, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 182, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 196, "usage_type": "call"}, {"api_name": "binascii.hexlify", "line_number": 246, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 271, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 271, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 272, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 272, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path", "line_number": 310, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 311, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 312, "usage_type": "call"}, {"api_name": "pyshark.FileCapture", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "6873244494", "text": "\"\"\"Test case for chess.py.\"\"\"\n\nimport unittest\nfrom chess import Chessercise\n\n\nclass ChessTestCase(unittest.TestCase):\n \"\"\"ChessTestCase class for unit testing Chessercise class.\"\"\"\n def setUp(self):\n \"\"\"Setup() method.\"\"\"\n self.obj = Chessercise()\n\n def test_find_coordinates(self):\n \"\"\"Test case for find coordinates function.\"\"\"\n # \"D2\" is algebraic notation of cooridnate (3,1)\n input1 = 'd2'\n res = self.obj.find_coordinates(input1)\n self.assertEqual((3, 1), res)\n\n input2 = 'd9'\n res = self.obj.find_coordinates(input2)\n self.assertNotEqual(res, (3, 1))\n\n def test_get_algebraic_coordinate(self):\n \"\"\"Test case for get algebraic coordinate function.\"\"\"\n # (3,1) are the coordinates for algebraic notation d2\n input1 = [3, 1]\n res = self.obj.get_algebraic_coordinate(input1[0], input1[1])\n self.assertEqual('d2', res)\n\n input2 = [9, 9]\n res = self.obj.get_algebraic_coordinate(input2[0], input2[1])\n self.assertNotEqual(res, (9, 9))\n\n def test_find_queen_possible_moves(self):\n \"\"\"Test case for find possible queen moves function.\"\"\"\n input1 = 'g5'\n res = self.obj.find_queen_possible_moves(input1)\n self.assertEqual(res, 'h6, h4, f6, e7, d8, f4, e3, d2, c1, a5, b5, '\n 'c5, d5, e5, f5, h5, g1, g2, g3, g4, g6, g7, g8')\n\n def test_find_knight_possible_moves(self):\n \"\"\"Test case for find possible knight moves function.\"\"\"\n input1 = 'g4'\n res = self.obj.find_knight_possible_moves(input1)\n self.assertEqual(res, 'h6, h2, f6, f2, e5, e3')\n input2 = 'a1'\n res = self.obj.find_knight_possible_moves(input2)\n self.assertEqual(res, 'b3, c2')\n input3 = 'a8'\n res = self.obj.find_knight_possible_moves(input3)\n self.assertEqual(res, 'b6, c7')\n input4 = 'h1'\n res = self.obj.find_knight_possible_moves(input4)\n self.assertEqual(res, 'g3, f2')\n input5 = 'h8'\n res = self.obj.find_knight_possible_moves(input5)\n self.assertEqual(res, 'g6, f7')\n\n def test_find_rook_possible_moves(self):\n \"\"\"Test case for find rook possible moves function.\"\"\"\n input1 = 'e4'\n res = self.obj.find_rook_possible_moves(input1)\n self.assertEqual(\n res, 'a4, b4, c4, d4, 
f4, g4, h4, e1, e2, e3, e5, e6, e7, e8')\n\n###################### Test Coverage ####################################\n# nosetests test_chess.py - -with-coverage\n# .....\n# Name Stmts Miss Cover Missing\n# ----------------------------------------\n# chess.py 103 18 83 % 162-186\n# ----------------------------------------------------------------------\n# Ran 5 tests in 0.010s\n", "repo_name": "inovizz/chess-moves", "sub_path": "test_chess.py", "file_name": "test_chess.py", "file_ext": "py", "file_size_in_byte": 2791, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "chess.Chessercise", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "32759214121", "text": "from math import atan2, sqrt\nimport pygame as pg\n\nfrom src.util.settings import HEIGHT, WIDTH\n\nclass PlayerController():\n def __init__(self, index):\n self.index = index\n self.queued_ability = -1\n \n def movement_input(self):\n\n keys = pg.key.get_pressed()\n horizontal_input = 0\n vertical_input = 0\n\n if keys[pg.K_w]:\n vertical_input = -1\n\n if keys[pg.K_s]:\n vertical_input = 1\n\n if keys[pg.K_a]:\n horizontal_input = -1\n\n if keys[pg.K_d]:\n horizontal_input = 1\n \n\n return {\n 'i': self.index,\n 'x': horizontal_input,\n 'y': vertical_input\n }\n \n def ability_input(self, entity):\n keys = pg.key.get_pressed()\n # checks which number key is pressed\n for i in range(pg.K_1, pg.K_1+len(entity.abilities[self.index])):\n if keys[i]:\n self.queued_ability = i-pg.K_1\n \n # checks if the input is validated with a left click\n if pg.mouse.get_pressed()[0] and self.queued_ability!=-1:\n angle = atan2(pg.mouse.get_pos()[1]-HEIGHT//2, pg.mouse.get_pos()[0]-WIDTH//2)\n ability = {\n 'i': self.index,\n 'ability': entity.abilities[self.index][self.queued_ability],\n 'angle': angle,\n }\n self.queued_ability = -1\n return ability\n \n # cancel the ability if it is invalidated with the right click\n if pg.mouse.get_pressed()[2] and self.queued_ability!=-1:\n self.queued_ability = -1\n \n return {\n 'i': self.index,\n 'ability': -1,\n 'angle': 0,\n }", "repo_name": "HuMangoPP/hello_universe", "sub_path": "src/game_state/player_controller.py", "file_name": "player_controller.py", "file_ext": "py", "file_size_in_byte": 1745, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.key.get_pressed", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.K_1", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.K_1", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 44, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.mouse.get_pos", "line_number": 45, 
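In ability_input above, the aim angle is the atan2 of the mouse position relative to the screen centre; remember that screen y grows downward, so a positive angle points below the +x axis. A standalone check with illustrative coordinates:

from math import atan2, degrees

mouse_x, mouse_y = 700, 500   # illustrative mouse position
cx, cy = 640, 360             # screen centre (WIDTH//2, HEIGHT//2)
angle = atan2(mouse_y - cy, mouse_x - cx)
print(degrees(angle))  # ~66.8, i.e. aiming down-right on screen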
"usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 45, "usage_type": "attribute"}, {"api_name": "src.util.settings.HEIGHT", "line_number": 45, "usage_type": "name"}, {"api_name": "src.util.settings.WIDTH", "line_number": 45, "usage_type": "name"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "16698138540", "text": "from math import floor\n\nimport torch\n\n\ndef sequential_output_shape(self, shape):\n \"\"\"Computes the output shape of a torch.nn.Sequential.\n\n Optimistically assumes any layer without method does not change shape.\n \"\"\"\n for element in self:\n for cls, method in output_shape_methods.items():\n if isinstance(element, cls):\n shape = method(element, shape)\n break\n\n return shape\n\n\ndef sequential_feature_dim(self):\n \"\"\"Computes the feature dimension of a torch.nn.Sequential.\n\n Returns None if feature dimension cannot be determined.\n \"\"\"\n feature_dim = None\n for element in reversed(self):\n for cls, method in feature_dim_methods.items():\n if isinstance(element, cls):\n feature_dim = method(element)\n if feature_dim is not None:\n return feature_dim\n\n\ndef conv2d_output_shape(module, h_w):\n \"\"\"Computes the output shape of 2d convolutional operators.\"\"\"\n # grab operator properties\n props = module.kernel_size, module.stride, module.padding, module.dilation\n # diagonalize into tuples as needed\n props = [tuple((p, p)) if not isinstance(p, tuple) else p for p in props]\n # \"transpose\" operator properties -- list indices are height/width rather than property id\n props = list(zip(*props))\n\n h = conv1d_output_shape(h_w[0], *props[0]) # calculate h from height parameters of props\n w = conv1d_output_shape(h_w[1], *props[1]) # calculate w from width parameters of props\n\n assert (h > 0) & (w > 0), \"Invalid parameters\"\n\n return h, w\n\n\ndef conv1d_output_shape(lngth, kernel_size, stride, padding, dilation):\n \"\"\"Computes the change in dimensions for a 1d convolutional operator.\"\"\"\n return floor( ((lngth + (2 * padding) - (dilation * (kernel_size - 1)) - 1) / stride) + 1) # noqa\n\n\ndef convtranspose2d_output_shape(*args, **kwargs):\n raise NotImplementedError\n\n\noutput_shape_methods = { # order is important here; torch.nn.Module must be last\n torch.nn.Sequential: sequential_output_shape,\n torch.nn.Conv2d: conv2d_output_shape,\n torch.nn.MaxPool2d: conv2d_output_shape,\n torch.nn.Linear: lambda module, shape: module.out_features,\n torch.nn.AdaptiveAvgPool2d: lambda module, shape: module.output_size,\n torch.nn.Module: lambda module, shape: shape,\n }\n\nfeature_dim_methods = {\n torch.nn.Sequential: sequential_feature_dim,\n torch.nn.Conv2d: lambda module: module.out_channels,\n torch.nn.ConvTranspose2d: lambda module: module.out_channels,\n torch.nn.Linear: lambda module: module.out_features,\n }\n", "repo_name": "wandb/lit_utils", "sub_path": "nn/shapes.py", "file_name": "shapes.py", "file_ext": "py", "file_size_in_byte": 2616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.floor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": 
"attribute"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "13218317060", "text": "\"\"\"Модуль с формой для задания параметров игры.\"\"\"\n\nimport tkinter as tk\n\nimport config as conf\nfrom scripts.gui.base_form import BaseForm\nfrom scripts.game.game_screen import GameManager\n\n\nclass Frame(tk.Frame):\n \"\"\"Создает участок формы для ввода параметров генетического алгоритма.\"\"\"\n\n def __init__(self, master):\n super().__init__(master)\n\n # Метки со значениями полей ввода\n labels = [\n \"Число повторений\", \"Вероятность мутации, %\",\n \"Множитель, x (размер популяции - 10*x)\", \"Количество поворотов\"\n ]\n for i, label in enumerate(labels):\n tk.Label(self, text=label).grid(row=i, column=0, sticky=\"W\")\n\n # Поля для ввода\n self.repeat = tk.Entry(self)\n self.mutation = tk.Entry(self)\n self.bird_multiplier = tk.Entry(self)\n self.rotations = tk.Entry(self)\n\n # Позиционирование полей для ввода\n self.repeat.grid(row=0, column=1, sticky=\"E\", pady=10)\n self.mutation.grid(row=1, column=1, sticky=\"E\", pady=10)\n self.bird_multiplier.grid(row=2, column=1, sticky=\"E\", pady=10)\n self.rotations.grid(row=3, column=1, sticky=\"E\", pady=10)\n\n self.set_default_values()\n\n def set_default_values(self):\n \"\"\"Устанавливает генетические параметры по умолчанию.\"\"\"\n self.repeat.insert(0, \"1\")\n self.mutation.insert(0, \"40\")\n self.bird_multiplier.insert(0, \"1\")\n self.rotations.insert(0, \"10\")\n\n\nclass Form(BaseForm):\n \"\"\"Генерирует форму для передачи данных и запуска игры.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.create_elements()\n self.set_window_params(\"Параметры\", conf.ICONS[\"settings\"], False)\n\n def create_elements(self):\n self.frame = Frame(self)\n self.frame.pack(side=\"top\", padx=10)\n # Кнопка запуска и ее позиционирование\n tk.Button(self, text=\"Запустить\", command=self.launch_game).pack(pady=10)\n\n def transform_data_to_dict(self):\n \"\"\"Возвращает словарь с данными, введенными в форму.\"\"\"\n return {\n \"repeat\": int(self.frame.repeat.get()),\n \"mutation\": int(self.frame.mutation.get()) / 100,\n \"multiplier\": int(self.frame.bird_multiplier.get()),\n \"rotations\": int(self.frame.rotations.get())\n }\n\n def launch_game(self):\n \"\"\"Запускает игру с передачей необходимых параметров.\"\"\"\n manager = GameManager(self.transform_data_to_dict())\n self.destroy()\n manager.set_game_data()\n manager.create_games()\n manager.call_save_form()\n", "repo_name": "neuro-rsu/python-bird", "sub_path": "scripts/gui/game_form.py", "file_name": "game_form.py", "file_ext": "py", "file_size_in_byte": 3056, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Frame", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 25, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 26, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 27, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 28, 
"usage_type": "call"}, {"api_name": "scripts.gui.base_form.BaseForm", "line_number": 46, "usage_type": "name"}, {"api_name": "config.ICONS", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 58, "usage_type": "call"}, {"api_name": "scripts.game.game_screen.GameManager", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "26302689390", "text": "import base64\nimport json\nimport math\nimport os\nimport random\nimport re\nimport ssl\nfrom io import BytesIO\nfrom json.decoder import JSONDecodeError\nfrom traceback import format_exc\n\ntry:\n import certifi\nexcept ImportError:\n certifi = None\n\nfrom PIL import Image, ImageDraw, ImageFont\n \nfrom requests.exceptions import MissingSchema\nfrom telethon import Button\nfrom telethon.tl.types import DocumentAttributeAudio, DocumentAttributeVideo\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\n\nasync def async_searcher(\n url: str,\n post: bool = None,\n headers: dict = None,\n params: dict = None,\n json: dict = None,\n data: dict = None,\n ssl=None,\n re_json: bool = False,\n re_content: bool = False,\n real: bool = False,\n *args,\n **kwargs,\n):\n try:\n import aiohttp\n except ImportError:\n raise DependencyMissingError(\n \"'aiohttp' is not installed!\\nthis function requires aiohttp to be installed.\"\n )\n async with aiohttp.ClientSession(headers=headers) as client:\n if post:\n data = await client.post(\n url, json=json, data=data, ssl=ssl, *args, **kwargs\n )\n else:\n data = await client.get(url, params=params, ssl=ssl, *args, **kwargs)\n if re_json:\n return await data.json()\n if re_content:\n return await data.read()\n if real:\n return data\n return await data.text()\n\n\ndef _unquote_text(text):\n return text.replace(\"'\", \"'\").replace('\"', '\"')\n\n\ndef json_parser(data, indent=None, ascii=False):\n parsed = {}\n try:\n if isinstance(data, str):\n parsed = json.loads(str(data))\n if indent:\n parsed = json.dumps(\n json.loads(str(data)), indent=indent, ensure_ascii=ascii\n )\n elif isinstance(data, dict):\n parsed = data\n if indent:\n parsed = json.dumps(data, indent=indent, ensure_ascii=ascii)\n except JSONDecodeError:\n parsed = eval(data)\n return parsed\n\n\ndef check_filename(filroid):\n if os.path.exists(filroid):\n no = 1\n while True:\n ult = \"{0}_{2}{1}\".format(*os.path.splitext(filroid) + (no,))\n if os.path.exists(ult):\n no += 1\n else:\n return ult\n return filroid\n", "repo_name": "Lover-Music/Lover-Managment-Bot", "sub_path": "Loverbot/quotstuff/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 2398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 81, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 82, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 92, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}]} +{"seq_id": "10951201109", "text": "import torch\nimport numpy as np\n\n\ndef symmetric(A):\n return 0.5 * (A + A.t())\n\ndef is_nan_or_inf(A):\n C1 = torch.nonzero(A == float('inf'))\n C2 = torch.nonzero(A != A)\n if len(C1.size()) > 0 or len(C2.size()) > 0:\n return True\n return False\n\ndef is_pos_def(x):\n x = x.cpu().numpy()\n return np.all(np.linalg.eigvals(x) > 0)\n\ndef matrix_operator(A, operator):\n u, s, v = A.svd()\n if operator == 'sqrtm':\n s.sqrt_()\n elif operator == 'rsqrtm':\n s.rsqrt_()\n elif operator == 'logm':\n s.log_()\n elif operator == 'expm':\n s.exp_()\n else:\n raise('operator %s is not implemented' % operator)\n \n output = u.mm(s.diag().mm(u.t()))\n \n return output\n\ndef tangent_space(A, ref, inverse_transform=False):\n ref_sqrt = matrix_operator(ref, 'sqrtm')\n ref_sqrt_inv = matrix_operator(ref, 'rsqrtm')\n middle = ref_sqrt_inv.mm(A.mm(ref_sqrt_inv))\n if inverse_transform:\n middle = matrix_operator(middle, 'logm')\n else:\n middle = matrix_operator(middle, 'expm')\n out = ref_sqrt.mm(middle.mm(ref_sqrt))\n return out\n\ndef untangent_space(A, ref):\n return tangent_space(A, ref, True)\n\ndef parallel_transform(A, ref1, ref2):\n print(A.size(), ref1.size(), ref2.size())\n out = untangent_space(A, ref1)\n out = tangent_space(out, ref2)\n return out\n\ndef orthogonal_projection(A, B):\n out = A - B.mm(symmetric(B.transpose(0,1).mm(A)))\n return out\n\ndef retraction(A, ref):\n data = A + ref\n Q, R = data.qr()\n # To avoid (any possible) negative values in the output matrix, we multiply the negative values by -1\n sign = (R.diag().sign() + 0.5).sign().diag()\n out = Q.mm(sign)\n return out\n", "repo_name": "adavoudi/spdnet", "sub_path": "spdnet/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 51, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nonzero", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linalg.eigvals", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "70995314084", "text": "from pathlib import Path\n\nimport pytest\n\nfrom mutalyzer_retriever.sources.ensembl import fetch\nfrom mutalyzer_retriever.configuration import settings\n\nAPI_BASE = settings[\"ENSEMBL_API\"]\nAPI_BASE_GRCH37 = settings[\"ENSEMBL_API_GRCH37\"]\n\nAPI_BASE_MAP = {\n \"ENSG00000147889\": {\"version\": 18, \"species\": \"homo_sapiens\"},\n \"ENSMUSG00000022346\": {\"version\": 18, \"species\": \"mus_musculus\"},\n}\nAPI_BASE_GRCH37_MAP = {\"ENSG00000147889\": {\"version\": 12, \"species\": \"homo_sapiens\"}}\n\n\n@pytest.fixture(autouse=True)\ndef patch_retriever(monkeypatch):\n monkeypatch.setattr(\"mutalyzer_retriever.sources.ensembl.fetch_gff3\", _fetch_gff3)\n monkeypatch.setattr(\n \"mutalyzer_retriever.sources.ensembl._get_reference_information\",\n _get_reference_information,\n )\n\n\ndef _get_content(relative_location):\n data_file = Path(__file__).parent.joinpath(relative_location)\n with open(str(data_file), \"r\") as file:\n content = file.read()\n return content\n\n\ndef _fetch_gff3(feature_id, api_base, timeout=1):\n if api_base == API_BASE_GRCH37:\n return _get_content(\n 
f\"data/{feature_id}.{API_BASE_GRCH37_MAP[feature_id]['version']}.gff3\"\n )\n return _get_content(f\"data/{feature_id}.gff3\")\n\n\ndef _get_reference_information(reference_id, api_base, timeout=1):\n if api_base == API_BASE and reference_id in API_BASE_MAP.keys():\n return API_BASE_MAP[reference_id]\n if api_base == API_BASE_GRCH37 and reference_id in API_BASE_GRCH37_MAP.keys():\n return API_BASE_GRCH37_MAP[reference_id]\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSG00000147889\")])\ndef test_ensembl_fetch_no_version(reference_id):\n assert fetch(reference_id)[0] == _get_content(f\"data/{reference_id}.gff3\")\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSG00000147889.18\")])\ndef test_ensembl_fetch_version_newest(reference_id):\n assert fetch(reference_id)[0] == _get_content(f\"data/{reference_id}.gff3\")\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSG00000147889.12\")])\ndef test_ensembl_fetch_version_grch37(reference_id):\n assert fetch(reference_id)[0] == _get_content(f\"data/{reference_id}.gff3\")\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSG00000147889.15\")])\ndef test_ensembl_fetch_other_version(reference_id):\n with pytest.raises(NameError):\n fetch(reference_id)[0]\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSMUSG00000022346.18\")])\ndef test_ensembl_fetch_no_version_mouse(reference_id):\n assert fetch(reference_id)[0] == _get_content(f\"data/{reference_id}.gff3\")\n\n\n@pytest.mark.parametrize(\"reference_id\", [(\"ENSMUSG00000022346\")])\ndef test_ensembl_fetch_version_newest_mouse(reference_id):\n assert fetch(reference_id)[0] == _get_content(f\"data/{reference_id}.gff3\")\n", "repo_name": "mutalyzer/retriever", "sub_path": "tests/test_fetch.py", "file_name": "test_fetch.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mutalyzer_retriever.configuration.settings", "line_number": 8, "usage_type": "name"}, {"api_name": "mutalyzer_retriever.configuration.settings", "line_number": 9, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "call"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 51, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 49, "usage_type": "attribute"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 61, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 59, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 66, "usage_type": "call"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 67, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 64, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 64, "usage_type": "attribute"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 72, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 70, "usage_type": 
"call"}, {"api_name": "pytest.mark", "line_number": 70, "usage_type": "attribute"}, {"api_name": "mutalyzer_retriever.sources.ensembl.fetch", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 75, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "42237229882", "text": "# encoding: utf-8\n\n\"\"\"\nFile: __init__.py.py\nAuthor: Rock Johnson\n\"\"\"\nimport os\nimport sys\nimport shlex\nimport functools\nfrom pkgutil import iter_modules\nfrom argparse import ArgumentParser\nfrom importlib import import_module\nfrom difflib import get_close_matches\n\nfrom commander.base import BaseCommand\n\nPATH = None\n\n__version__ = '1.0.0'\n\n\ndef find_commands(app_dir):\n \"\"\"\n 给定应用程序的目录路径,返回所有可用命令名称的列表.\n \"\"\"\n commands_dir = os.path.join(app_dir, 'commands')\n\n if app_dir not in sys.path:\n sys.path.append(app_dir)\n return [name for _, name, is_pkg in iter_modules([commands_dir])\n if not is_pkg and not name.startswith('_')]\n\n\ndef load_command_class(name):\n \"\"\"\n 给定命令名称和应用程序名称,返回Command类实例.\n \"\"\"\n module = import_module(f'commands.{name}')\n return module.Command()\n\n\n@functools.lru_cache(maxsize=None)\ndef get_commands():\n \"\"\"\n 返回一个列表.\n\n 在应用程序中查找commands软件包,如果存在命令软件包,\n 则在该软件包中注册所有的命令.\n\n 始终包含本应用程序的命令.如果指定了用户应用程序,则还包括用户定义的命令.\n\n 该列表在第一次调用中缓存,并在后续调用中重用.\n \"\"\"\n commands = find_commands(__path__[0])\n\n if PATH:\n commands.extend(find_commands(PATH))\n return commands\n\n\nclass CommanderUtility:\n \"\"\"\n 仿照django命令行解析应用程序开发的命令行解析框架\n \"\"\"\n\n def __init__(self, argv=None, version=None):\n self.argv = argv or sys.argv\n self.version = version or __version__\n self.prog_name = os.path.basename(self.argv[0])\n\n def main_help_text(self, commands_only=False):\n \"\"\"返回脚本的主要帮助信息,字符串格式\"\"\"\n usage = []\n if not commands_only:\n usage.extend([\n f'输入\"{self.prog_name} help \"以获取子命令的帮助信息.',\n '',\n '可用子命令:',\n ''\n ])\n usage.extend(sorted(get_commands()))\n return '\\n'.join(usage)\n\n def fetch_command(self, subcommand):\n \"\"\"\n 尝试获取给定子命令,如果获取不到,则从命令行调用的相应命令打印一条信息.\n \"\"\"\n commands = get_commands()\n if subcommand not in commands:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(f'未知命令: \"{subcommand}\"')\n if possible_matches:\n sys.stderr.write(f'. 
你是要使用\"{possible_matches[0]}\"吗?')\n sys.stderr.write(f'\\n输入\"{self.prog_name} help\"以获取用法.\\n')\n sys.exit(1)\n\n if isinstance(subcommand, BaseCommand):\n klass = subcommand\n else:\n klass = load_command_class(subcommand)\n return klass\n\n def execute(self):\n \"\"\"\n 运行已创建的子命令\n \"\"\"\n try:\n subcommand = self.argv[1]\n except:\n subcommand = 'help'\n\n parser = ArgumentParser(add_help=False, allow_abbrev=False)\n parser.add_argument('args', nargs='*')\n options, args = parser.parse_known_args(self.argv[2:])\n\n if subcommand == 'help':\n if options.args:\n self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])\n else:\n sys.stdout.write(f'{self.main_help_text(commands_only=\"--commands\" in args)}\\n')\n elif subcommand == 'version' or self.argv[1:] in (['-v'], ['--version']):\n sys.stdout.write(f'{self.version}\\n')\n elif self.argv[1:] in (['-h'], ['--help']):\n sys.stdout.write(f'{self.main_help_text()}\\n')\n else:\n self.fetch_command(subcommand).run_from_argv(self.argv)\n\n\ndef execute_from_command_line(path, argv=None, version=None):\n \"\"\"运行CommanderUtility\"\"\"\n if not isinstance(path, str):\n raise TypeError('path必须是字符串')\n\n # 将字符串处理成命令行参数\n if isinstance(argv, str):\n argv = shlex.split(argv)\n\n global PATH\n PATH = path\n utility = CommanderUtility(argv, version)\n utility.execute()\n", "repo_name": "RockJohnson503/commander", "sub_path": "commander/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sys.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pkgutil.iter_modules", "line_number": 31, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 39, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "difflib.get_close_matches", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 94, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 95, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 95, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 96, "usage_type": "call"}, {"api_name": "commander.base.BaseCommand", "line_number": 98, "usage_type": "argument"}, {"api_name": "argparse.ArgumentParser", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 121, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 123, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 123, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", 
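fetch_command above builds its "did you mean" hint from difflib.get_close_matches, which ranks candidates by similarity ratio. In isolation:

from difflib import get_close_matches

commands = ['runserver', 'migrate', 'shell']   # illustrative command list
print(get_close_matches('migrat', commands))   # ['migrate']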
"line_number": 125, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 125, "usage_type": "attribute"}, {"api_name": "shlex.split", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "6857190582", "text": "# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\nimport datetime\nimport calendar \nimport logging\n_logger = logging.getLogger(__name__)\n\nclass FleetVehicleReport(models.TransientModel):\n _name = 'fleet.vehicle.report'\n\n @api.model\n def _default_tags(self):\n vehicle_tags = self.env['fleet.vehicle.tag'].sudo().search([ ] )\n return vehicle_tags.ids \n\n @api.model\n def _default_states(self):\n vehicle_states = self.env['fleet.vehicle.state'].sudo().search([ ] )\n return vehicle_states.ids \n\n # start_date = fields.Date('Start Date', required=True)\n # end_date = fields.Date(string=\"End Date\", required=True)\n tag_ids = fields.Many2many('fleet.vehicle.tag', 'vehicle_report_vehicle_tag_rel', 'vehicle_report_id', 'tag_id', 'Tags', store=True, default=_default_tags )\n state_ids = fields.Many2many('fleet.vehicle.state', 'vehicle_report_vehicle_state_rel', 'vehicle_report_id', 'state_id', 'States', store=True, default=_default_states )\n \n @api.multi\n def action_print(self):\n # vehicles = self.env['fleet.vehicle'].search([ ( 'tag_ids', 'in', self.tag_ids.ids ) ])\n # tag_names = [ tag_id.name for tag_id in self.tag_ids ]\n # # tag_names += [ \"-\" ]\n # final_dict = {}\n # tag_state_dict = {}\n # for tag_name in tag_names:\n # tag_state_dict[ tag_name ] = {}\n # for state_id in self.state_ids :\n # tag_state_dict[ tag_name ][ state_id.name ] = 0\n # for vehicle in vehicles:\n # if vehicle.tag_ids and vehicle.state_id :\n # tag_state_dict[ vehicle.tag_ids[0].name ][ vehicle.state_id.name ] += 1\n\n vehicles = self.env['fleet.vehicle'].search([ ( 'tag_ids', 'in', self.tag_ids.ids ) ])\n state_names = [ state_id.name for state_id in self.state_ids ]\n tag_names = [ tag_id.name for tag_id in self.tag_ids ]\n final_dict = {}\n tag_state_dict = {}\n tag_total_dict = {}\n for tag_name in tag_names:\n tag_state_dict[ tag_name ] = []\n tag_total_dict[ tag_name ] = {\n \"name\" : \"Total\"\n }\n for state_id in self.state_ids :\n tag_total_dict[ tag_name ][ state_id.name ] = 0\n\n for vehicle in vehicles:\n if vehicle.tag_ids and vehicle.state_id :\n row = {}\n row[ \"name\" ] = vehicle.name\n for state_id in self.state_ids :\n row[ state_id.name ] = 1 if vehicle.state_id.name == state_id.name else 0\n tag_state_dict[ vehicle.tag_ids[0].name ] += [ row ]\n tag_total_dict[ vehicle.tag_ids[0].name ][ vehicle.state_id.name ] += 1\n \n final_dict = tag_state_dict\n datas = {\n 'ids': self.ids,\n 'model': 'fleet.vehicle.report',\n 'form': final_dict,\n 'tag_total_dict': tag_total_dict,\n 'state_names': state_names,\n 'date': datetime.datetime.now().strftime(\"%d/%m/%Y\"),\n }\n return self.env['report'].get_action(self,'fleet_report.fleet_vehicle_temp', data=datas)", "repo_name": "madukubah/fleet_report", "sub_path": "wizard/fleet_vehicle_report.py", "file_name": "fleet_vehicle_report.py", "file_ext": "py", "file_size_in_byte": 3191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 10, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 10, "usage_type": "name"}, {"api_name": "odoo.api.model", 
"line_number": 13, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 13, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 25, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 25, "usage_type": "name"}, {"api_name": "odoo.fields.Many2many", "line_number": 26, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "odoo.api.multi", "line_number": 28, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "16651324650", "text": "import os\r\nfrom collections import Counter\r\n\r\nimport torch\r\nfrom keras_preprocessing.text import tokenizer_from_json\r\n\r\nimport sys\r\nsys.path.append('.')\r\n\r\nfrom sklearn import metrics\r\nimport sklearn.datasets\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.datasets import fetch_20newsgroups\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n# import tensorflow.keras as keras\r\n# from tensorflow.keras import backend\r\n# from tensorflow.keras.layers import Dropout, Dense, GRU, Embedding, Input, Conv1D, MaxPooling1D, Concatenate, Flatten\r\n# from tensorflow.keras.models import Model, Sequential\r\n# from tensorflow.keras.preprocessing.text import Tokenizer\r\n# from tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\nfrom keras import backend\r\nfrom keras.layers import Dropout, Dense, GRU, Embedding, Input, Conv1D, MaxPooling1D, Concatenate, Flatten\r\nfrom keras.models import Model, Sequential\r\nfrom keras.preprocessing.text import Tokenizer\r\nfrom keras.preprocessing.sequence import pad_sequences\r\n\r\nfrom util import *\r\nfrom config import *\r\n\r\nnum_cores = 3\r\n\r\ndef get_session():\r\n _config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\r\n inter_op_parallelism_threads=num_cores,\r\n device_count={'CPU': 1, 'GPU': 0})\r\n _session = tf.Session(config=_config)\r\n return _session\r\n\r\nbackend.set_session(get_session())\r\n\r\ndef load_sents(dataset_name):\r\n d = load_custom(os.path.join(annotation_path, 'dataset_{}.json'.format(dataset_name)))['caption_item']\r\n sents = []\r\n for item in d:\r\n for i in item.sentences:\r\n i.split = item.split\r\n sents.extend(item.sentences)\r\n return sents\r\n\r\ndef get_data(sents, style_tag, balance=True):\r\n all_sents = sents\r\n\r\n X = [' '.join(sent.words) for sent in all_sents]\r\n Y = [1 if hasattr(sent, 'tag') and sent.tag == style_tag else 0 for sent in all_sents]\r\n splits = [sent.split for sent in all_sents]\r\n\r\n train_pos, train_neg = [], []\r\n test_pos, test_neg = [], []\r\n\r\n for i, split in enumerate(splits):\r\n # if split == 'test':\r\n if random.random() < 0.15:\r\n if Y[i] == 1:\r\n test_pos.append((X[i], Y[i]))\r\n else:\r\n test_neg.append((X[i], Y[i]))\r\n else:\r\n if Y[i] == 1:\r\n train_pos.append((X[i], Y[i]))\r\n else:\r\n train_neg.append((X[i], Y[i]))\r\n\r\n if balance:\r\n if len(train_pos) > len(train_neg):\r\n train_pos = random.sample(train_pos, k=len(train_neg))\r\n if len(train_neg) > len(train_pos):\r\n train_neg = random.sample(train_neg, k=len(train_pos))\r\n if len(test_neg) > len(test_pos):\r\n test_neg = 
random.sample(test_neg, k=len(test_pos))\r\n    if len(test_pos) > len(test_neg):\r\n        test_pos = random.sample(test_pos, k=len(test_neg))\r\n\r\n    X_train, Y_train = [[i[j] for i in train_pos + train_neg] for j in (0, 1)]\r\n    X_test, Y_test = [[i[j] for i in test_pos + test_neg] for j in (0, 1)]\r\n\r\n    print('train_pos:', len(train_pos), 'train_neg:', len(train_neg))\r\n    print('test_pos:', len(test_pos), 'test_neg:', len(test_neg))\r\n\r\n    return X_train, Y_train, X_test, Y_test\r\n\r\n# TODO: max length -> 25 (or other values)\r\ndef loadData_Tokenizer(X_train, X_test,MAX_NB_WORDS=75000, MAX_SEQUENCE_LENGTH=500):\r\n    np.random.seed(7)\r\n    text = np.concatenate((X_train, X_test), axis=0)\r\n    text = np.array(text)\r\n    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\r\n    tokenizer.fit_on_texts(text)\r\n    sequences = tokenizer.texts_to_sequences(text)\r\n    word_index = tokenizer.word_index\r\n    text = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\r\n    print('Found %s unique tokens.' % len(word_index))\r\n    indices = np.arange(text.shape[0])\r\n    # np.random.shuffle(indices)\r\n    text = text[indices]\r\n    print(text.shape)\r\n    X_train = text[0:len(X_train), ]\r\n    X_test = text[len(X_train):, ]\r\n    embeddings_index = {}\r\n    f = open(\"/media/wentian/sdb2/work/caption_dataset/Chinese Word Vectors/sgns.wiki.word\", encoding=\"utf8\")\r\n    for line in f:\r\n        values = line.split()\r\n        word = values[0]\r\n        if word not in word_index:\r\n            continue\r\n        try:\r\n            coefs = np.asarray(values[1:], dtype='float32')\r\n        except:\r\n            pass\r\n        embeddings_index[word] = coefs\r\n    f.close()\r\n    print('Total %s word vectors.' % len(embeddings_index))\r\n\r\n    return (X_train, X_test, word_index, embeddings_index, tokenizer)\r\n\r\ndef Build_Model_RNN_Text(word_index, embeddings_index, nclasses, MAX_SEQUENCE_LENGTH=500, EMBEDDING_DIM=50, dropout=0.5):\r\n    \"\"\"\r\n    def buildModel_RNN(word_index, embeddings_index, nclasses, MAX_SEQUENCE_LENGTH=500, EMBEDDING_DIM=50, dropout=0.5):\r\n    word_index in word index ,\r\n    embeddings_index is embeddings index, look at data_helper.py\r\n    nClasses is number of classes,\r\n    MAX_SEQUENCE_LENGTH is maximum length of text sequences\r\n    \"\"\"\r\n    model = Sequential()\r\n    hidden_layer = 3\r\n    gru_node = 32\r\n\r\n    embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))\r\n    for word, i in word_index.items():\r\n        embedding_vector = embeddings_index.get(word)\r\n        if embedding_vector is not None:\r\n            # words not found in embedding index will be all-zeros.\r\n            if len(embedding_matrix[i]) != len(embedding_vector):\r\n                print(\"could not broadcast input array from shape\", str(len(embedding_matrix[i])),\r\n                      \"into shape\", str(len(embedding_vector)), \" Please make sure your\"\r\n                      \" EMBEDDING_DIM is equal to embedding_vector file ,GloVe,\")\r\n                exit(1)\r\n            embedding_matrix[i] = embedding_vector\r\n    model.add(Embedding(len(word_index) + 1,\r\n                        EMBEDDING_DIM,\r\n                        weights=[embedding_matrix],\r\n                        input_length=MAX_SEQUENCE_LENGTH,\r\n                        trainable=True))\r\n\r\n    for i in range(0,hidden_layer):\r\n        model.add(GRU(gru_node,return_sequences=True, recurrent_dropout=0.2))\r\n        model.add(Dropout(dropout))\r\n    model.add(GRU(gru_node, recurrent_dropout=0.2))\r\n    model.add(Dropout(dropout))\r\n    model.add(Dense(256, activation='relu'))\r\n    model.add(Dense(nclasses, activation='softmax'))\r\n\r\n\r\n    model.compile(loss='sparse_categorical_crossentropy',\r\n                  optimizer='adam',\r\n                  metrics=['accuracy'])\r\n    return model\r\n\r\n
def Build_Model_CNN_Text(word_index, embeddings_index, nclasses, MAX_SEQUENCE_LENGTH=500, EMBEDDING_DIM=50, dropout=0.5):\r\n\r\n    \"\"\"\r\n    def buildModel_CNN(word_index, embeddings_index, nclasses, MAX_SEQUENCE_LENGTH=500, EMBEDDING_DIM=50, dropout=0.5):\r\n    word_index in word index ,\r\n    embeddings_index is embeddings index, look at data_helper.py\r\n    nClasses is number of classes,\r\n    MAX_SEQUENCE_LENGTH is maximum length of text sequences,\r\n    EMBEDDING_DIM is an int value for dimension of word embedding look at data_helper.py\r\n    \"\"\"\r\n\r\n    model = Sequential()\r\n    embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))\r\n    for word, i in word_index.items():\r\n        embedding_vector = embeddings_index.get(word)\r\n        if embedding_vector is not None:\r\n            # words not found in embedding index will be all-zeros.\r\n            if len(embedding_matrix[i]) !=len(embedding_vector):\r\n                print(\"could not broadcast input array from shape\",str(len(embedding_matrix[i])),\r\n                      \"into shape\",str(len(embedding_vector)),\" Please make sure your\"\r\n                      \" EMBEDDING_DIM is equal to embedding_vector file ,GloVe,\")\r\n                exit(1)\r\n\r\n            embedding_matrix[i] = embedding_vector\r\n\r\n    embedding_layer = Embedding(len(word_index) + 1,\r\n                                EMBEDDING_DIM,\r\n                                weights=[embedding_matrix],\r\n                                input_length=MAX_SEQUENCE_LENGTH,\r\n                                trainable=True)\r\n\r\n    # applying a more complex convolutional approach\r\n    convs = []\r\n    filter_sizes = []\r\n    layer = 5\r\n    print(\"Filter \",layer)\r\n    for fl in range(0,layer):\r\n        filter_sizes.append((fl+2))\r\n\r\n    node = 128\r\n    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\r\n    embedded_sequences = embedding_layer(sequence_input)\r\n\r\n    for fsz in filter_sizes:\r\n        l_conv = Conv1D(node, kernel_size=fsz, activation='relu')(embedded_sequences)\r\n        l_pool = MaxPooling1D(5)(l_conv)\r\n        #l_pool = Dropout(0.25)(l_pool)\r\n        convs.append(l_pool)\r\n\r\n    l_merge = Concatenate(axis=1)(convs)\r\n    l_cov1 = Conv1D(node, 5, activation='relu')(l_merge)\r\n    l_cov1 = Dropout(dropout)(l_cov1)\r\n    l_pool1 = MaxPooling1D(5)(l_cov1)\r\n    l_cov2 = Conv1D(node, 5, activation='relu')(l_pool1)\r\n    l_cov2 = Dropout(dropout)(l_cov2)\r\n    l_pool2 = MaxPooling1D(30)(l_cov2)\r\n    l_flat = Flatten()(l_pool2)\r\n    l_dense = Dense(1024, activation='relu')(l_flat)\r\n    l_dense = Dropout(dropout)(l_dense)\r\n    l_dense = Dense(512, activation='relu')(l_dense)\r\n    l_dense = Dropout(dropout)(l_dense)\r\n    preds = Dense(nclasses, activation='softmax')(l_dense)\r\n    model = Model(sequence_input, preds)\r\n\r\n    model.compile(loss='sparse_categorical_crossentropy',\r\n                  optimizer='adam',\r\n                  metrics=['accuracy'])\r\n\r\n\r\n\r\n    return model\r\n\r\ndef train(style_dataset, target_style, all_datasets=('coco', 'senticap', 'flickrstyle')):\r\n    MAX_SEQUENCE_LENGTH = 25\r\n    nclasses = 2\r\n\r\n    all_sent = []\r\n    for d in all_datasets:\r\n        all_sent.extend(load_sents(d))\r\n\r\n    c = Counter()\r\n    c.update([i.tag if hasattr(i, 'tag') else None for i in all_sent])\r\n\r\n    X_train, Y_train, X_test, Y_test = get_data(all_sent, target_style)\r\n    print('get_data done')\r\n    X_train_Glove, X_test_Glove, word_index, embeddings_index, tokenizer = loadData_Tokenizer(X_train, X_test,\r\n                                                                                               MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH)\r\n    print('loadData_Tokenizer done')\r\n\r\n    model_RNN = Build_Model_RNN_Text(word_index, embeddings_index, nclasses=nclasses, MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH,\r\n                                     EMBEDDING_DIM=300)\r\n    _ = time.time()\r\n    model_RNN.fit(X_train_Glove, Y_train,\r\n                  validation_data=(X_test_Glove, Y_test),\r\n                  epochs=15,\r\n                  batch_size=128,\r\n                  verbose=2)\r\n
print('fit used {}'.format(time.time() - _))\r\n predicted = model_RNN.predict_classes(X_test_Glove)\r\n print(metrics.classification_report(Y_test, predicted))\r\n\r\n model_save_dir = '../data/clf_nn'\r\n if not os.path.exists(model_save_dir):\r\n os.makedirs(model_save_dir)\r\n with open(os.path.join(model_save_dir, 'model_rnn_info_{}_{}.pkl'.format(style_dataset, target_style)), 'wb') as f:\r\n pickle.dump(obj={'word_index': word_index, 'embeddings_index': embeddings_index,\r\n 'nclasses': nclasses, 'MAX_SEQUENCE_LENGTH': MAX_SEQUENCE_LENGTH,\r\n 'EMBEDDING_DIM': 300,\r\n 'tokenizer_config': tokenizer.to_json(),\r\n '_classification_report': metrics.classification_report(Y_test, predicted)},\r\n file=f)\r\n model_RNN.save_weights(filepath=os.path.join(model_save_dir, 'model_rnn_{}_{}.h5'.format(style_dataset, target_style)))\r\n\r\n# def train_multi():\r\n# MAX_SEQUENCE_LENGTH = 25\r\n# nclasses = 5\r\n#\r\n# sents_coco = load_sents('coco')\r\n# sents_senticap = load_sents('senticap')\r\n# sents_flickrstyle = load_sents('flickrstyle')\r\n#\r\n# all_sent = sents_coco + sents_senticap + sents_flickrstyle\r\n#\r\n# c = Counter()\r\n# c.update([i.tag if hasattr(i, 'tag') else None for i in all_sent])\r\n#\r\n# X_train, Y_train, X_test, Y_test = get_data(all_sent, target_style)\r\n# print('get_data done')\r\n# X_train_Glove, X_test_Glove, word_index, embeddings_index, tokenizer = loadData_Tokenizer(X_train, X_test,\r\n# MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH)\r\n# print('loadData_Tokenizer done')\r\n#\r\n# model_RNN = Build_Model_RNN_Text(word_index, embeddings_index, nclasses=nclasses, MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH)\r\n# _ = time.time()\r\n# model_RNN.fit(X_train_Glove, Y_train,\r\n# validation_data=(X_test_Glove, Y_test),\r\n# epochs=15,\r\n# batch_size=128,\r\n# verbose=2)\r\n# print('fit used {}'.format(time.time() - _))\r\n# predicted = model_RNN.predict_classes(X_test_Glove)\r\n# print(metrics.classification_report(Y_test, predicted))\r\n#\r\n# model_save_dir = '../data/clf_nn'\r\n# if not os.path.exists(model_save_dir):\r\n# os.makedirs(model_save_dir)\r\n# with open(os.path.join(model_save_dir, 'model_rnn_info_{}_{}.pkl'.format(style_dataset, target_style)), 'wb') as f:\r\n# pickle.dump(obj={'word_index': word_index, 'embeddings_index': embeddings_index,\r\n# 'nclasses': nclasses, 'MAX_SEQUENCE_LENGTH': MAX_SEQUENCE_LENGTH,\r\n# 'tokenizer_config': tokenizer.to_json(),\r\n# '_classification_report': metrics.classification_report(Y_test, predicted)},\r\n# file=f)\r\n# model_RNN.save_weights(filepath=os.path.join(model_save_dir, 'model_rnn_{}_{}.h5'.format(style_dataset, target_style)))\r\n\r\ndef test_tokenize(tokenizer, sents, MAX_SEQUENCE_LENGTH=500):\r\n sequences = tokenizer.texts_to_sequences(sents)\r\n text = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\r\n return text\r\n\r\ndef test(dataset, style, test_file):\r\n obj = json.load(open(test_file, 'r'))\r\n # obj = json.load(open('/media/wentian/sdb2/work/caption_ma/save/2019-10-04_21-09-37_2agent_neg/annotation.json', 'r'))['annotations']\r\n sents = [i['caption'] for i in obj]\r\n\r\n d = pickle.load(open(r'../data/clf_nn/model_rnn_info_{}_{}.pkl'.format(dataset, style), 'rb'))\r\n w, e, tokenizer_config = d['word_index'], d['embeddings_index'], d['tokenizer_config']\r\n MAX_SEQUENCE_LENGTH, nclasses, EMBEDDING_DIM = d['MAX_SEQUENCE_LENGTH'], d['nclasses'], d['EMBEDDING_DIM']\r\n tokenizer = tokenizer_from_json(tokenizer_config)\r\n model_RNN = Build_Model_RNN_Text(w, e, nclasses=nclasses, 
MAX_SEQUENCE_LENGTH=MAX_SEQUENCE_LENGTH, EMBEDDING_DIM=EMBEDDING_DIM)\r\n model_RNN.load_weights('../data/clf_nn/model_rnn_{}_{}.h5'.format(dataset, style))\r\n\r\n X_train_Glove = test_tokenize(tokenizer, sents, MAX_SEQUENCE_LENGTH)\r\n predicted = model_RNN.predict_classes(X_train_Glove, verbose=0)\r\n\r\n for i in range(len(predicted)):\r\n if predicted[i] == 0:\r\n print(sents[i], predicted[i])\r\n print(sum(predicted) / len(predicted))\r\n\r\n\r\nif __name__ == '__main__':\r\n # train('senticap', 'positive')\r\n # train('senticap', 'negative')\r\n # train('flickrstyle', 'humor')\r\n # train('flickrstyle', 'romantic')\r\n\r\n # train('chn_styled_word', 'positive', all_datasets=['youku_chn_word', 'chn_styled_word'])\r\n # train('chn_styled_word', 'negative', all_datasets=['youku_chn_word', 'chn_styled_word'])\r\n # train('chn_styled_word', 'humor', all_datasets=['youku_chn_word', 'chn_styled_word'])\r\n # train('chn_styled_word', 'romantic', all_datasets=['youku_chn_word', 'chn_styled_word'])\r\n\r\n # test('youku_chn_word', 'positive', '/media/wentian/sdb2/work/styled_caption/save/2019-08-22_21-17-19_chn_positive_word/result_youku_chn_word_positive_10.json')\r\n # test('youku_chn_word', 'negative', '/media/wentian/sdb2/work/styled_caption/save/2019-08-22_23-50-59_chn_negative_word/result_youku_chn_word_negative_20.json')\r\n # test('youku_chn_word', 'humor', '/media/wentian/sdb2/work/styled_caption/save/2019-08-22_21-08-48_chn_humor_word/result_youku_chn_word_humor_7.json')\r\n test('youku_chn_word', 'romantic', '/media/wentian/sdb2/work/styled_caption/save/2019-08-22_21-13-01_chn_romantic_word/result_youku_chn_word_romantic_8.json')", "repo_name": "entalent/MemCap", "sub_path": "src/styled_eval/train_clf_nn_chn.py", "file_name": "train_clf_nn_chn.py", "file_ext": "py", "file_size_in_byte": 16377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.backend.set_session", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 140, "usage_type": "attribute"}, {"api_name": "keras.layers.Embedding", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.layers.GRU", 
"line_number": 158, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 159, "usage_type": "call"}, {"api_name": "keras.layers.GRU", "line_number": 160, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 161, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 162, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 163, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 183, "usage_type": "attribute"}, {"api_name": "keras.layers.Embedding", "line_number": 196, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 211, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 215, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 216, "usage_type": "call"}, {"api_name": "keras.layers.Concatenate", "line_number": 220, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 221, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 222, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 223, "usage_type": "call"}, {"api_name": "keras.layers.Conv1D", "line_number": 224, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 225, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 226, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 227, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 228, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 229, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 230, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 231, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 232, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 233, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 251, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 270, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 270, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path", "line_number": 275, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 280, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 280, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 327, "usage_type": "call"}, {"api_name": "keras_preprocessing.text.tokenizer_from_json", "line_number": 338, "usage_type": "call"}]} +{"seq_id": "14805957568", "text": "from sortedcontainers import SortedList\nimport itertools\n\n\n# represents a filtered chain complex over a field\nclass FCC:\n # vertices - a dictionary {v : filtration level}\n # edges - a set [FCC.Edge]\n def __init__(self, vertices, edges):\n self.vertices 
= vertices\n self.inv = {v: {} for v in vertices}\n self.outv = {v: {} for v in vertices}\n self.edges = {} # a dictionary {filtration level change : {FCC.Edge}}\n self.num_edges = 0 # keeps track of the number of edges in self.edges\n\n self.edge_priority = {}\n for i, e in enumerate(edges):\n print ('FCC: adding edge '+str(i)+'/'+str(len(edges)))\n self.add_edge(e)\n\n # set up the proper priorities before any reduction\n self.edges = {}\n for i, e in enumerate(edges):\n print ('FCC: updating edge priority '+str(i)+'/'+str(len(edges)))\n self.edge_priority[e] = len(self.inv[e.target]) * len(self.outv[e.source])\n if self.delta_f(e) in self.edges:\n self.edges[self.delta_f(e)].add((e, self.edge_priority[e]))\n else:\n self.edges[self.delta_f(e)] = SortedList([(e, self.edge_priority[e])], key = lambda x: x[1])\n\n # reduces this chain complex until there are no edges remaining\n # if a page is specified, then stop reducing at that page of the spectral sequence\n def reduce(self, page=None):\n for i in itertools.count():\n if (page is not None and i == page) or self.num_edges == 0:\n return\n\n while i in self.edges and len(self.edges[i]) > 0:\n u = self.edges[i].pop(index = 0)[0]\n self.num_edges -= 1\n\n x, y, c = u.source, u.target, u.coefficient\n\n del self.inv[y][x]\n del self.outv[x][y]\n\n new_edges = []\n print ('Reduction at '+str(i)+': '+str(len(self.vertices))+' vertices, '+\n str(len(self.edges[i]))+' edges')\n for w in self.inv[y]:\n t = -self.inv[y][w].coefficient * 1 / c\n for z in self.outv[x]:\n e = self.get_edge(w, z)\n e.coefficient += t * self.outv[x][z].coefficient\n new_edges.append(e)\n\n for e in new_edges:\n self.add_edge(e)\n\n self.remove_vertex(x)\n self.remove_vertex(y)\n\n # remove all vertices with polynomial degree > k\n def truncate(self, k):\n for v in self.vertices.keys():\n if v[1].lift().degree() > k:\n self.remove_vertex(v)\n\n def get_edge(self, source, target):\n if target in self.outv[source]:\n return self.outv[source][target]\n else:\n return FCC.Edge(source, target, 0)\n\n def add_edge(self, e):\n if e.coefficient == 0:\n self.remove_edge(e)\n return\n\n self.inv[e.target][e.source] = e\n self.outv[e.source][e.target] = e\n\n t = self.edge_priority[e] if e in self.edge_priority else None\n self.edge_priority[e] = len(self.inv[e.target]) * len(self.outv[e.source])\n\n if self.delta_f(e) in self.edges:\n if (e, t) in self.edges[self.delta_f(e)]:\n self.edges[self.delta_f(e)].remove((e, t))\n self.num_edges -= 1\n self.edges[self.delta_f(e)].add((e, self.edge_priority[e]))\n else:\n self.edges[self.delta_f(e)] = SortedList([(e, self.edge_priority[e])], key = lambda x: x[1])\n self.num_edges += 1\n\n def remove_edge(self, e):\n if e in self.edge_priority:\n if (e, self.edge_priority[e]) in self.edges[self.delta_f(e)]:\n self.edges[self.delta_f(e)].remove((e, self.edge_priority[e]))\n self.num_edges -= 1\n del self.edge_priority[e]\n if e.source in self.inv[e.target]:\n del self.inv[e.target][e.source]\n if e.target in self.outv[e.source]:\n del self.outv[e.source][e.target]\n\n def remove_vertex(self, v):\n for e in self.inv[v].values():\n self.remove_edge(e)\n for e in self.outv[v].values():\n self.remove_edge(e)\n\n del self.inv[v]\n del self.outv[v]\n\n del self.vertices[v]\n\n # returns how much this edge increases the filtration level\n def delta_f(self, e):\n return self.vertices[e.target] - self.vertices[e.source]\n\n class Edge:\n def __init__(self, source, target, coefficient):\n self.source = source\n self.target = target\n self.coefficient 
= coefficient\n\n def __repr__(self):\n return str(self.source) + '--- ' + str(self.coefficient) + ' -->' + str(self.target)\n\n def __eq__(self, other):\n return self.source == other.source and self.target == other.target\n\n def __hash__(self):\n return hash((self.source, self.target))\n", "repo_name": "samueltripp/khovanov", "sub_path": "FCC.py", "file_name": "FCC.py", "file_ext": "py", "file_size_in_byte": 5006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sortedcontainers.SortedList", "line_number": 29, "usage_type": "call"}, {"api_name": "itertools.count", "line_number": 34, "usage_type": "call"}, {"api_name": "sortedcontainers.SortedList", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "18867844125", "text": "from argparse import ArgumentParser\nfrom port_scanner import PortScannerError, scan\nfrom typing import Any, Dict\n\n\ndef parse_args() -> Dict[str, Any]:\n parser = ArgumentParser(description='TCP and UDP port scanner')\n parser.add_argument('-t', '--tcp_only', help='Scan only TCP',\n action='store_true')\n parser.add_argument('-u', '--udp_only', help='Scan only UDP',\n action='store_true')\n parser.add_argument('-p', '--ports', nargs=2, default=['1', '65535'],\n metavar='PORT', help='Port range')\n parser.add_argument('host', help='Remote host')\n return parser.parse_args().__dict__\n\n\nif __name__ == '__main__':\n try:\n args = parse_args()\n scan(**args)\n except PortScannerError as e:\n print(e.message)\n exit(1)\n except KeyboardInterrupt:\n print('\\nTerminated.')\n exit()\n", "repo_name": "AleksandrChirkin/Portscan", "sub_path": "port_scanner/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 6, "usage_type": "name"}, {"api_name": "port_scanner.scan", "line_number": 21, "usage_type": "call"}, {"api_name": "port_scanner.PortScannerError", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "24406610788", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom sklearn.datasets import make_blobs\nimport transitions as trans\nimport copy, csv\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom random import randint\nfrom math import floor\n\n\n'''\nThe purpose of this algorithm is to enable the simulation of different types of group transitions, both internal and external, between multiple clusterings in a n-dimensional space.\n'''\n\nclass Evocluster:\n\n #Args n_samples l_centers num_f r_state, list 2dlist int int\n #Ret None; makes the initial clustering \n def __init__(self, n_samples, l_centers, num_f, r_state):\n \n self.evo_clustering = {}\n self.centers = {}\n self.num_f = num_f\n\n X, y = make_blobs(n_samples, centers = l_centers, n_features=self.num_f, random_state = r_state)\n colors = att_colors(y)\n\n self.evo_clustering[0] = X,y,colors\n self.centers[0] = find_centroids(X,y, num_f)\n \n \n #Args transitions, list\n #Ret None; makes the next clustering with the chosen transictions \n def new_clustering(self, transitions):\n \n for tr in transitions:\n \n next_clustering = copy.deepcopy(self.evo_clustering) \n last_t = list(self.evo_clustering)[-1]\n\n X, y, colors = self.evo_clustering[last_t]\n\n X, y, colors, 
new_c = ch_transition(tr, X, y, colors, self.centers[last_t], self.num_f)\n next_clustering[last_t+1] = X, y, colors\n\n self.evo_clustering = next_clustering\n self.centers[last_t+1] = new_c\n \n #Args None, None\n #Ret None; plots graphs for all clusterings\n def plot_view_all(self):\n for j in self.evo_clustering.keys():\n X, y, colors = self.evo_clustering[j]\n n_c = 0\n centers_c = self.centers[j]\n for i in np.unique(y):\n plt.scatter(X[y == i, 0], X[y == i, 1], color=colors[i], marker = 'o', s=20, label=\"Cluster \"+str(n_c))\n plt.scatter(centers_c[i][0], centers_c[i][1], color='black', marker = 'h', s=20)\n plt.title(\"T_\" + str(j))\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.xlim(0, 40)\n plt.ylim(0, 50)\n plt.legend()\n n_c = n_c + 1\n plt.show()\n \n #Args None, None\n #Ret None; export the clustering samples and labels in a csv format \n def export_csv(self):\n \n t = 0\n for i in self.evo_clustering.keys():\n \n X, y, colors = self.evo_clustering[i]\n \n with open(\"evo_cluster_\"+ str(t) +\".csv\", mode='w') as csv_file:\n writer = csv.writer(csv_file)\n \n for j in range(X.shape[0]):\n row = np.concatenate((X[j], y[j]), axis = None)\n writer.writerow(row)\n \n csv_file.close()\n \n t = t + 1 \n \n#Args tr X y colors centers num_f, list list list dict dict int\n#Ret list list dict dict; returns the new data for the next clustering \ndef ch_transition(tr, X, y, colors, centers, num_f):\n\n new_c = 0\n \n #internal transitions\n\n if(\"int_den_dif\" in tr):\n X, y = trans.int_den_dif(X, y, centers, [1], num_f)\n new_c = find_centroids(X, y, num_f)\n\n if(\"int_den_comp\" in tr):\n X, y = trans.int_den_comp(X, y, centers, [2], num_f)\n new_c = find_centroids(X, y, num_f)\n\n if(\"int_size_grow\" in tr):\n X, y = trans.int_size_grow(X, y, centers, [1], num_f, 30)\n new_c = find_centroids(X, y, num_f)\n\n if(\"int_size_reduc\" in tr):\n X, y = trans.int_size_reduc(X, y, [1], 80)\n new_c = find_centroids(X, y, num_f)\n\n if(\"int_local\" in tr): \n X, y = trans.int_local(X, y, [2], num_f, (-8,8))\n new_c = find_centroids(X, y, num_f)\n\n #external transitions\n\n if(\"ext_death\" in tr):\n X, y, colors = trans.ext_death(X, y, [3], colors)\n new_c = find_centroids(X, y, num_f)\n\n if(\"ext_birth\" in tr):\n X, y, colors = trans.ext_birth(X, y, colors, num_f, 15)\n new_c = find_centroids(X, y, num_f)\n\n if(\"ext_union\" in tr):\n X, y, colors = trans.ext_union(X, y, colors, [0,2], [10,40,25], num_f)\n new_c = find_centroids(X, y, num_f)\n\n if(\"ext_div\" in tr):\n X, y, colors = trans.ext_div(X, y, colors, [0], [[30,25,20],[20,40,25],[30,10,20]], [0.3,0.3,0.4], num_f)\n new_c = find_centroids(X, y, num_f)\n\n return X, y, colors, new_c\n\n\n#Args y, list\n#Ret dict; returns the associated colors for the first clustering \ndef att_colors(y):\n colors = {}\n for i in np.unique(y):\n colors[i] = '#%06X' % randint(0, 0xFFFFFF) \n return colors\n\n#Args X y num_f, list list int\n#Ret dicts; returns the updated centers \ndef find_centroids(X, y, num_f):\n \n c = list(np.unique(y))\n \n clusters = {}\n sum_axis = {}\n len_axis = {}\n \n for i in c:\n clusters[i] = []\n sum_axis[i] = []\n len_axis[i] = []\n \n for i in range(X.shape[0]):\n clusters[y[i]].append(X[i])\n \n for k in clusters.keys():\n c_axis = list(zip(*clusters[k]))\n for i in range(num_f):\n sum_axis[k].append(sum(c_axis[i]))\n len_axis[k].append(len(c_axis[i]))\n \n centers = {}\n for k in clusters.keys():\n centers[k] = []\n for i in range(num_f):\n centers[k].append(sum_axis[k][i]/len_axis[k][i])\n \n return 
centers\n ", "repo_name": "afonsoMatheus/CETra", "sub_path": "monic/.ipynb_checkpoints/evolution_clustering-checkpoint.py", "file_name": "evolution_clustering-checkpoint.py", "file_ext": "py", "file_size_in_byte": 5678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.datasets.make_blobs", "line_number": 27, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "csv.writer", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 83, "usage_type": "call"}, {"api_name": "transitions.int_den_dif", "line_number": 99, "usage_type": "call"}, {"api_name": "transitions.int_den_comp", "line_number": 103, "usage_type": "call"}, {"api_name": "transitions.int_size_grow", "line_number": 107, "usage_type": "call"}, {"api_name": "transitions.int_size_reduc", "line_number": 111, "usage_type": "call"}, {"api_name": "transitions.int_local", "line_number": 115, "usage_type": "call"}, {"api_name": "transitions.ext_death", "line_number": 121, "usage_type": "call"}, {"api_name": "transitions.ext_birth", "line_number": 125, "usage_type": "call"}, {"api_name": "transitions.ext_union", "line_number": 129, "usage_type": "call"}, {"api_name": "transitions.ext_div", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 143, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "26846833347", "text": "import os\nimport re\nimport copy\nimport random\nimport math\nimport time\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nIn my model the arrows point opposite from the paper referenced.\nIn the paper the A -> B represents A follows B\nHere A -> B represents information flows from A to B (B follows A)\n'''\ndef load_network(input_file):\n\tnode_file = open(\"networks/\" + input_file + 
\"/nodes.csv\", \"r\")\n\tedge_file = open(\"networks/\" + input_file + \"/edges.csv\", \"r\")\n\tnodes_str = node_file.readlines()[1:]\n\tedges_str = edge_file.readlines()[1:]\n\tnodes = []\n\tedges = []\n\tfor node_str in nodes_str:\n\t\tid, followers, following = node_str.split(\",\")\n\t\tnodes.append([int(followers), [], int(following), []])\n\t\t\n\tfor edge_str in edges_str:\n\t\tnode, follower, weight = edge_str.split(\",\")\n\t\tnode = int(node)\n\t\tfollower = int(follower)\n\t\tprint(node)\n\t\t\n\t\tnodes[node][1].append(follower)\n\t\tnodes[follower][3].append(node)\n\t\n\treturn nodes\n\t\t\ndef write_node_output(nodes, output_file):\n\tres = \"id,num_following\\n\"\n\tfor i,node in enumerate(nodes):\n\t\t\n\t\tres = res + str(i) + \",\" + str(node[0]) + \",\" + str(node[2]) + \"\\n\"\n\t\t\n\toutput = open(output_file + \"/nodes.csv\", \"w\")\n\t\n\toutput.write(res)\n\t\ndef write_edge_output(nodes, output_file):\n\tall_edges = []\n\tres = \"from,to,weight\\n\"\n\n\tfor i, node in enumerate(nodes):\n\t\tfor edge in node[1]:\n\t\t\tres = res + str(i) + \",\" + str(edge) + \",1\\n\"\n\t\t\n\t\t\n\toutput = open(output_file + \"/edges.csv\", \"w\")\n\t\n\toutput.write(res)\t\n\n\t\n#in degree of nodes from the paper (followers)\n#out degree in generated graph\ndef degree_func(): \n\tfollowers = 0\n\tx = [.0001, .00177, .31622, 10, 177.8, 562.3] #*10e4 (probability of getting y)\n\t#-8, -6.75, -4.75, -3, -1.75, -1.25\n\ty = [100000,10000,1000,100,10,1]\n\tr = random.uniform(.0031623, 562)\n\tfor i, x1 in enumerate(x):\n\t\tif r <= x1:\n\t\t\tx_dis = (r-x[i-1])/(x[i]-x[i-1])\n\t\t\tfollowers = int(x_dis*(y[i-1]-y[i]) + y[i])\n\t\t\tbreak\n\t\n\tfollowing = 0\n\tif followers <= 1000:\n\t\tfollowing = followers * (1.2 - (.2*followers/1000)) + 3\n\t\t\n\telse:\n\t\tfollowing = followers - (followers/2) * ((followers-1000)/100000)\n\tfollowing = int(following)\n\treturn followers, following\n\t\ndef gen_nodes(num):\n\tnodes = []\n\tfor i in range(0,num):\n\t\tnum_followers, num_following = degree_func()\n\t\tnodes.append([num_followers, [], num_following, []])\n\t\t\n\treturn nodes\n\t\t\t\n\t\t\t\ndef weighted_choice(weights):\n\ttotal = sum(w for w in weights)\n\tr = random.uniform(0, total)\n\tupto = 0\n\tfor i,w in enumerate(weights):\n\t\tif upto + w >= r:\n\t\t\treturn i\n\t\tupto += w\n\tassert False, \"Shouldn't get here\"\n\n\ndef gen_edge_probs(nodes, curr_node_idx):\n\tweights = np.full(len(nodes), 100)\n\tcurr_node_relationships = nodes[curr_node_idx][1] + list(set(nodes[curr_node_idx][3]) - set(nodes[curr_node_idx][1]))\n\t\n\t\n\t\n\tfor i, node in enumerate(nodes):\n\t\t#generates weights for every possible connection based on max #following\n\t\tif i == curr_node_idx or i in nodes[curr_node_idx][1]: # if i is node, or is already a follower of curr node\n\t\t\tweights[i] = 0\n\t\t\tcontinue\n\t\tdeduct = 100*(len(nodes[i][3])/nodes[i][2]) #num_following/max_num_following\n\t\tweights[i] = weights[i] - min(deduct,90)\n\t\n\t\t#modifies weights based on # of shared connections\n\t\ttmp_rel = nodes[i][1] + list(set(nodes[i][3]) - set(nodes[i][1]))\n\t\tintersect = set(curr_node_relationships).intersection(tmp_rel)\n\t\t\n\t\tmult = 1+len(intersect)\n\t\t\n\t\tweights[i] = weights[i] * mult\n\t\t\n\treturn weights\n\t\n#generates edges, adds one follower (out arrow) to each node sequentially until\n#no more followers are available in any node\ndef gen_edges(nodes):\n\tfollowers_available = np.full(len(nodes), True)\n\ttotal_nodes = len(nodes)\n\tpercent_done = 
0\n\ttotal_followers = sum([n[0] for n in nodes])\n\tnum_done = 0\n\twhile True in followers_available:\n\t\tfor i, node in enumerate(nodes):\n\t\t\tif followers_available[i] == False:\n\t\t\t\tcontinue\n\t\t\t\t\n\t\t\tweights = gen_edge_probs(nodes, i)\n\t\t\tnew_follower = weighted_choice(weights)\n\t\t\t\n\t\t\tnodes[i][1].append(new_follower)\n\t\t\tnodes[new_follower][3].append(i)\n\t\t\tfollowers_available[i] = len(nodes[i][1]) < nodes[i][0]\n\t\t\t\n\t\n\t\t\t#if there are fewer nodes than available connections.\n\t\t\t#this should not happen under normal operation. prevents errors in testing small networks\n\t\t\tif len(nodes[i][1]) == len(nodes)-1: \t\n\t\t\t\tfollowers_available[i] = False\t\n\t\t\t\t\n\t\t\tnum_done += 1 \t\t\n\t\t\tif (num_done%int(total_followers/100) == 0): \n\t\t\t\tprint(str(int(num_done/total_followers*100)) + \"%\")\n\t\ndef tweet(nodes, node_idx, tweeted, seen, curr_step):\n\ttweeted[node_idx] = True\n\tfor follower in nodes[node_idx][1]:\n\t\tseen[follower][0] = curr_step\n\t\tseen[follower][1] = seen[follower][1]+1\n\t\n\t\ndef run_network(nodes):\n\ttweeted = np.full(len(nodes), False)\n\tseen = np.full((len(nodes),2), 0)\n\ttimesteps = []\n\t\n\trand_start = 0\n\twhile True:\n\t\trand_start = random.randint(0,len(nodes)-1)\n\t\tif nodes[rand_start][0] <= 30:\n\t\t\tbreak\n\ttweet(nodes, rand_start, tweeted, seen, 1)\n\t\n\tcount = 0\n\tcurr_step = 1\n\tbase_prob = .02\n\twhile count < 5:\n\t\ttimesteps.append(copy.deepcopy(tweeted))\n\t\ttweet_next = []\n\t\tfor i in range(0, len(tweeted)):\n\t\t\tif not tweeted[i] and seen[i][0]:\n\t\t\t\tr = random.uniform(0,1)\n\t\t\t\t\n\t\t\t\ttime_past = curr_step - seen[i][0] + 1\n\t\t\t\t\n\t\t\t\tprob = base_prob/(time_past*2)\n\n\t\t\t\t#prob = 1- math.pow((1-prob),seen[i][1])\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t#prob = base_prob + (1-base_prob)/((curr_step - seen[i])) \n\t\t\t\t\n\t\t\t\t#prob = base_prob + math.pow(time_past, 1.5)/30\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif r < max(prob, .000001):\n\t\t\t\t\ttweet_next.append(i)\n\t\tif not tweet_next:\n\t\t\tcount += 1\n\t\telse:\n\t\t\tcount = 0\n\t\t\t\n\t\tfor tweeter in tweet_next:\n\t\t\ttweet(nodes, tweeter, tweeted, seen, curr_step)\n\t\t\n\t\tcurr_step += 1\n\t\t\n\treturn timesteps\n\t\t\t\ndef gen_net(n, output_dir):\n\tnodes = gen_nodes(n)\n\tedges = gen_edges(nodes)\n\t\n\tif not os.path.exists(\"networks/\" + output_dir):\n\t\tos.makedirs(\"networks/\" + output_dir)\n\t\n\twrite_node_output(nodes, \"networks/\" + output_dir)\n\twrite_edge_output(nodes, \"networks/\" + output_dir)\n\t\ndef run_analysis(timesteps, nodes, output_dir):\n\toutput_dir = output_dir + \"/\" + str(int(time.time()))\n\t\n\ttry:\n\t\tos.stat(\"networks/\" + output_dir)\n\texcept:\n\t\tos.mkdir(\"networks/\" + output_dir) \n\t\n\tx = np.linspace(0, len(timesteps)-1, len(timesteps), endpoint=True)\n\ty = []\n\t\n\t\n\tuser_tweet_list = []\n\tnew_user_tweet = []\n\n\t\n\t#plot tweets vs time\n\tfor ts in timesteps:\n\t\ttweeted_this_ts=[i for i, x in enumerate(ts) if x]\n\t\tif not user_tweet_list:\n\t\t\tnew_user_tweet.append([x for x in tweeted_this_ts]) #writing code on 2 hours of sleep makes bad code...\n\t\telse:\n\t\t\tnew_user_tweet.append([x for x in tweeted_this_ts if x not in user_tweet_list[-1]])\n\t\t\t\n\t\tuser_tweet_list.append(tweeted_this_ts)\n\t\ty.append(len(user_tweet_list[-1]))\n\t\t\n\t\t\n\tuser_order_file = open(\"networks/\" + output_dir + \"/user_order.txt\", \"w\")\n\t\n\tuser_order_file.write(json.dumps(new_user_tweet, indent = 4, 
sort_keys=True))\n\t\n\tplt.plot(x,y)\n\tplt.savefig(\"networks/\" + output_dir + \"/tweets_over_time.png\")\n\tplt.clf()\n\t\n\t\n\t\n\t#plot degree vs time\n\ty = []\n\talready_tweeted = []\n\tfor count, ts in enumerate(timesteps):\n\t\tidxs = [i for i, j in enumerate(ts) if j == True]\n\t\ttmp = 0\n\t\tfor idx in idxs:\n\t\t\tif idx not in already_tweeted:\n\t\t\t\ttmp = tmp + nodes[idx][0]\n\t\t\t\talready_tweeted.append(idx)\n\t\t\n\t\ty.append(tmp)\n\t\t\t\n\t#x = np.linspace(0, len(y)-1, len(y), endpoint=True)\n\tplt.plot(x,y)\n\tplt.savefig(\"networks/\" + output_dir + \"/followers_over_time_avg.png\")\n\tplt.clf()\n\t\n\t\n\t#plot degree vs time\n\ty = []\n\tx = []\n\talready_tweeted = []\n\tfor count, ts in enumerate(timesteps):\n\t\tidxs = [i for i, j in enumerate(ts) if j == True]\n\t\ttmp = []\n\t\tfor idx in idxs:\n\t\t\tif idx not in already_tweeted:\n\t\t\t\ttmp.append(nodes[idx][0])\n\t\t\t\talready_tweeted.append(idx)\n\t\t\n\t\tnum_x = len(tmp)\n\t\tfor i, t in enumerate(tmp):\n\t\t\ty.append(t)\n\t\t\tx.append(count + i/num_x)\n\t\t\t\n\t#x = np.linspace(0, len(y)-1, len(y), endpoint=True)\n\tplt.plot(x,y)\n\tplt.savefig(\"networks/\" + output_dir + \"/followers_over_time.png\")\n\tplt.clf()\n\t\n\t\ndef clustering(curr_node_idx, curr_node, nodes):\n\tnbrhood = curr_node[1] + list(set(curr_node[3]) - set(curr_node[1]))\n\tnbrhood.append(curr_node_idx)\n\tnbrhood_edgs = 0\n\tfor n in nbrhood:\t\t\n\t\tfollowers_in_rbrhood = set(nodes[n][1]).intersection(nbrhood)\n\t\tnbrhood_edgs = nbrhood_edgs + len(followers_in_rbrhood)\n\t\t\n\tnum_neigh = len(nbrhood)\n\t\n\treturn nbrhood_edgs/(num_neigh*(num_neigh-1))\n\t\t\n\t\t\ndef net_analysis(nodes, output):\n\tdirectory = \"networks/\" + output\n\tx = np.linspace(0, len(nodes)-1, len(nodes), endpoint=True)\n\t\n\t#follower degree\n\tdeg = []\n\ty = []\n\tfor n in nodes:\n\t\ty.append(n[0])\n\t\tdeg.append(n[0])\n\t\n\ty.sort()\n\tplt.plot(x,y)\n\tplt.savefig(directory + \"/follower_degree\")\n\tplt.clf()\n\t\n\t#following degree\n\ty = []\n\tfor n in nodes:\n\t\ty.append(n[2])\n\t\n\ty.sort()\n\tplt.plot(x,y)\n\tplt.savefig(directory + \"/following_degree\")\n\tplt.clf()\n\t\n\t#clustering\n\ty = []\n\tfor i,n in enumerate(nodes):\n\t\ty.append(clustering(i, n, nodes))\n\t\n\tdeg, y = (list(x) for x in zip(*sorted(zip(deg, y))))\n\t\n\tplt.plot(deg,y)\n\tplt.savefig(directory + \"/clustering\")\n\tplt.clf()\n\t\ndef run_from_save(output_name, num_runs):\n\t#add in to do multiple runs\n\tnodes = load_network(output_name)\n\tfor i in range(0,num_runs):\n\t\tprint(i)\n\t\tts = run_network(nodes)\n\t\twhile np.count_nonzero(ts[-1] == True) <= 300: # if less than x people tweeted, redo the analysis\n\t\t\tts = run_network(nodes)\n\t\tstart_idx = ts[0].tolist().index(True)\n\t\trun_analysis(ts, nodes, output_name)\n\t\t\ndef analyze_runs(run_dir, nodes):\n\n\ty = [0]*10000\n\tnum_clusters = 0\n\tclusters = []\n\tfor (root, dirs, files) in os.walk(\"networks/\" + run_dir):\n\t\tfor dir in dirs:\n\t\t\tinput = open(root + \"/\" + dir + \"/user_order.txt\", \"r\")\n\t\t\tuser_propogation = json.load(input)\n\t\t\t\n\t\t\tavg = 0\n\t\t\tstream_follow = []\n\t\t\tstream_ts = []\n\t\t\tin_cluster = False\n\t\t\tfor ts in user_propogation:\n\t\t\t\t\n\n\t\t\t\tif len(stream_follow) == 50:\n\t\t\t\t\tstream_follow.pop(0)\n\t\t\t\t\tstream_ts.pop(0)\n\n\t\t\t\tstream_ts.append(ts)\n\t\t\t\tsum_users = sum([len(x) for x in stream_ts])\n\n\t\t\t\tsum_followers = sum([nodes[idx][0] for idx in 
ts])\n\t\t\t\tstream_follow.append(sum_followers)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif sum_users > 200 and not in_cluster:\n\t\t\t\t\tcluster = []\n\t\t\t\t\tin_cluster = True\n\t\t\t\t\tfor past_ts in stream_ts:\n\t\t\t\t\t\tfor user in past_ts:\n\t\t\t\t\t\t\tcluster.append(user)\n\t\t\t\t\t\t\ty[user] = y[user] + 1\n\t\t\t\t\tclusters.append(cluster)\n\t\t\t\telif sum_users < 200 and in_cluster:\n\t\t\t\t\tnum_clusters = num_clusters + 1\n\t\t\t\t\tin_cluster = False\n\t\n\ty2 = [0]*10000\n\tfor idx, count in enumerate(y):\n\t\ty2[idx] = nodes[idx][0]\n\n\tsorted_degrees = [x for _,x in sorted(zip(y,y2))]\n\t\t\n\tlist.sort(y)\t\n\tplt.plot(np.linspace(0,10000,10000),y)\n\tplt.savefig(\"networks/10000_1c/200cluster.png\")\n\tplt.show()\n\tplt.clf()\n\t\n\tlist.sort(y2)\n\tplt.plot(np.linspace(0,10000,10000),y2)\n\t#plt.show()\n\t\t\ndef identify_clusters(run_dir, nodes):\n\t\n\tnum_clusters = 0\n\tclusters = []\n\tfor (root, dirs, files) in os.walk(\"networks/\" + run_dir):\n\t\tfor dir in dirs:\n\t\t\tinput = open(root + \"/\" + dir + \"/user_order.txt\", \"r\")\n\t\t\tuser_propogation = json.load(input)\n\t\t\t\n\t\t\t\n\t\t\t\nif __name__ == \"__main__\":\n\t#in degree = num followers\n\t#out degree = num following\n\tdata_dir = \"100000_java\"\n\t\n\ttry:\n\t\tos.stat(\"networks/\" + data_dir)\n\texcept:\n\t\tos.mkdir(\"networks/\" + data_dir) \n\t\n\t#gen_net(1000, data_dir)\n\tnodes = load_network(data_dir)\n\t#net_analysis(nodes, data_dir)\n\t#run_from_save(data_dir, 1000)\n\t#analyze_runs(data_dir, nodes)", "repo_name": "AdamSchunk/twitter_news", "sub_path": "graph_generator/gen_networks.py", "file_name": "gen_networks.py", "file_ext": "py", "file_size_in_byte": 11215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.uniform", "line_number": 69, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 168, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 173, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 182, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 220, "usage_type": "call"}, {"api_name": "time.time", "line_number": 226, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 229, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 233, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", 
"line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 300, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 302, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 330, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 331, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 331, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 332, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 352, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 352, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 353, "usage_type": "name"}, {"api_name": "numpy.count_nonzero", "line_number": 361, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 371, "usage_type": "call"}, {"api_name": "json.load", "line_number": 374, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 413, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 415, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 415, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 416, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 416, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 419, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 419, "usage_type": "name"}, {"api_name": "numpy.linspace", 
"line_number": 419, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 426, "usage_type": "call"}, {"api_name": "json.load", "line_number": 429, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 439, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 441, "usage_type": "call"}]} +{"seq_id": "10685071431", "text": "import os\nimport sys\nfrom argparse import ArgumentParser\nfrom typing import List\nfrom collections import OrderedDict\n\nfrom flask import Flask\nfrom rick.base import Di, Container, MapLoader\nfrom rick.event import EventManager\nfrom rick.mixin import Injectable\nfrom rick.util.loader import load_class\nfrom rick.resource.console import ConsoleWriter\n\nfrom pokie.constants import (\n DI_CONFIG,\n DI_SERVICES,\n DI_FLASK,\n DI_APP,\n DI_EVENTS,\n DI_TTY,\n DI_SIGNAL, CFG_HTTP_ERROR_HANLDER, DI_HTTP_ERROR_HANDLER,\n)\nfrom .module import BaseModule\nfrom .command import CliCommand\nfrom pokie.util.cli_args import ArgParser\nfrom .signal import SignalManager\n\n\nclass FlaskApplication:\n CLI_CMD_SUCCESS = 0\n CLI_CMD_FAILED = 1\n CLI_CMD_NOT_FOUND = 2\n\n module_file_name = \"module\" # module class file name\n module_class_name = \"Module\" # default module class name\n\n system_modules = [\n \"pokie.contrib.base\",\n ] # system modules to always be included\n\n def __init__(self, cfg: Container):\n self.di = Di()\n self.app = None\n self.modules = {} # app module list\n\n self.di.add(DI_CONFIG, cfg)\n self.di.add(DI_APP, self)\n self.cfg = cfg\n\n def build(self, module_list: list, factories: List = None) -> Flask:\n \"\"\"\n Build the application\n\n Factories is a list of optional callables to assemble functionality on top of Di, eg. database connection,\n cache, logging, etc. Factories are called *before* modules are initialized, to ensure all required dependencies\n are available\n\n :param module_list: list of module names to initialize\n :param factories: optional list of callables to be initialized with the application\n :return:\n \"\"\"\n if not factories:\n factories = []\n\n self.app = Flask(type(self).__name__)\n self.app.di = self.di\n self.di.add(DI_FLASK, self.app)\n\n # initialize signal manager\n self.di.add(DI_SIGNAL, SignalManager(self.di))\n\n # initialize TTY\n self.di.add(DI_TTY, ConsoleWriter())\n\n # load modules\n self.modules = {}\n module_list = [*self.system_modules, *module_list]\n for name in module_list:\n cls = load_class(\n \"{}.{}.{}\".format(name, self.module_file_name, self.module_class_name),\n True\n )\n if cls is None:\n raise RuntimeError(\n \"build(): cannot load module '{}' - Module() class not found\".format(\n name\n )\n )\n if not issubclass(cls, BaseModule):\n raise RuntimeError(\n \"build(): Class Module on '{}' must extend BaseModule\".format(name)\n )\n if name in self.modules.keys():\n raise ValueError(\n \"build(): Module named '{}' already exists\".format(name)\n )\n self.modules[name] = cls(self.di)\n\n # build service map\n svc_map = {}\n for name, m in self.modules.items():\n services = getattr(m, \"services\", {})\n if type(services) is dict:\n svc_map.update(services)\n else:\n raise RuntimeError(\n \"build(): cannot load service map from module '{}'; attribute must be of type dict\".format(\n name\n )\n )\n # register service mapper\n self.di.add(DI_SERVICES, MapLoader(self.di, svc_map))\n\n # run factories\n for factory in factories:\n if type(factory) is str:\n # if factory is string, assume it is a path to a callable\n factory = load_class(factory, True)\n if not 
callable(factory):\n raise RuntimeError(\"build(): non-callable or non-existing factory\")\n else:\n factory(self.di)\n\n # parse events from modules\n evt_mgr = EventManager()\n for _, module in self.modules.items():\n module_events = getattr(module, \"events\", None)\n if isinstance(module_events, dict):\n for evt_name, evt_details in module_events.items():\n for priority, handlers in evt_details.items():\n for handler in handlers:\n evt_mgr.add_handler(evt_name, handler, int(priority))\n\n self.di.add(DI_EVENTS, evt_mgr)\n\n # register exception handler\n if self.cfg.has(CFG_HTTP_ERROR_HANLDER):\n handler = load_class(self.cfg.get(CFG_HTTP_ERROR_HANLDER), True)\n if not issubclass(handler, Injectable):\n raise RuntimeError(\"build(): HTTP_ERROR_HANDLER class does not extend Injectable\")\n # initialize & register handler\n handler = handler(self.di)\n self.di.add(DI_HTTP_ERROR_HANDLER, handler)\n\n # initialize modules\n for _, module in self.modules.items():\n module.build(self)\n\n return self.app\n\n def http(self, **kwargs):\n self.app.run(**kwargs)\n\n def cli_runner(self, command: str, args: list = None, **kwargs) -> int:\n # either console or inline commands\n if args is None:\n args = []\n\n # parameter parser\n parser = ArgParser(**kwargs)\n\n if \"writer\" in kwargs.keys():\n tty = kwargs[\"writer\"]\n else:\n tty = ConsoleWriter()\n\n # lookup handler\n for _, module in self.modules.items():\n if command in module.cmd.keys():\n handler = load_class(module.cmd[command])\n if not handler:\n raise RuntimeError(\n \"cli(): handler class '{}' not found\".format(\n module.cmd[command]\n )\n )\n if not issubclass(handler, CliCommand):\n raise RuntimeError(\n \"cli(): command handler does not extend CliCommand\"\n )\n handler = handler(self.di, writer=tty) # type: CliCommand\n if not handler.skipargs: # skipargs controls usage of argparser\n handler.arguments(parser)\n args = parser.parse_args(args)\n if parser.failed:\n # invalid/insufficient args\n tty.error(parser.error_message)\n parser.print_help(tty.stderr)\n return self.CLI_CMD_FAILED\n else:\n # skipargs is true, all argparsing is ignored\n # this allow for custom cli arg handling\n args = None\n\n if handler.run(args):\n return self.CLI_CMD_SUCCESS\n return self.CLI_CMD_FAILED\n\n # command not found\n tty.error(\"error executing '{}': command not found\".format(command))\n return self.CLI_CMD_NOT_FOUND\n\n def cli(self, **kwargs):\n \"\"\"\n Execute CLI commands\n :param kwargs: optional parameters for ArgumentParse\n :return:\n \"\"\"\n # default command when no args detected\n command = \"list\"\n # extract command if specified\n if len(sys.argv) > 1:\n command = str(sys.argv[1])\n\n if \"add_help\" not in kwargs.keys():\n kwargs[\"add_help\"] = False\n if \"usage\" not in kwargs.keys():\n kwargs[\"usage\"] = \"{} {} [OPTIONS...]\".format(\n os.path.basename(sys.argv[0]), command\n )\n\n # exit code directly maps return codes\n exit(self.cli_runner(command, sys.argv[2:], **kwargs))\n", "repo_name": "oddbit-project/pokie", "sub_path": "pokie/core/application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 7792, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rick.base.Container", "line_number": 41, "usage_type": "name"}, {"api_name": "rick.base.Di", "line_number": 42, "usage_type": "call"}, {"api_name": "pokie.constants.DI_CONFIG", "line_number": 46, "usage_type": "argument"}, {"api_name": 
"pokie.constants.DI_APP", "line_number": 47, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 65, "usage_type": "call"}, {"api_name": "pokie.constants.DI_FLASK", "line_number": 67, "usage_type": "argument"}, {"api_name": "pokie.constants.DI_SIGNAL", "line_number": 70, "usage_type": "argument"}, {"api_name": "signal.SignalManager", "line_number": 70, "usage_type": "call"}, {"api_name": "pokie.constants.DI_TTY", "line_number": 73, "usage_type": "argument"}, {"api_name": "rick.resource.console.ConsoleWriter", "line_number": 73, "usage_type": "call"}, {"api_name": "rick.util.loader.load_class", "line_number": 79, "usage_type": "call"}, {"api_name": "module.BaseModule", "line_number": 89, "usage_type": "argument"}, {"api_name": "pokie.constants.DI_SERVICES", "line_number": 112, "usage_type": "argument"}, {"api_name": "rick.base.MapLoader", "line_number": 112, "usage_type": "call"}, {"api_name": "rick.util.loader.load_class", "line_number": 118, "usage_type": "call"}, {"api_name": "rick.event.EventManager", "line_number": 125, "usage_type": "call"}, {"api_name": "pokie.constants.DI_EVENTS", "line_number": 134, "usage_type": "argument"}, {"api_name": "pokie.constants.CFG_HTTP_ERROR_HANLDER", "line_number": 137, "usage_type": "argument"}, {"api_name": "rick.util.loader.load_class", "line_number": 138, "usage_type": "call"}, {"api_name": "pokie.constants.CFG_HTTP_ERROR_HANLDER", "line_number": 138, "usage_type": "argument"}, {"api_name": "rick.mixin.Injectable", "line_number": 139, "usage_type": "argument"}, {"api_name": "pokie.constants.DI_HTTP_ERROR_HANDLER", "line_number": 143, "usage_type": "argument"}, {"api_name": "module.build", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 50, "usage_type": "name"}, {"api_name": "pokie.util.cli_args.ArgParser", "line_number": 160, "usage_type": "call"}, {"api_name": "rick.resource.console.ConsoleWriter", "line_number": 165, "usage_type": "call"}, {"api_name": "module.cmd.keys", "line_number": 169, "usage_type": "call"}, {"api_name": "module.cmd", "line_number": 169, "usage_type": "attribute"}, {"api_name": "rick.util.loader.load_class", "line_number": 170, "usage_type": "call"}, {"api_name": "module.cmd", "line_number": 170, "usage_type": "attribute"}, {"api_name": "module.cmd", "line_number": 174, "usage_type": "attribute"}, {"api_name": "command.CliCommand", "line_number": 177, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 213, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 219, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 223, "usage_type": "attribute"}]} +{"seq_id": "73831668966", "text": "import random\nimport sys\n\nfrom app import Mass, Spring\nfrom preprocess import read_file\n\nimport networkx as nx\n\nimport matplotlib.pyplot as plt\n\nnetwork = read_file()\n\nnodes = set()\nedges = dict()\n\nmasses = dict()\nlog = dict()\nsprings = list()\n\nfor entry in network:\n nodes.update([entry[0]])\n edges[frozenset(entry[:2])] = entry[2]\n\nfor index in nodes:\n masses[index] = Mass()\n masses[index].position = complex(random.random(), random.random())\n\nfor key in masses.keys():\n masses[key].fixed = True\n masses[key].position = 1 + 1j\n 
break\n\nfor key in masses.keys():\n log[key] = []\n\nfor item in edges:\n x, y = item\n\n springs.append(Spring(masses[x], masses[y], relaxed_length=1/edges[item]))\n\n# Simulate\nfor gen in range(4000):\n # print(f\"Generation: {gen}\")\n\n for key in masses.keys():\n masses[key].force = complex(0)\n\n for s in springs:\n s.update()\n\n for key in masses.keys():\n m = masses[key]\n\n m.update(dt=0.1)\n\n log[key].append(m.position)\n\nlast_state = dict()\n\nfor key in log.keys():\n x = []\n y = []\n\n for item in log[key]:\n x.append(item.real)\n y.append(item.imag)\n\n last_state[key] = [x[-1], y[-1]]\n\n plt.plot(x, y)\n\nplt.savefig(\"vis/trajectory.png\")\nplt.show()\n\nfor connection in edges.keys():\n m1, m2 = connection\n\n x1, y1 = last_state[m1]\n x2, y2 = last_state[m2]\n\n plt.plot([x1, x2], [y1, y2])\n\nfor key in last_state.keys():\n plt.plot(*last_state[key], \"ro\")\n\nplt.savefig(\"vis/sim_network.png\")\nplt.show()\n\nplotting = False\n\nif plotting:\n g = nx.Graph()\n\n for item in edges:\n x, y = item\n\n g.add_edge(x, y)\n\n nx.draw_spring(g)\n\n plt.savefig(\"vis/network.png\")\n", "repo_name": "sebastianjkern/spring-clustering", "sub_path": "cluster_examples_sandbox.py", "file_name": "cluster_examples_sandbox.py", "file_ext": "py", "file_size_in_byte": 1716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "preprocess.read_file", "line_number": 11, "usage_type": "call"}, {"api_name": "app.Mass", "line_number": 25, "usage_type": "call"}, {"api_name": "random.random", "line_number": 26, "usage_type": "call"}, {"api_name": "app.Spring", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "networkx.Graph", "line_number": 92, "usage_type": "call"}, {"api_name": "networkx.draw_spring", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "28180842099", "text": "import os\n\nimport youtube_dl\n\n\ndef get_options(path):\n complete_path = os.path.join(path, '%(title)s.%(ext)s')\n return {\n 'outtmpl': complete_path,\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n\n\nclass DownloadHandler(object):\n\n def __init__(self, path_: str, max_length_: 
int):\n self.path = os.path.join(path_, 'downloads')\n self.inc = 0\n self.max_length = max_length_\n\n def get_dir_name(self):\n res = os.path.join(self.path, 'download_' + str(self.inc))\n while os.path.exists(res):\n self.inc += 1\n res = os.path.join(self.path, 'download_' + str(self.inc))\n return res\n\n def download_videos(self, video_list):\n # make directory with self.inc\n directory = self.get_dir_name()\n os.mkdir(directory)\n # download videos in video_list\n with youtube_dl.YoutubeDL(get_options(directory)) as downloader:\n for link in video_list:\n dict_meta = downloader.extract_info(link, download=False)\n if dict_meta['duration'] > self.max_length * 60:\n title = dict_meta['title'] + '.toolarge'\n f = open(os.path.join(directory, title), \"x\")\n f.close()\n else:\n downloader.download([link])\n # return directory name\n return directory\n", "repo_name": "trapwired/EmailYoutubeDownload", "sub_path": "src/DownloadHandler.py", "file_name": "DownloadHandler.py", "file_ext": "py", "file_size_in_byte": 1531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 36, "usage_type": "call"}, {"api_name": "youtube_dl.YoutubeDL", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}]}
+{"seq_id": "5324813246", "text": "####################################################################################\n# Based on the Faster-LIME package (https://github.com/seansaito/Faster-LIME) \n# by author seansaito (https://github.com/seansaito)\n# Slight modifications have been made to make it compatible with scikit-explain.\n####################################################################################\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom abc import ABC, abstractmethod\nfrom sklearn.preprocessing import StandardScaler\n\nfrom ..common.utils import ridge_solve, kernel_fn, discretize, dict_disc_to_bin\n\nclass BaseTabularExplainer(ABC):\n\n def __init__(self, training_data, feature_names=None,\n categorical_names=None, \n discretizer='quartile', **kwargs):\n \"\"\"\n Args:\n training_data (np.ndarray): Training data to measure training data statistics\n feature_names (list): List of feature names\n categorical_feature_idxes (list): List of idxes of features that are categorical\n discretizer (str): Discretization resolution\n\n Assumptions:\n * Data only contains categorical and/or numerical data\n * Categorical data is already converted to ordinal labels (e.g. 
via scikit-learn's\n OrdinalEncoder)\n\n \"\"\"\n self.training_data = training_data\n self.num_features = self.training_data.shape[1]\n\n # Parse columns\n if feature_names is not None:\n # TODO input validation\n self.feature_names = list(feature_names)\n else:\n self.feature_names = list(range(self.num_features))\n \n categorical_feature_idxes = [feature_names.index(f) for f in categorical_names]\n \n self.categorical_feature_idxes = categorical_feature_idxes\n if self.categorical_feature_idxes:\n self.categorical_features = [self.feature_names[i] for i in\n self.categorical_feature_idxes]\n self.numerical_features = [f for f in self.feature_names if\n f not in self.categorical_features]\n self.numerical_feature_idxes = [idx for idx in range(self.num_features) if\n idx not in self.categorical_feature_idxes]\n else:\n self.categorical_features = []\n self.numerical_features = self.feature_names\n self.numerical_feature_idxes = list(range(self.num_features))\n\n # Some book-keeping: keep track of the original indices of each feature\n self.dict_num_feature_to_idx = {feature: idx for (idx, feature) in\n enumerate(self.numerical_features)}\n self.dict_feature_to_idx = {feature: idx for (idx, feature) in\n enumerate(self.feature_names)}\n self.list_reorder = [self.dict_feature_to_idx[feature] for feature in\n self.numerical_features + self.categorical_features]\n\n # Get training data statistics\n # Numerical feature statistics\n if self.numerical_features:\n training_data_num = self.training_data[:, self.numerical_feature_idxes]\n self.sc = StandardScaler(with_mean=False)\n self.sc.fit(training_data_num)\n self.percentiles = dict_disc_to_bin[discretizer]\n self.all_bins_num = np.percentile(training_data_num, self.percentiles, axis=0).T\n\n # Categorical feature statistics\n if self.categorical_features:\n training_data_cat = self.training_data[:, self.categorical_feature_idxes]\n training_data_cat = training_data_cat.astype(int)\n \n self.dict_categorical_hist = {\n feature: np.bincount(training_data_cat[:, idx]) / self.training_data.shape[0] for\n (idx, feature) in enumerate(self.categorical_features)\n }\n\n # Another mapping from feature to type\n self.dict_feature_to_type = {\n feature: 'categorical' if feature in self.categorical_features else 'numerical' for\n feature in self.feature_names}\n\n @abstractmethod\n def explain_instance(self, **kwargs):\n raise NotImplementedError\n\n\n\nclass FastLimeTabularExplainer(BaseTabularExplainer):\n \"\"\"\n A basic tabular explainer\n \"\"\"\n def explain_instance(self, data_row, predict_fn, label=0, num_samples=5000, num_features=10,\n kernel_width=None, **kwargs):\n \"\"\"\n Explain a prediction on a given instance\n\n Args:\n data_row (np.ndarray): Data instance to explain\n predict_fn (func): A function which provides predictions from the target model\n label (int): The class to explain\n num_samples (int): Number of synthetic samples to generate\n num_features (int): Number of top features to return\n kernel_width (Optional[float]): Width of the Gaussian kernel when weighting synthetic samples\n\n Returns:\n (list) Tuples of feature and score, sorted by the score\n \"\"\"\n # Scale the data\n data_row = data_row.reshape((1, -1))\n\n # Split data into numerical and categorical data and process\n list_orig = []\n list_disc = []\n if self.numerical_features:\n data_num = data_row[:, self.numerical_feature_idxes]\n data_num = self.sc.transform(data_num)\n data_synthetic_num = np.tile(data_num, (num_samples, 1))\n # Add noise\n 
data_synthetic_num = data_synthetic_num + np.random.normal(\n size=(num_samples, data_num.shape[1]))\n data_synthetic_num[0] = data_num.ravel()\n # Convert back to original domain\n data_synthetic_num_original = self.sc.inverse_transform(data_synthetic_num)\n # Discretize\n data_synthetic_num_disc, _ = discretize(data_synthetic_num_original, self.percentiles,\n self.all_bins_num)\n list_disc.append(data_synthetic_num_disc)\n list_orig.append(data_synthetic_num_original)\n\n if self.categorical_features:\n # Sample from training distribution for each categorical feature\n data_cat = data_row[:, self.categorical_feature_idxes]\n list_buf = []\n for feature in self.categorical_features:\n list_buf.append(np.random.choice(a=len(self.dict_categorical_hist[feature]),\n size=(1, num_samples),\n p=self.dict_categorical_hist[feature]))\n data_cat_original = data_cat_disc = np.concatenate(list_buf).T\n data_cat_original[0] = data_cat.ravel()\n data_cat_disc[0] = data_cat.ravel()\n list_disc.append(data_cat_disc)\n list_orig.append(data_cat_original)\n\n # Concatenate the data and reorder the columns\n data_synthetic_original = np.concatenate(list_orig, axis=1)\n data_synthetic_disc = np.concatenate(list_disc, axis=1)\n data_synthetic_original = data_synthetic_original[:, self.list_reorder]\n data_synthetic_disc = data_synthetic_disc[:, self.list_reorder]\n\n # Get model predictions (i.e. groundtruth)\n model_pred = predict_fn(data_synthetic_original)\n \n # For classification tasks.\n if np.ndim(model_pred)==2:\n model_pred=model_pred[:,label]\n\n\n # Get distances between original sample and neighbors\n if self.numerical_features:\n distances = cdist(data_synthetic_num[:1], data_synthetic_num).reshape(-1, 1)\n else:\n distances = cdist(data_synthetic_disc[:1], data_synthetic_disc).reshape(-1, 1)\n\n # Weight distances according to some kernel (e.g. 
Gaussian)\n if kernel_width is None:\n kernel_width = np.sqrt(data_row.shape[1]) * 0.75\n weights = kernel_fn(distances, kernel_width=kernel_width).ravel()\n\n # Turn discretized data into onehot\n data_synthetic_onehot = OneHotEncoder().fit_transform(data_synthetic_disc)\n\n # Solve\n tup = (data_synthetic_onehot, model_pred, weights)\n importances, bias = ridge_solve(tup)\n #print(importances.shape, bias.shape)\n \n #explanations = sorted(list(zip(self.feature_names, importances)),\n # key=lambda x: x[1], reverse=True)[:num_features]\n\n return importances, bias ", "repo_name": "monte-flora/scikit-explain", "sub_path": "skexplain/main/lime_fast.py", "file_name": "lime_fast.py", "file_ext": "py", "file_size_in_byte": 8674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": "52", "api": [{"api_name": "abc.ABC", "line_number": 16, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 71, "usage_type": "call"}, {"api_name": "common.utils.dict_disc_to_bin", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 82, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.tile", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "common.utils.discretize", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 163, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 175, "usage_type": "call"}, {"api_name": "common.utils.kernel_fn", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 179, "usage_type": "call"}, {"api_name": "common.utils.ridge_solve", "line_number": 183, "usage_type": "call"}]}
+{"seq_id": "74825380964", "text": "\"\"\"\nJuly Zhou\nSection AF\nCSE 163\nFinal Project\nThis file includes functions to clean and organize the data\nand plot graphs for research problem 1 and 2.\n\"\"\"\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\n\n\ndef dataclean(file):\n \"\"\"\n It cleans the txt data file which is in json format and returns\n a desirable dataframe without data that is not needed or missing.\n It also selects the appropriate data to present the result of\n research problem 1.\n \"\"\"\n with open(file) as json_file:\n data = json.load(json_file)\n df = pd.DataFrame.from_records(data)\n df1 = df.filter(items=['name', 'geography', 'data'])\n df2 = df1.dropna()\n df3 = df2[df2['name'] ==\n 'Transportation carbon dioxide emissions, '\n 'all fuels, Washington']\n df4 = df2[df2['name'] ==\n 'Industrial carbon dioxide emissions, '\n 'all fuels, Washington']\n data3 = df3['data']\n 
data4 = df4['data']\n wa3 = data3.to_numpy()[0]\n wa4 = data4.to_numpy()[0]\n year = []\n ems = []\n ems1 = []\n for i in wa3:\n year.append(i[0])\n ems.append(i[1])\n for i in wa4:\n ems1.append(i[1])\n tra = {'year': year, 'tra_emission': ems, 'ind_emission': ems1}\n dfwa = pd.DataFrame(tra, columns=['year', 'tra_emission',\n 'ind_emission'])\n dfwa = dfwa.sort_values(by=['year'], ascending=True)\n return dfwa\n\n\ndef prob1(dfwa):\n \"\"\"\n It takes a dataframe from the dataclean function that was well\n constructed for this function and plots a 1 by 2 line graph to\n present the comparison of transportation and industry CO2\n emission change over 40 years in WA.\n \"\"\"\n fig, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(10, 8))\n fig.suptitle('Transportation and industry CO2 emission over '\n '40 years in WA state')\n ax1.plot(dfwa['year'], dfwa['tra_emission'])\n ax1.set_title('Transportation CO2 emission')\n ax1.set_ylabel('CO2 emission, metric tons')\n ax1.grid(True)\n ax2.plot(dfwa['year'], dfwa['ind_emission'])\n ax2.set_title('Industry CO2 emission')\n ax2.set_ylabel('CO2 emission, metric tons')\n ax2.tick_params(axis='x', labelrotation=45)\n ax2.grid(True)\n plt.savefig('WA_trans_ind.png')\n\n\ndef prob2(file):\n \"\"\"\n It takes the txt data file which is in json format and cleans\n up the data followed by plotting a 1 by 2 pie chart to show the\n CO2 emission from different fuels in 2007 and 2017 in the US. It\n also prints out the % CO2 emission from different fuels.\n \"\"\"\n with open(file) as json_file:\n data = json.load(json_file)\n # data type is a list of dictionaries, 5119 items, 15 keys per item\n df = pd.DataFrame.from_records(data)\n df1 = df.filter(items=['name', 'geography', 'data'])\n df2 = df1.dropna()\n df2 = df1[df1['name'] ==\n 'Total carbon dioxide emissions from all sectors, '\n 'coal, United States']\n df3 = df1[df1['name'] ==\n 'Total carbon dioxide emissions from all sectors, '\n 'natural gas, United States']\n df4 = df1[df1['name'] ==\n 'Total carbon dioxide emissions from all sectors, '\n 'petroleum, United States']\n coal = df2['data'].to_numpy()[0]\n ngas = df3['data'].to_numpy()[0]\n petro = df4['data'].to_numpy()[0]\n ems = []\n ems1 = []\n ems2 = []\n for i in coal:\n ems.append(i[1])\n for i in ngas:\n ems1.append(i[1])\n for i in petro:\n ems2.append(i[1])\n emssum17 = [ems[0], ems1[0], ems2[0]]\n emssum07 = [ems[10], ems1[10], ems2[10]]\n total17 = np.sum(emssum17)\n total07 = np.sum(emssum07)\n df5 = pd.DataFrame({'CO2 emission in 2017, metric tons': emssum17,\n 'CO2 emission in 2007, metric tons': emssum07},\n index=['coal', 'natural gas', 'petroleum'])\n df5.plot.pie(subplots=True, figsize=(16, 8))\n plt.title('CO2 emission from different fuels in 2017 and 2007 in the US')\n print('total CO2 emission in 2017: ', round(total17, 2), 'metric tons')\n print('coal 2017: ', round(ems[0]/total17*100, 2), '%')\n print('natural gas 2017: ', round(ems1[0]/total17*100, 2), '%')\n print('petroleum 2017: ', round(ems2[0]/total17*100, 2), '%')\n print('total CO2 emission in 2007: ', round(total07, 2), 'metric tons')\n print('coal 2007: ', round(ems[10]/total07*100, 2), '%')\n print('natural gas 2007: ', round(ems1[10]/total07*100, 2), '%')\n print('petroleum 2007: ', round(ems2[10]/total07*100, 2), '%')\n plt.savefig('US_fuels.png')\n\n\ndef main():\n file = 'EMISS.json'\n df = dataclean(file)\n prob1(df)\n prob2(file)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Julyzzzzzz/classprojectCO2", "sub_path": 
"classprojectCO2/project.py", "file_name": "project.py", "file_ext": "py", "file_size_in_byte": 4773, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "json.load", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "26935797092", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"KronoPI_cybergenik\",\n version=\"0.0.1\",\n author=\"Luciano Remes\",\n author_email=\"cybergenik@gmail.com\",\n description=\"Generates random 4 digit number using user time and date from PI digits\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Cybergenik/KronoPI\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)", "repo_name": "Cybergenik/KronoPI", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "4663737570", "text": "import lxml.html as lh\nfrom pathlib import Path\n\nfor f in Path(\"api-docs/docs-2.4/sphinx/html\").glob(\"**/*.html\"):\n print(f)\n doc = lh.parse(str(f))\n head = doc.find(\"head\")\n for l in head.findall(\"link\"):\n if \"jsdelivr\" not in l.get(\"href\", \"\"):\n continue\n l.set(\"href\", \"https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css\")\n l.set(\n \"integrity\",\n \"sha384-zB1R0rpPzHqg7Kpt0Aljp8JPLqbXI3bhnPWROx27a9N0Ll6ZP/+DiW/UqRcLbRjq\",\n )\n l.set(\"crossorigin\", \"anonymous\")\n\n for l in head.findall(\"script\"):\n if \"jsdelivr\" not in l.get(\"src\", \"\"):\n continue\n if \"katex.min.js\" in l.get(\"src\"):\n l.set(\"src\", \"https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.js\")\n l.set(\n \"integrity\",\n 
\"sha384-y23I5Q6l+B6vatafAwxRu/0oK/79VlbSz7Q9aiSZUvyWYIYsd+qj+o24G5ZU2zJz\",\n )\n elif \"auto-render.min.js\" in l.get(\"src\"):\n l.set(\n \"src\",\n \"https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/contrib/auto-render.min.js\",\n )\n l.set(\n \"integrity\",\n \"sha384-kWPLUVMOks5AQFrykwIup5lo0m3iMkkHrD0uJ4H5cjeGihAutqP0yW0J6dpFiVkI\",\n )\n\n l.set(\"defer\", None)\n l.set(\"crossorigin\", \"anonymous\")\n\n with open(f, \"w\") as file_obj:\n file_obj.write(lh.tostring(doc).decode(\"utf-8\"))\n", "repo_name": "Cantera/api-docs", "sub_path": "bump_katex.py", "file_name": "bump_katex.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pathlib.Path", "line_number": 4, "usage_type": "call"}, {"api_name": "lxml.html.parse", "line_number": 6, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 6, "usage_type": "name"}, {"api_name": "lxml.html.tostring", "line_number": 41, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "39177158566", "text": "from django.db import models\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel\n\nfrom wagtailcomments.models import COMMENT_MODEL_SETTING, BaseComment\n\n\nclass Comment(BaseComment):\n body = models.TextField()\n\n form_class = 'wagtailcomments.basic.forms.CommentForm'\n\n panels = BaseComment.panels + [\n FieldPanel('body'),\n ]\n\n class Meta:\n swappable = COMMENT_MODEL_SETTING\n", "repo_name": "neon-jungle/wagtailcomments", "sub_path": "wagtailcomments/basic/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "50", "api": [{"api_name": "wagtailcomments.models.BaseComment", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 8, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "wagtailcomments.models.BaseComment.panels", "line_number": 12, "usage_type": "attribute"}, {"api_name": "wagtailcomments.models.BaseComment", "line_number": 12, "usage_type": "name"}, {"api_name": "wagtail.wagtailadmin.edit_handlers.FieldPanel", "line_number": 13, "usage_type": "call"}, {"api_name": "wagtailcomments.models.COMMENT_MODEL_SETTING", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "39132873215", "text": "from torch.utils.data import Dataset\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nfrom typing import List\r\ndef get_all_file_in_dir(dir:str):\r\n import os\r\n return [os.path.join(dir,file) for file in os.listdir(dir)]\r\n\r\ndef get_rgb_mean_and_std(dataset:Dataset|List[Dataset]):\r\n if isinstance(dataset,Dataset):\r\n print(\"a dataset\")\r\n mean = [0, 0, 0]\r\n std = [0, 0, 0]\r\n num_imgs = len(dataset)\r\n for k in range(num_imgs):\r\n img, tar = dataset[k]\r\n for i in range(3):\r\n mean[i] += img[i, :, :].mean()\r\n std[i] += img[i, :, :].std()\r\n mean = np.array(mean) / num_imgs\r\n std = np.array(std) / num_imgs\r\n return mean, std\r\n else:\r\n print(\"a list of dataset\")\r\n mean = [0, 0, 0]\r\n std = [0, 0, 0]\r\n num_imgs = 0\r\n for dataset_ in dataset:\r\n mean_, std_ = get_rgb_mean_and_std(dataset_)\r\n mean = mean*num_imgs + mean_*len(dataset_)\r\n std = std*num_imgs + std_*len(dataset_)\r\n num_imgs += len(dataset_)\r\n return mean, std\r\n\r\n", "repo_name": 
"geraltigas/ML_code_and_utils", "sub_path": "myutils/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 1135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 10, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "23777145294", "text": "from django.conf.urls import url, include\n\n\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework.authtoken import views as drf_views\nfrom rest_framework.routers import DefaultRouter\n\nfrom .views import AbapUserViewSet, CategoryViewSet, ComponentViewSet, HostViewSet, LandscapeRoleViewSet, NoteViewSet, AbapUserList\n\n\n# Create a router and register our viewsets with it.\ncorerouter = DefaultRouter()\n\n\n\n\n# Register SAP Settings\nsaprouter = DefaultRouter()\nsaprouter.register(r'abapusers', AbapUserViewSet)\nsaprouter.register(r'categories', CategoryViewSet)\nsaprouter.register(r'components', ComponentViewSet)\nsaprouter.register(r'hosts', HostViewSet)\nsaprouter.register(r'landscaperoles', LandscapeRoleViewSet)\nsaprouter.register(r'notes', NoteViewSet)\n\n\nurlpatterns = [\n\n url(r'^access_token/', drf_views.obtain_auth_token),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),\n url(r'^', include(corerouter.urls)),\n url(r'^sap/', include(saprouter.urls)),\n\n\n]\n", "repo_name": "rafaelbasile/bratzsoft.com", "sub_path": "bratzsoft/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 18, "usage_type": "call"}, {"api_name": "views.AbapUserViewSet", "line_number": 19, "usage_type": "argument"}, {"api_name": "views.CategoryViewSet", "line_number": 20, "usage_type": "argument"}, {"api_name": "views.ComponentViewSet", "line_number": 21, "usage_type": "argument"}, {"api_name": "views.HostViewSet", "line_number": 22, "usage_type": "argument"}, {"api_name": "views.LandscapeRoleViewSet", "line_number": 23, "usage_type": "argument"}, {"api_name": "views.NoteViewSet", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "rest_framework.authtoken.views.obtain_auth_token", "line_number": 29, "usage_type": "attribute"}, {"api_name": "rest_framework.authtoken.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, 
"usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "14548158793", "text": "import cv2\nimport helpers # helper functions\n\nimport random\nimport matplotlib \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n#%matplotlib inline\n\n# Image data directories\nIMAGE_DIR_TRAINING = \"traffic_light_images/training/\"\nIMAGE_DIR_TEST = \"traffic_light_images/test/\"\n\nIMAGE_LIST = helpers.load_dataset(IMAGE_DIR_TRAINING)\n\n## TODO: Write code to display an image in IMAGE_LIST (try finding a yellow traffic light!)\n## TODO: Print out 1. The shape of the image and 2. The image's label\n\n# ------------------- Global Definitions -------------------\n\n# Definition of the 3 possible traffic light states and theirs label\ntl_states = ['red', 'yellow', 'green']\ntl_state_red = 0\ntl_state_yellow = 1\ntl_state_green = 2\ntl_state_count = 3\ntl_state_red_string = tl_states[tl_state_red]\ntl_state_yellow_string = tl_states[tl_state_yellow]\ntl_state_green_string = tl_states[tl_state_green]\n\n# Index of image and label in image set\nimage_data_image_index = 0\nimage_data_label_index = 1\n\n# Normalized image size\ndefault_image_size = 32\n\n# ---------------- End of Global Definitions ---------------\n\nfig = plt.figure(figsize=(20,40))\n\nexample_count = 24\nif example_count>len(IMAGE_LIST):\n example_count = len(IMAGE_LIST)\n \nchosen = set()\n\n# print 24 random examples, prevent double choice\nfor example_index in range(example_count):\n tries = 0\n \n while tries<2:\n index = 0\n tries += 1\n if example_index==0: # first choice should be a yellow light\n for iterator in range(len(IMAGE_LIST)):\n if IMAGE_LIST[iterator][image_data_label_index]==tl_state_yellow_string:\n index = iterator\n break\n else: # all other choices are random\n index = random.randint(0, len(IMAGE_LIST)-1)\n \n if index in chosen: # try a second time if chosen already\n continue\n chosen.add(index)\n \n example_image = IMAGE_LIST[index][image_data_image_index]\n result = \"{}, shape: {}\".format(IMAGE_LIST[index][image_data_label_index],example_image.shape)\n ax = fig.add_subplot(example_count, 4, example_index+1, title=result)\n ax.imshow(example_image.squeeze())\n \nfig.tight_layout(pad=0.7)\n\ndef standardize(image_list):\n \n # Empty image data array\n standard_list = []\n\n # Iterate through all the image-label pairs\n for item in image_list:\n image = item[0]\n label = item[1]\n\n # Standardize the image\n standardized_im = standardize_input(image)\n\n # One-hot encode the label\n one_hot_label = one_hot_encode(label) \n\n # Append the image, and it's one hot encoded label to the full, processed list of image data \n standard_list.append((standardized_im, one_hot_label))\n \n return standard_list\n\n# Standardize all training images\nSTANDARDIZED_LIST = standardize(IMAGE_LIST)\n\n# TODO: Display a standardized image and its label\n\nfig = plt.figure(figsize=(20,40))\n\n# 12 example pairs\nexample_count = 12\nif example_count>len(IMAGE_LIST):\n example_count = len(IMAGE_LIST)\ntotal_count = example_count*2\n\nchosen = set() # use set to prevent double random selection\n\nfor example_index in range(example_count):\n\n tries = 0\n index = 0\n \n # select next image\n while 
tries<2:\n tries += 1\n index = random.randint(0, len(IMAGE_LIST)-1)\n \n if index in chosen:\n continue\n chosen.add(index)\n \n eff_index = example_index*2\n \n # print original\n example_image = IMAGE_LIST[index][image_data_image_index]\n result = \"{} {}\".format(IMAGE_LIST[index][image_data_label_index],example_image.shape)\n ax = fig.add_subplot(total_count, 4, eff_index+1, title=result)\n ax.imshow(example_image.squeeze())\n \n # print standardized counterpiece\n eff_index += 1\n example_image = STANDARDIZED_LIST[index][image_data_image_index]\n result = \"{} {}\".format(STANDARDIZED_LIST[index][image_data_label_index],example_image.shape)\n ax = fig.add_subplot(total_count, 4, eff_index+1, title=result)\n ax.imshow(example_image.squeeze())\n\nfig.tight_layout(pad=0.7)", "repo_name": "xdoestech/PythonTutorials", "sub_path": "TrafficLightClassifier_demo/Load_Visualize_data.py", "file_name": "Load_Visualize_data.py", "file_ext": "py", "file_size_in_byte": 4167, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "helpers.load_dataset", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "14594163203", "text": "import sys\n\nfrom prettytable import PrettyTable\n\n\nclass NodeLim:\n def __init__(self, node_lim_entry):\n self.index = node_lim_entry[0]\n self.active = node_lim_entry[1].split(':')[2]\n self.standby = node_lim_entry[2].split(':')[2]\n self.primary = node_lim_entry[3].split(':')[2]\n self.secondary = node_lim_entry[4].split(':')[2]\n self.state = node_lim_entry[9]\n\n\nclass TsmTsft:\n def __init__(self, tsm_tsft_entry):\n self.tsid = tsm_tsft_entry[0]\n self.valid_prim = tsm_tsft_entry[1]\n self.valid_sec = tsm_tsft_entry[2]\n self.papid_prim = tsm_tsft_entry[3]\n self.papid_sec = tsm_tsft_entry[4]\n self.board_prim = (int(self.papid_prim.split('/')[0]) + 1) if (\n self.valid_prim == '1') else 0\n self.board_sec = (int(self.papid_sec.split('/')[0]) + 1) if (\n self.valid_sec == '1') else 0\n\n\nclass MergeNodeLim:\n def __init__(self, active, standby, primary, secondary, state,\n index_range=''):\n self.active = active\n self.standby = standby\n self.primary = primary\n self.secondary = secondary\n self.state = state\n self.index_range = index_range\n\n\nclass MergeTsmTsft:\n def __init__(self, valid_prim, valid_sec, board_prim, board_sec,\n tsid_range=''):\n self.valid_prim = valid_prim\n self.valid_sec = valid_sec\n self.board_prim = board_prim\n self.board_sec = board_sec\n self.tsid_range = tsid_range\n\n\ndef build_node_lim_list(filename):\n with open(filename) as file_object:\n\n node_lim_list = []\n\n while True:\n line = file_object.readline()\n if line.find('UP Index') != -1:\n break\n\n while True:\n line = file_object.readline()\n node_lim_entry = line.split()\n if len(node_lim_entry) > 0:\n node_lim = NodeLim(node_lim_entry)\n node_lim_list.append(node_lim)\n if int(node_lim_entry[0]) == 2047:\n break\n\n return node_lim_list\n\n\ndef build_tsm_tsft_list(filename):\n with open(filename) as file_object:\n\n tsm_tsft_list = 
[]\n\n while True:\n line = file_object.readline()\n if line.find('TSID Valid Primary') != -1:\n break\n\n while True:\n line = file_object.readline()\n tsm_tsft_entry_prim = line.split()\n if len(tsm_tsft_entry_prim) > 0:\n line = file_object.readline()\n tsm_tsft_entry_sec = line.split()\n tsm_tsft_entry = [tsm_tsft_entry_prim[0],\n tsm_tsft_entry_prim[1],\n tsm_tsft_entry_sec[1],\n tsm_tsft_entry_prim[5],\n tsm_tsft_entry_sec[5]]\n tsm_tsft = TsmTsft(tsm_tsft_entry)\n tsm_tsft_list.append(tsm_tsft)\n if int(tsm_tsft_entry_prim[0]) == 2047:\n break\n\n return tsm_tsft_list\n\n\ndef analyze_node_lim_list(node_lim_list):\n merge_node_lim_list = []\n\n active = ''\n standby = ''\n primary = ''\n secondary = ''\n state = ''\n index = ''\n\n for node_lim_entry in node_lim_list:\n if (node_lim_entry.active != active) or (\n node_lim_entry.standby != standby) or (\n node_lim_entry.primary != primary) or (\n node_lim_entry.secondary != secondary) or (\n node_lim_entry.state != state):\n active = node_lim_entry.active\n standby = node_lim_entry.standby\n primary = node_lim_entry.primary\n secondary = node_lim_entry.secondary\n state = node_lim_entry.state\n index = node_lim_entry.index\n merge_node_lim = MergeNodeLim(active, standby, primary,\n secondary, state)\n\n merge_node_lim_list.append(merge_node_lim)\n else:\n merge_node_lim_list[-1].index_range = index + ' - ' + \\\n node_lim_entry.index\n\n format_node_lim_list(merge_node_lim_list)\n\n\ndef analyze_tsm_tsft_list(tsm_tsft_list):\n merge_tsm_tsft_list = []\n\n valid_prim = ''\n valid_sec = ''\n board_prim = ''\n board_sec = ''\n tsid = ''\n\n for tsm_tsft_entry in tsm_tsft_list:\n if (tsm_tsft_entry.valid_prim != valid_prim) or (\n tsm_tsft_entry.valid_sec != valid_sec) or (\n tsm_tsft_entry.board_prim != board_prim) or (\n tsm_tsft_entry.board_sec != board_sec):\n valid_prim = tsm_tsft_entry.valid_prim\n valid_sec = tsm_tsft_entry.valid_sec\n board_prim = tsm_tsft_entry.board_prim\n board_sec = tsm_tsft_entry.board_sec\n tsid = tsm_tsft_entry.tsid\n merge_tsm_tsft = MergeTsmTsft(valid_prim, valid_sec, board_prim,\n board_sec)\n\n merge_tsm_tsft_list.append(merge_tsm_tsft)\n else:\n merge_tsm_tsft_list[-1].tsid_range = tsid + ' - ' + \\\n tsm_tsft_entry.tsid\n\n format_tsm_tsft__list(merge_tsm_tsft_list)\n\n\ndef format_node_lim_list(merge_node_lim_list):\n with open('node_lim_and_tsm_tsft_analyze_result.txt', 'a') as file_object:\n\n headers = ['Index Range', 'Primary', 'Secondary', 'Active', 'Standby',\n 'State']\n table = PrettyTable(headers)\n for merge_node_lim in merge_node_lim_list:\n row = [merge_node_lim.index_range, merge_node_lim.primary,\n merge_node_lim.secondary, merge_node_lim.active,\n merge_node_lim.standby, merge_node_lim.state]\n table.add_row(row)\n\n file_object.write(str(table) + '\\n')\n\n\ndef format_tsm_tsft__list(merge_tsm_tsft_list):\n with open('node_lim_and_tsm_tsft_analyze_result.txt', 'a') as file_object:\n\n headers = ['Tsid Range', 'Primary', 'Secondary', 'Primary Valid',\n 'Secondary Valid']\n table = PrettyTable(headers)\n for merge_tsm_tsft in merge_tsm_tsft_list:\n row = [merge_tsm_tsft.tsid_range, merge_tsm_tsft.board_prim,\n merge_tsm_tsft.board_sec, merge_tsm_tsft.valid_prim,\n merge_tsm_tsft.valid_sec]\n table.add_row(row)\n\n file_object.write(str(table) + '\\n')\n\n\nif len(sys.argv) > 2:\n node_lim_master_table_filename = sys.argv[1]\n card_fabl_tsm_tsft_table_filename = sys.argv[2]\n\n node_lim_list_built = build_node_lim_list(node_lim_master_table_filename)\n tsm_tsft_list_built = 
build_tsm_tsft_list(\n card_fabl_tsm_tsft_table_filename)\n\n analyze_node_lim_list(node_lim_list_built)\n analyze_tsm_tsft_list(tsm_tsft_list_built)\n", "repo_name": "crystalDf/EPGAnalyze", "sub_path": "node_lim_and_tsm_tsft_analyze/node_lim_and_tsm_tsft_analyze.py", "file_name": "node_lim_and_tsm_tsft_analyze.py", "file_ext": "py", "file_size_in_byte": 6915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "prettytable.PrettyTable", "line_number": 169, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 184, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 194, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 195, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 196, "usage_type": "attribute"}]} +{"seq_id": "28075391376", "text": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom .forms import IngredientsForm, RecipesForm, WeekdayForm\nfrom .models import WeekDays\n\n\ndef home(request):\n context = {\n 'weekdays': WeekDays.objects.all(),\n }\n\n return render(request, 'madplan/home.html', context)\n\n\ndef add_weekday(request):\n if request.method == 'POST':\n form = WeekdayForm(request.POST)\n\n if form.is_valid():\n form.clean()\n form.save()\n return HttpResponseRedirect('/')\n else:\n form = WeekdayForm()\n\n return render(request, 'madplan/add_weekday.html', {'form': form})\n\n\ndef add_ingredients(request):\n if request.method == 'POST':\n form = IngredientsForm(request.POST)\n\n if form.is_valid():\n form.clean()\n form.save()\n return HttpResponseRedirect('/')\n else:\n form = IngredientsForm()\n\n return render(request, 'madplan/add_ingredients.html', {'form': form})\n\n\ndef add_recipes(request):\n if request.method == 'POST':\n form = RecipesForm(request.POST)\n\n if form.is_valid():\n form.clean()\n form.save()\n return HttpResponseRedirect('/')\n else:\n form = RecipesForm()\n\n return render(request, 'madplan/add_recipes.html', {'form': form})\n", "repo_name": "razzervision/Mealplan", "sub_path": "madplan/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "models.WeekDays.objects.all", "line_number": 10, "usage_type": "call"}, {"api_name": "models.WeekDays.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.WeekDays", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "forms.WeekdayForm", "line_number": 18, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 23, "usage_type": "call"}, {"api_name": "forms.WeekdayForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "forms.IngredientsForm", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 37, "usage_type": "call"}, {"api_name": "forms.IngredientsForm", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 41, "usage_type": "call"}, {"api_name": "forms.RecipesForm", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 51, "usage_type": "call"}, {"api_name": "forms.RecipesForm", 
"line_number": 53, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "37147572518", "text": "\"\"\"\r\nThis is the main monitoring program\r\ncron: 1 1 1 1 1\r\nnew Env('tg监控程序')\r\n\"\"\"\r\n\r\nimport datetime\r\nimport re\r\nfrom telethon import events, TelegramClient\r\nimport asy\r\nfrom ql_invoke import Invoke\r\n\r\napi_id = asy.read_config(\"api_id\")\r\napi_hash = asy.read_config(\"api_hash\")\r\nclient = TelegramClient(\"session_name\", api_id, api_hash)\r\n\r\n\r\n# 获取消息\r\n@client.on(events.NewMessage(chats=asy.read_config(\"sur_channel_id\")))\r\nasync def get_the_message(event):\r\n val = event.message.message\r\n await judgment_variables(val)\r\n # 采用监听方式采集消息\r\n\r\n\r\nasync def judgment_variables(val):\r\n part_text = re.compile(r'export (.*?)=')\r\n value_list = part_text.findall(val)\r\n rule = re.compile(r'export (.*\")')\r\n variables_list = rule.findall(val)\r\n script_file = await condition(value_list, variables_list)\r\n if script_file:\r\n task_info = await invoke.get_task_info(script_file)\r\n if await invoke.task_status(task_info['id'], value_list, variables_list):\r\n await send_message(\"开始运行任务,没有任务结束提示,后期更新加上\")\r\n else:\r\n print(\"没有发现变量可调用的脚本,可能是脚本库没有标注出来,自行手动添加即可,功能待完善\")\r\n\r\n\r\nasync def send_message(message):\r\n await client.send_message(asy.read_config(\"log_channel_id\"), message)\r\n\r\n\r\nasync def condition(cod1, cod2):\r\n if cod1 and cod2 is not None:\r\n script_file = await asy.read_varname(cod1[0])\r\n if script_file:\r\n return script_file\r\n # if await invoke.save_config(variables=cod1, value=cod2):\r\n # await send_message(f\"已将 {cod2} 写入配置文件中\")\r\n # return script_file\r\n # else:\r\n # await send_message(f\"{cod2} 配置文件更新失败\")\r\n # return None\r\n else:\r\n await send_message(f\"未匹配到 {cod1} 可能是脚本库没有标注出来,自行手动添加即可\")\r\n return None\r\n\r\n\r\nif __name__ == '__main__':\r\n # 获取当前时间\r\n now = datetime.datetime.now()\r\n print(\"------开始运行!-------\")\r\n now_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(f\"运行时间于:{now_time}\")\r\n invoke = Invoke()\r\n with client:\r\n client.loop.run_until_complete(send_message(f\"hello!您在{now_time}上线啦\"))\r\n client.run_until_disconnected()\r\n", "repo_name": "wangquanfugui233/JDVariableMonitoring", "sub_path": "surveillance/surveillance_tg.py", "file_name": "surveillance_tg.py", "file_ext": "py", "file_size_in_byte": 2439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "asy.read_config", "line_number": 13, "usage_type": "call"}, {"api_name": "asy.read_config", "line_number": 14, "usage_type": "call"}, {"api_name": "telethon.TelegramClient", "line_number": 15, "usage_type": "call"}, {"api_name": "telethon.events.NewMessage", "line_number": 19, "usage_type": "call"}, {"api_name": "telethon.events", "line_number": 19, "usage_type": "name"}, {"api_name": "asy.read_config", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 27, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 29, "usage_type": "call"}, {"api_name": "asy.read_config", "line_number": 41, "usage_type": "call"}, {"api_name": "asy.read_varname", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ql_invoke.Invoke", "line_number": 66, "usage_type": "call"}]} +{"seq_id": 
"12178193388", "text": "import unreal\nimport importlib, pathlib, os, signal, venv, sys\nimport dependency_manager\n#import model_asset_tools\n\n\n# Get all python files in a folder\ndef get_py_files(src, ends_with):\n #cwd = os.getcwd() # Current Working directory\n py_files = [] \n for root, dirs, files in os.walk(src):\n for file in files:\n if file.endswith(ends_with):\n py_files.append(os.path.join(root, file))\n print(os.path.join(root, file))\n return py_files\n\n# Dynamically import a list of python files\ndef dynamic_import(module_name, py_path):\n module_spec = importlib.util.spec_from_file_location(module_name, py_path)\n module = importlib.util.module_from_spec(module_spec)\n module_spec.loader.exec_module(module)\n return module\n\n# Recursively load all python files in a directory\ndef dynamic_import_from_src(src, ends_with, star_import = False):\n my_py_files = get_py_files(src, ends_with)\n for py_file in my_py_files:\n module_name = os.path.split(py_file)[-1].strip(\".py\")\n imported_module = dynamic_import(module_name, py_file)\n if star_import:\n for obj in dir(imported_module):\n globals()[obj] = imported_module.__dict__[obj]\n else:\n globals()[module_name] = imported_module\n return\n\n# Load all manifest files that should be located next to each bridge file\ndef load_manifests(manifest_dir):\n # Get manifest objects\n manifests = {}\n manifest_files = get_py_files(manifest_dir, \"_dependencies.py\")\n for manifest in manifest_files:\n manifest_module_name = os.path.split(manifest)[-1][:-len(\"_dependencies.py\")]\n imported_module = dynamic_import(manifest_module_name, manifest)\n\n print(manifest_module_name)\n manifest = unreal.DependencyManifest()\n manifest_entries = imported_module.GetDependencies()\n manifest.set_editor_property(\"ManifestEntries\", manifest_entries)\n manifests[manifest_module_name] = manifest\n\n return manifests\n\n\n# Replace print() command to fix Unreal flagging every Python print call as an error (doesn't work)\nprint = unreal.log\n\n# Redirect missing SIGKILL signal on windows to SIGTERM\nsignal.SIGKILL = signal.SIGTERM\n\n# Get plugin startup options\ndependency_options = unreal.StableDiffusionBlueprintLibrary.get_dependency_options()\nplugin_options = unreal.StableDiffusionBlueprintLibrary.get_plugin_options()\n\n# Set up virtual environment\nlegacy_site_packages = pathlib.Path(unreal.Paths().engine_saved_dir()) / \"StableDiffusionToolsPyEnv\"\ndefault_site_packages = pathlib.Path(__file__).parent.parent.parent / \"FrozenPythonDependencies\"\nfrozen_python_deps_available = True if os.path.exists(default_site_packages) else False\n\nif not plugin_options.get_freeze_dependencies() and not os.path.exists(default_site_packages):\n default_site_packages = plugin_options.get_python_site_packages_override_path().path\n\nenv_dir = plugin_options.get_python_site_packages_override_path().path if plugin_options.get_use_override_python_site_packages_path() else default_site_packages\nenv_site_packages = pathlib.Path(env_dir) / \"Lib\" / \"site-packages\"\nprint(f\"Dependency installation dir: {env_site_packages}\")\n\n# Setup a new virtual environment to contain our downloaded python dependencies\nif not os.path.exists(env_site_packages):\n os.makedirs(env_site_packages)\nsys.path.append(str(env_site_packages))\n\n# Load dependency manager\ndep_manager = dependency_manager.PyDependencyManager()\ndep_manager.set_editor_property(\"PluginSitePackages\", str(env_site_packages))\ndep_manager.set_editor_property(\"FrozenDependenciesAvailable\", 
frozen_python_deps_available)\nsubsystem = unreal.get_editor_subsystem(unreal.StableDiffusionSubsystem)\nsubsystem.set_editor_property(\"DependencyManager\", dep_manager)\n\n# Nuke dependencies before loading them if we're trying to reset the editor dependencies\nreset_deps = dependency_options.get_editor_property(\"ClearDependenciesOnEditorRestart\")\nreset_system_deps = dependency_options.get_editor_property(\"ClearSystemDependenciesOnEditorRestart\")\n\nif reset_deps or reset_system_deps:\n print(f\"Clearing python dependendencies\")\n dep_manager.clear_all_dependencies(env_dir, reset_system_deps)\n\n # Clear out the old engine-level site-packages directory in case it doesn't match the new-style\n if os.path.exists(legacy_site_packages):\n print(f\"Removing old legacy site-packages from {legacy_site_packages}\")\n dep_manager.clear_all_dependencies(legacy_site_packages, False)\n \n # Flag dependencies as cleared so we don't keep clearing them every restart\n dep_manager.finished_clearing_dependencies()\n\n# Location of plugin bridge files\nbridge_dir = os.path.join(pathlib.Path(__file__).parent.resolve(), \"bridges\")\n\n# Loads a map of all bridge names and dependencies that need to be installed to import/run the bridge\ndep_manager.set_editor_property(\"DependencyManifests\", load_manifests(bridge_dir))\n\n# Import all bridges so we can pick which derived class we want to use in the plugin editor settings\nif dependency_options.get_editor_property(\"AutoLoadBridgeScripts\"):\n dynamic_import_from_src(os.path.join(pathlib.Path(__file__).parent.resolve(), \"bridges\"), \"Bridge.py\")\n\n# Let Unreal know we've finished loading our init script\nsubsystem.set_editor_property(\"PythonLoaded\", True)\nsubsystem.on_python_loaded_ex.broadcast()\n", "repo_name": "Mystfit/Unreal-StableDiffusionTools", "sub_path": "StableDiffusionTools/Content/Python/init_unreal.py", "file_name": "init_unreal.py", "file_ext": "py", "file_size_in_byte": 5389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 157, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.walk", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "importlib.util.spec_from_file_location", "line_number": 20, "usage_type": "call"}, {"api_name": "importlib.util", "line_number": 20, "usage_type": "attribute"}, {"api_name": "importlib.util.module_from_spec", "line_number": 21, "usage_type": "call"}, {"api_name": "importlib.util", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "unreal.DependencyManifest", "line_number": 48, "usage_type": "call"}, {"api_name": "unreal.log", "line_number": 57, "usage_type": "attribute"}, {"api_name": "signal.SIGKILL", "line_number": 60, "usage_type": "attribute"}, {"api_name": "signal.SIGTERM", "line_number": 60, "usage_type": "attribute"}, {"api_name": "unreal.StableDiffusionBlueprintLibrary.get_dependency_options", "line_number": 63, "usage_type": "call"}, {"api_name": 
"unreal.StableDiffusionBlueprintLibrary", "line_number": 63, "usage_type": "attribute"}, {"api_name": "unreal.StableDiffusionBlueprintLibrary.get_plugin_options", "line_number": 64, "usage_type": "call"}, {"api_name": "unreal.StableDiffusionBlueprintLibrary", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 67, "usage_type": "call"}, {"api_name": "unreal.Paths", "line_number": 67, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 80, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 81, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "dependency_manager.PyDependencyManager", "line_number": 84, "usage_type": "call"}, {"api_name": "unreal.get_editor_subsystem", "line_number": 87, "usage_type": "call"}, {"api_name": "unreal.StableDiffusionSubsystem", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "31227732364", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport colorfield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='subcategory',\n name='color',\n field=colorfield.fields.ColorField(null=True, verbose_name=b'Subcategory Color', max_length=10, blank=True),\n preserve_default=True,\n ),\n ]\n", "repo_name": "SparmedDev/sparmed01", "sub_path": "shop/migrations/0002_subcategory_color.py", "file_name": "0002_subcategory_color.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "colorfield.fields.fields.ColorField", "line_number": 18, "usage_type": "call"}, {"api_name": "colorfield.fields.fields", "line_number": 18, "usage_type": "attribute"}, {"api_name": "colorfield.fields", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "37191839069", "text": "from asciimatics.widgets 
import Frame, ListBox, Layout, Divider, Text, \\\n Button, TextBox, Widget\nfrom asciimatics.scene import Scene\nfrom asciimatics.screen import Screen\nfrom asciimatics.exceptions import ResizeScreenError, NextScene, StopApplication\nimport sys\nimport os\nimport sqlite3\n\n\n# noinspection SqlResolve\nclass Book(object):\n def __init__(self):\n\n # Create /db folder if not exist.\n if not os.path.exists('db'):\n os.makedirs('db')\n\n # Create a database in /db folder\n self._db = sqlite3.connect(\"./db/library.db\")\n self._db.row_factory = sqlite3.Row\n\n # Create table if not exist.\n self._db.cursor().execute('''\n CREATE TABLE IF NOT EXISTS books(\n id INTEGER PRIMARY KEY,\n title TEXT,\n author TEXT,\n genre TEXT,\n year TEXT,\n publisher TEXT)\n ''')\n self._db.commit()\n\n # Current book id.\n self.current_id = None\n\n def add(self, book):\n self._db.cursor().execute('''\n INSERT INTO books(title, author, genre, year, publisher)\n VALUES(:title, :author, :genre, :year, :publisher)''',\n book)\n self._db.commit()\n\n def get_summary(self):\n return self._db.cursor().execute(\n \"SELECT title, id from books\").fetchall()\n\n def get_book(self, book_id):\n return self._db.cursor().execute(\n \"SELECT * from books WHERE id=:id\", {\"id\": book_id}).fetchone()\n\n def get_current_book(self):\n if self.current_id is None:\n return {\"title\": \"\", \"author\": \"\", \"genre\": \"\", \"year\": \"\", \"publisher\": \"\"}\n else:\n return self.get_book(self.current_id)\n\n def update_current_book(self, details):\n if self.current_id is None:\n self.add(details)\n else:\n self._db.cursor().execute('''\n UPDATE books SET title=:title, author=:author, genre=:genre,\n year=:year, publisher=:publisher WHERE id=:id''', details\n )\n self._db.commit()\n\n def delete_book(self, book_id):\n self._db.cursor().execute('''\n DELETE FROM books WHERE id=:id''', {\"id\": book_id})\n self._db.commit()\n\n\nclass ListView(Frame):\n def __init__(self, screen, model):\n super(ListView, self).__init__(screen,\n screen.height,\n screen.width,\n on_load=self._reload_list,\n hover_focus=True,\n can_scroll=False,\n title=\"Books List\")\n # Save off the model that accesses the books database.\n self._model = model\n\n # Create the form for displaying the list of books.\n self._list_view = ListBox(\n Widget.FILL_FRAME,\n model.get_summary(),\n name=\"books\",\n add_scroll_bar=True,\n on_change=self._on_pick,\n on_select=self._edit)\n self._edit_button = Button(\"Edit\", self._edit)\n self._delete_button = Button(\"Delete\", self._delete)\n layout = Layout([100], fill_frame=True)\n self.add_layout(layout)\n layout.add_widget(self._list_view)\n layout.add_widget(Divider())\n layout2 = Layout([1, 1, 1, 1])\n self.add_layout(layout2)\n layout2.add_widget(Button(\"Add\", self._add), 0)\n layout2.add_widget(self._edit_button, 1)\n layout2.add_widget(self._delete_button, 2)\n layout2.add_widget(Button(\"Quit\", self._quit), 3)\n self.fix()\n self._on_pick()\n\n def _on_pick(self):\n self._edit_button.disabled = self._list_view.value is None\n self._delete_button.disabled = self._list_view.value is None\n\n def _reload_list(self, new_value=None):\n self._list_view.options = self._model.get_summary()\n self._list_view.value = new_value\n\n def _add(self):\n self._model.current_id = None\n raise NextScene(\"Edit Book\")\n\n def _edit(self):\n self.save()\n self._model.current_id = self.data[\"books\"]\n raise NextScene(\"Edit Book\")\n\n def _delete(self):\n self.save()\n self._model.delete_book(self.data[\"books\"])\n 
self._reload_list()\n\n @staticmethod\n def _quit():\n raise StopApplication(\"User pressed quit\")\n\n\nclass BookView(Frame):\n def __init__(self, screen, model):\n super(BookView, self).__init__(screen,\n screen.height * 2 // 3,\n screen.width * 2 // 3,\n hover_focus=True,\n can_scroll=False,\n title=\"Book Details\",\n reduce_cpu=True)\n # Save off the model that accesses the books database.\n self._model = model\n\n # Create the form for displaying the list of books.\n layout = Layout([100], fill_frame=True)\n self.add_layout(layout)\n layout.add_widget(Text(\"Title :\", \"title\"))\n layout.add_widget(Text(\"Author :\", \"author\"))\n layout.add_widget(Text(\"Genre :\", \"genre\"))\n layout.add_widget(Text(\"Year :\", \"year\"))\n layout.add_widget(Text(\"Publisher:\", \"publisher\"))\n layout2 = Layout([1, 1, 1, 1])\n self.add_layout(layout2)\n layout2.add_widget(Button(\"OK\", self._ok), 0)\n layout2.add_widget(Button(\"Cancel\", self._cancel), 3)\n self.fix()\n\n def reset(self):\n # Do standard reset to clear out form, then populate with new data.\n super(BookView, self).reset()\n self.data = self._model.get_current_book()\n\n def _ok(self):\n self.save()\n self._model.update_current_book(self.data)\n raise NextScene(\"Main\")\n\n @staticmethod\n def _cancel():\n raise NextScene(\"Main\")\n\n\ndef demo(screen, scene):\n scenes = [\n Scene([ListView(screen, books)], -1, name=\"Main\"),\n Scene([BookView(screen, books)], -1, name=\"Edit Book\")\n ]\n\n screen.play(scenes, stop_on_resize=True, start_scene=scene, allow_int=True)\n\n\nbooks = Book()\nlast_scene = None\nwhile True:\n try:\n Screen.wrapper(demo, catch_interrupt=True, arguments=[last_scene])\n sys.exit(0)\n except ResizeScreenError as e:\n last_scene = e.scene\n", "repo_name": "alex329657/PythonCore", "sub_path": "LibraryList/librarylist.py", "file_name": "librarylist.py", "file_ext": "py", "file_size_in_byte": 6555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 20, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 21, "usage_type": "attribute"}, {"api_name": "asciimatics.widgets.Frame", "line_number": 75, "usage_type": "name"}, {"api_name": "asciimatics.widgets.ListBox", "line_number": 88, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Widget.FILL_FRAME", "line_number": 89, "usage_type": "attribute"}, {"api_name": "asciimatics.widgets.Widget", "line_number": 89, "usage_type": "name"}, {"api_name": "asciimatics.widgets.Button", "line_number": 95, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Button", "line_number": 96, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Layout", "line_number": 97, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Divider", "line_number": 100, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Layout", "line_number": 101, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Button", "line_number": 103, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Button", "line_number": 106, "usage_type": "call"}, {"api_name": "asciimatics.exceptions.NextScene", "line_number": 120, "usage_type": "call"}, {"api_name": "asciimatics.exceptions.NextScene", "line_number": 125, "usage_type": "call"}, {"api_name": 
"asciimatics.exceptions.StopApplication", "line_number": 134, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Frame", "line_number": 137, "usage_type": "name"}, {"api_name": "asciimatics.widgets.Layout", "line_number": 150, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Text", "line_number": 152, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Text", "line_number": 153, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Text", "line_number": 154, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Text", "line_number": 155, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Text", "line_number": 156, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Layout", "line_number": 157, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Button", "line_number": 159, "usage_type": "call"}, {"api_name": "asciimatics.widgets.Button", "line_number": 160, "usage_type": "call"}, {"api_name": "asciimatics.exceptions.NextScene", "line_number": 171, "usage_type": "call"}, {"api_name": "asciimatics.exceptions.NextScene", "line_number": 175, "usage_type": "call"}, {"api_name": "asciimatics.scene.Scene", "line_number": 180, "usage_type": "call"}, {"api_name": "asciimatics.scene.Scene", "line_number": 181, "usage_type": "call"}, {"api_name": "asciimatics.screen.Screen.wrapper", "line_number": 191, "usage_type": "call"}, {"api_name": "asciimatics.screen.Screen", "line_number": 191, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 192, "usage_type": "call"}, {"api_name": "asciimatics.exceptions.ResizeScreenError", "line_number": 193, "usage_type": "name"}]} +{"seq_id": "74384031205", "text": "\"\"\"\nDiaspora client.\n\"\"\"\nimport logging\n\nimport diaspy.connection\nimport diaspy.models\nimport diaspy.streams\n\nfrom feedspora.generic_client import GenericClient\n\n\nclass DiaspyClient(GenericClient):\n ''' The DiaspyClient handles the connection to Diaspora. 
'''\n stream = None\n connection = None\n\n def __init__(self, config, testing):\n '''\n Initialize\n :param config:\n :param testing:\n '''\n self._config = config\n\n if not testing:\n self.connection = diaspy.connection.Connection(\n pod=config['pod'],\n username=config['username'],\n password=config['password'])\n self.connection.login()\n try:\n self.stream = diaspy.streams.Stream(self.connection,\n 'stream.json')\n except diaspy.errors.PostError as exception:\n logging.error(\"Cannot get diaspy stream: %s\", str(exception))\n self.stream = None\n self.set_common_opts(config)\n\n def get_dict_output(self, **kwargs):\n '''\n Return dict output for testing purposes\n :param kwargs:\n '''\n\n return {\n \"client\": self._config['name'],\n \"content\": kwargs['text'],\n \"media\": kwargs['photo']\n }\n\n def post(self, feed, entry):\n '''\n Post entry to Diaspora.\n :param feed:\n :param entry:\n '''\n\n text = self.resolve_option(feed, 'post_prefix') + \\\n '['+entry.title +']('+self.shorten_url(feed, entry.link)+')'\n stripped_html = self.strip_html(feed, entry.content) \\\n if entry.content else None\n if self.resolve_option(feed, 'post_include_content') and stripped_html:\n text += \": \" + stripped_html\n text += self.resolve_option(feed, 'post_suffix')\n post_tags = ''.join([\" #{}\".format(k)\n for k in self.filter_tags(feed, entry)])\n if post_tags:\n text += ' |'+post_tags\n\n media_path = None\n if self.resolve_option(feed, 'post_include_media') and entry.media_url:\n # Need to download image from that URL in order to post it!\n media_path = self.download_media(entry.media_url)\n\n post_params = {'text': text,\n 'photo': media_path,\n 'aspect_ids': 'public',\n 'provider_display_name': 'FeedSpora'\n }\n\n to_return = False\n if self.stream:\n to_return = self.stream.post(**post_params)\n elif self.is_testing():\n self.accumulate_testing_output(self.get_dict_output(**post_params))\n else:\n logging.info(\"Diaspy stream is None, not posting anything\")\n\n return to_return\n", "repo_name": "aurelg/feedspora", "sub_path": "src/feedspora/diaspora_client.py", "file_name": "diaspora_client.py", "file_ext": "py", "file_size_in_byte": 2877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 35, "dataset": "github-code", "pt": "52", "api": [{"api_name": "feedspora.generic_client.GenericClient", "line_number": 13, "usage_type": "name"}, {"api_name": "diaspy.connection.connection.Connection", "line_number": 27, "usage_type": "call"}, {"api_name": "diaspy.connection.connection", "line_number": 27, "usage_type": "attribute"}, {"api_name": "diaspy.connection", "line_number": 27, "usage_type": "name"}, {"api_name": "diaspy.connection.streams.Stream", "line_number": 33, "usage_type": "call"}, {"api_name": "diaspy.connection.streams", "line_number": 33, "usage_type": "attribute"}, {"api_name": "diaspy.connection", "line_number": 33, "usage_type": "name"}, {"api_name": "diaspy.connection.errors", "line_number": 35, "usage_type": "attribute"}, {"api_name": "diaspy.connection", "line_number": 35, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "16023478452", "text": "\"\"\"Functions for downloading and reading MNIST data.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport tempfile\n\nimport numpy\nfrom six.moves import urllib\nfrom six.moves import 
xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\n#10 classes\n#784 (28x28) pixels in each image\n#None (55000) images\n\n####################################################\n#mnist = read_data_sets(\"MNIST_data/\", one_hot=True)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport itertools\nimport tensorflow as tf\n\n\nloaded = np.load('type_age_atRedshiftZero.npz')\ntrainImages = loaded['trainImages']\ntrainLabels = loaded['trainLabels']\n#trainFilenames = loaded['trainFilenames']\n#trainTypeNames = loaded['trainTypeNames']\ntestImages = loaded['testImages']\ntestLabels = loaded['testLabels']\n#testFilenames = loaded['testFilenames']\ntestTypeNames = loaded['testTypeNames']\ntypeNamesList = loaded['typeNamesList']\n#validateImages = sortData[2][0]\n#validateLabels = sortData[2][1]\n\n\nprint(\"Completed creatingArrays\")\n\nN = 1024\nntypes = len(testLabels[0])\nimWidth = 32 #Image size and width\nimWidthReduc = 8\n\na = []\n\nsess = tf.InteractiveSession()\n\n#WEIGHT INITIALISATION\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n#CONVOLUTION AND POOLING\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\nx = tf.placeholder(tf.float32, shape=[None, N])\ny_ = tf.placeholder(tf.float32, shape=[None, ntypes])\n\n#FIRST CONVOLUTIONAL LAYER\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\nx_image = tf.reshape(x, [-1,imWidth,imWidth,1])\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\n#SECOND CONVOLUTIONAL LAYER\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\n#DENSELY CONNECTED LAYER\nW_fc1 = weight_variable([imWidthReduc * imWidthReduc * 64, N])\nb_fc1 = bias_variable([N])\nh_pool2_flat = tf.reshape(h_pool2, [-1, imWidthReduc*imWidthReduc*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n#DROPOUT\nkeep_prob = tf.placeholder(tf.float32)\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n#READOUT LAYER\nW_fc2 = weight_variable([N, ntypes])\nb_fc2 = bias_variable([ntypes])\n\ny_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n########\n##batch_xs1 = trainImages\n##batch_ys1 = trainLabels\n##print(sess.run(y_conv, feed_dict={x: batch_xs1, y_: batch_ys1}))\n########\n\n#TRAIN AND EVALUATE MODEL\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsess.run(tf.initialize_all_variables())\n\ntrainImagesCycle = itertools.cycle(trainImages)\ntrainLabelsCycle = itertools.cycle(trainLabels)\nfor i in range(100000):\n batch_xs = np.array(list(itertools.islice(trainImagesCycle, 50*i, 50*i+50)))\n batch_ys = np.array(list(itertools.islice(trainLabelsCycle, 50*i, 50*i+50)))\n train_step.run(feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5})\n if (i % 100 == 0):\n train_accuracy = accuracy.eval(feed_dict={x:batch_xs, y_: batch_ys, 
keep_prob: 1.0})\n print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n testacc = accuracy.eval(feed_dict={x: testImages, y_: testLabels, keep_prob: 1.0})\n print(\"test accuracy %g\"%(testacc))\n a.append(testacc)\nprint(\"test accuracy %g\"%accuracy.eval(feed_dict={x: testImages, y_: testLabels, keep_prob: 1.0}))\n\nyy = y_conv.eval(feed_dict={x: testImages, y_: testLabels, keep_prob: 1.0})\ncp = correct_prediction.eval(feed_dict={x: testImages, y_: testLabels, keep_prob: 1.0})\nprint(cp)\nfor i in range(len(cp)):\n if (cp[i] == False):\n predictedIndex = np.argmax(yy[i])\n print(i, testTypeNames[i], typeNamesList[predictedIndex])\n\n\n#ACTUAL ACCURACY, SUBTYPE ACCURACY, AGE ACCURACY\ntypeAndAgeCorrect = 0\ntypeCorrect = 0\nsubTypeCorrect = 0\nsubTypeAndAgeCorrect = 0\ntypeAndNearAgeCorrect = 0\nsubTypeAndNearAgeCorrect = 0\nfor i in range(len(testTypeNames)):\n predictedIndex = np.argmax(yy[i])\n testSubType = testTypeNames[i][0:2]\n actualSubType = typeNamesList[predictedIndex][0:2]\n if testTypeNames[i][0:3] == 'IIb':\n testSubType = 'Ib'\n if typeNamesList[predictedIndex][0:3] == 'IIb':\n actualSubType = 'Ib'\n testType = testTypeNames[i].split(': ')[0]\n actualType = typeNamesList[predictedIndex].split(': ')[0]\n testAge = testTypeNames[i].split(': ')[1]\n actualAge = typeNamesList[predictedIndex].split(': ')[1]\n nearTestAge = testAge.split(' to ')\n \n if (testTypeNames[i] == typeNamesList[predictedIndex]):\n typeAndAgeCorrect += 1\n if (testType == actualType): #correct type\n typeCorrect += 1\n if ((nearTestAge[0] in actualAge) or (nearTestAge[1] in actualAge)): #check if the age is in the neighbouring bin\n typeAndNearAgeCorrect += 1 #all correct except nearby bin\n if (testSubType == actualSubType): #correct subtype\n subTypeCorrect += 1\n if testAge == actualAge:\n subTypeAndAgeCorrect += 1\n if ((nearTestAge[0] in actualAge) or (nearTestAge[1] in actualAge)): #check if the age is in the neighbouring bin\n subTypeAndNearAgeCorrect += 1 #subtype and nearby bin\n\ntypeAndAgeAccuracy = float(typeAndAgeCorrect)/len(testTypeNames)\ntypeAccuracy = float(typeCorrect)/len(testTypeNames)\nsubTypeAccuracy = float(subTypeCorrect)/len(testTypeNames)\nsubTypeAndAgeAccuracy = float(subTypeAndAgeCorrect)/len(testTypeNames)\ntypeAndNearAgeAccuracy = float(typeAndNearAgeCorrect)/len(testTypeNames)\nsubTypeAndNearAgeAccuracy = float(subTypeAndNearAgeCorrect)/len(testTypeNames)\n\nprint(\"typeAndAgeAccuracy : \" + str(typeAndAgeAccuracy))\nprint(\"typeAccuracy : \" + str(typeAccuracy))\nprint(\"subTypeAccuracy : \" + str(subTypeAccuracy))\nprint(\"subTypeAndAgeAccuracy: \" + str(subTypeAndAgeAccuracy))\nprint(\"typeAndNearAgeAccuracy : \" + str(typeAndNearAgeAccuracy))\nprint(\"subTypeAndNearAgeAccuracy : \" + str(subTypeAndNearAgeAccuracy))\n \n\n#SAVE THE MODEL\nsaver = tf.train.Saver()\nsave_path = saver.save(sess, \"model_trainedAtZeroZ.ckpt\")\nprint(\"Model saved in file: %s\" % save_path)\n", "repo_name": "daniel-muthukrishna/SNClassifying_Pre-alpha", "sub_path": "tf_improved_model.py", "file_name": "tf_improved_model.py", "file_ext": "py", "file_size_in_byte": 6786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.load", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 57, 
"usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 76, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 93, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dropout", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.softmax", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 100, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 114, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 116, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 147, "usage_type": "call"}, 
{"api_name": "tensorflow.train.Saver", "line_number": 189, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 189, "usage_type": "attribute"}]} +{"seq_id": "25477819701", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport time\nimport common.global_const as gc\n\n\ndef get_contract_by_item_no(item_no):\n sql = \"select * from contract where contract_asset_item_no = '%s'\" % item_no\n contract = gc.CONTRACT_DB.query(sql)\n return contract\n\n\ndef get_contract_by_item_no_opportunity(item_no, contract_sign_opportunity, status=None):\n table = get_contract_table(item_no)\n sql = \"select * from %s where contract_asset_item_no = '%s' and contract_sign_opportunity = '%s'\" \\\n % (table, item_no, contract_sign_opportunity)\n if status is not None:\n sql += \" and contract_status = '%s'\" % status\n sql += \" group by contract_type\"\n contract = gc.CONTRACT_DB.query(sql)\n return contract\n\n\ndef get_contract(item_no, contract_sign_opportunity, contract_type):\n table = get_contract_table(item_no)\n sql = \"select * from %s where contract_asset_item_no = '%s' and contract_sign_opportunity = '%s' \" \\\n \"and contract_type = '%s' order by contract_id asc\" % (table, item_no, contract_sign_opportunity, contract_type)\n contract = gc.CONTRACT_DB.query(sql)\n return contract\n\n\ndef get_open_task_by_item_no(item_no):\n sql = \"select * from task where task_order_no = '%s' and task_status = 'open'\" % item_no\n contract = gc.CONTRACT_DB.query(sql)\n return contract\n\n\ndef get_open_task_by_item_no_and_task_type(item_no, task_type):\n sql = \"select * from task where task_order_no = '%s' and task_status = 'open' and task_type = '%s'\" % (item_no, task_type)\n contract = gc.CONTRACT_DB.query(sql)\n return contract\n\n\ndef create_contract(item_no, contract_type, contract_type_text, url, opportunity=None):\n table = get_contract_table(item_no)\n sql = \"INSERT INTO %s (contract_create_at, contract_asset_item_no, contract_type, contract_type_text, \" \\\n \"contract_url, contract_status, contract_from_system, contract_code, contract_apply_id, contract_flow_key, \" \\\n \"contract_ref_item_no, contract_sign_at, contract_update_at, contract_sign_opportunity, contract_provider, \" \\\n \"contract_subject) \" \\\n \" VALUES (now(), '%s', '%s', '%s', \" \\\n \"'%s', 'SUCCESS', 'dsq', \" \\\n \"'BIZ111276200422034951', '20201587498579320992-20713-1587498591702', 'auth1_manman_flow', '%s_noloan', \" \\\n \"now(), now(), '%s', 'YUN', '苏州')\"\\\n % (table, item_no, contract_type, contract_type_text, url, item_no, opportunity)\n contract = gc.CONTRACT_DB.insert(sql)\n return contract\n\n\ndef create_change_channel_task(item_no, old_channel, new_channel):\n sql = \"INSERT INTO task (task_order_no, task_type, task_status, task_next_run_at, task_request_data, \" \\\n \"task_response_data, task_memo, task_create_at, task_update_at, task_version, task_priority, \" \\\n \"task_retrytimes) \" \\\n \"VALUES ('%s', 'ChangeChannel', 'open', '2020-07-01 02:42:57', \" \\\n \"'{\\\\\\\"apply_code\\\\\\\" : \\\\\\\"%s\\\\\\\",\\\\\\\"loan_channel\\\\\\\" : \\\\\\\"%s\\\\\\\",\\\\\\\"route_channel\\\\\\\" : \\\\\\\"%s\\\\\\\", \" \\\n \"\\\\\\\"old_channel\\\\\\\" : \\\\\\\"%s\\\\\\\",\\\\\\\"version\\\\\\\" : 1593542576551,\\\\\\\"need_register\\\\\\\" : 1}', \" \\\n \"'', '', '2020-07-01 02:42:57', '2020-07-01 02:42:57', 0, 1, 0)\"\\\n % (item_no, item_no, new_channel, new_channel, old_channel)\n result = gc.CONTRACT_DB.insert(sql)\n return result\n\n\ndef create_asset_import_task(item_no, 
channel):\n sql = \"INSERT INTO task (task_order_no, task_type, task_status, task_next_run_at, task_request_data, \" \\\n \"task_response_data, task_memo, task_create_at, task_update_at, task_version, task_priority, \" \\\n \"task_retrytimes) VALUES ('%s', 'AssetImport', 'open', NOW(), \" \\\n \"'{\\\\\\\"from_system\\\\\\\" : \\\\\\\"banana\\\\\\\",\\\\\\\"key\\\\\\\" : \\\\\\\"%s%s\\\\\\\",\" \\\n \"\\\\\\\"type\\\\\\\" : \\\\\\\"DSQAssetImport\\\\\\\",\\\\\\\"data\\\\\\\" : { \\\\\\\"configId\\\\\\\" : null, \\\\\\\"type\\\\\\\" : null, \" \\\n \"\\\\\\\"capital\\\\\\\" : \\\\\\\"%s\\\\\\\", \\\\\\\"scope\\\\\\\" : \\\\\\\"youxi_bill\\\\\\\", \\\\\\\"fromSystemName\\\\\\\" : null, \" \\\n \"\\\\\\\"periodCount\\\\\\\" : 6, \\\\\\\"signOpportunity\\\\\\\" : \\\\\\\"AssetImport\\\\\\\", \\\\\\\"signType\\\\\\\" : null, \" \\\n \"\\\\\\\"condition\\\\\\\" : {\\\\\\\"loan_channel\\\\\\\" : null,\\\\\\\"source_type\\\\\\\" : null,\" \\\n \"\\\\\\\"from_system_name\\\\\\\" : null,\\\\\\\"period_count\\\\\\\" : null,\\\\\\\"sub_type\\\\\\\" : null }, \" \\\n \"\\\\\\\"itemNo\\\\\\\" : \\\\\\\"%s\\\\\\\", \\\\\\\"refItemNo\\\\\\\" : \\\\\\\"%s_noloan\\\\\\\", \" \\\n \"\\\\\\\"cover\\\\\\\" : false, \\\\\\\"constMap\\\\\\\" : { }, \\\\\\\"sequenceList\\\\\\\" : null}\\\\\\n}', \" \\\n \"'', '', NOW(), NOW(), 0, 2, 0)\"\\\n % (item_no, item_no, channel, channel, item_no, item_no)\n result = gc.CONTRACT_DB.insert(sql)\n return result\n\n\ndef get_sendmsg(item_no, sendmsg_type):\n sql = \"select * from sendmsg where sendmsg_order_no = '%s' and sendmsg_type = '%s' \" \\\n \"order by sendmsg_id asc\" % (item_no, sendmsg_type)\n result = gc.CONTRACT_DB.query(sql)\n return result\n\n\ndef contract_create_attachment_by_item_no(item_no, channel, attachment_type, attachment_name, attachment_url):\n now_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n table = get_contract_table(item_no)\n sql = \"INSERT INTO %s\" \\\n \" ( `contract_create_at`, `contract_asset_item_no`, `contract_type`, `contract_type_text`, `contract_url`, \" \\\n \"`contract_status`, `contract_from_system`, `contract_code`, `contract_apply_id`, `contract_flow_key`, \" \\\n \"`contract_ref_item_no`, `contract_sign_at`, `contract_update_at`, `contract_sign_opportunity`, \" \\\n \"`contract_provider`, `contract_subject`, `contract_channel`, `contract_version`) VALUES( \" \\\n \"'%s', '%s', %s, '%s', \" \\\n \"'%s', 'SUCCESS', 'strawberry',\" \\\n \" FLOOR(RAND() * 10000), '%s-30300-1611906119811', 'tpl2007301443234707AE', \" \\\n \"'%s', '%s', '%s', 'AssetImport', 'YUN', '如皋智萃', \" \\\n \"'%s', 1);\" % \\\n (table, now_time, item_no, attachment_type, attachment_name, attachment_url, item_no, item_no, now_time, now_time, channel )\n result = gc.CONTRACT_DB.insert(sql)\n return result\n\n\ndef get_contract_table(item_no):\n if item_no[0:4] == 'enc_':\n table = 'contract_enc'\n else:\n if item_no[0:4].isupper():\n table = 'contract_' + str(item_no[1:5])\n else:\n table = 'contract_' + str(item_no[0:4])\n return table\n\n\nif __name__ == '__main__':\n print(get_contract_by_item_no(\"20201585883351257122\"))\n", "repo_name": "xiujingyuan/framework-test", "sub_path": "biztest/function/contract/contract_db_function.py", "file_name": "contract_db_function.py", "file_ext": "py", "file_size_in_byte": 6443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 9, "usage_type": "call"}, {"api_name": 
"common.global_const.CONTRACT_DB", "line_number": 9, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 9, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 20, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 20, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 20, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 28, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 28, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 28, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 34, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 34, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 34, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 40, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 40, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 40, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.insert", "line_number": 55, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 55, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 55, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.insert", "line_number": 68, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 68, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 68, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.insert", "line_number": 86, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 86, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 86, "usage_type": "name"}, {"api_name": "common.global_const.CONTRACT_DB.query", "line_number": 93, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 93, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 93, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 98, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 98, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB.insert", "line_number": 111, "usage_type": "call"}, {"api_name": "common.global_const.CONTRACT_DB", "line_number": 111, "usage_type": "attribute"}, {"api_name": "common.global_const", "line_number": 111, "usage_type": "name"}]} +{"seq_id": "6624481457", "text": "from django.urls import path\r\nfrom first import views\r\n\r\nurlpatterns = [\r\n path('', views.index,name='index'),\r\n path('detail//', views.detail, name='detail'),\r\n path('reply/', views.reply, name='reply'),\r\n path('issue/', views.issue, name='issue'),\r\n path('submit/', views.submit, name='submit'),\r\n path('choice_cate/', views.choice_cate, name='choice'),\r\n path('category//', views.category, name='category'),\r\n path('myquestion/',views.my_question, name='myquestion'),\r\n path('search/', views.search, name='search'),\r\n path('finish/', views.finish, name='finish'),\r\n path('hint/', views.hint, name='hint')\r\n]", "repo_name": "KlausMichael0/The-Online-Answer-System", "sub_path": "OAS/first/urls.py", "file_name": "urls.py", "file_ext": "py", 
"file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "first.views.index", "line_number": 5, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 5, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "first.views.detail", "line_number": 6, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "first.views.reply", "line_number": 7, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "first.views.issue", "line_number": 8, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "first.views.submit", "line_number": 9, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "first.views.choice_cate", "line_number": 10, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "first.views.category", "line_number": 11, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "first.views.my_question", "line_number": 12, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "first.views.search", "line_number": 13, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "first.views.finish", "line_number": 14, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "first.views.hint", "line_number": 15, "usage_type": "attribute"}, {"api_name": "first.views", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "18725607857", "text": "import json\n\ndata = {\n 'no':1,\n 'acc':2\n}\n#json.dumps(): 对数据进行编码\n#json.loads(): 对数据进行解码\n\n\njson_str = json.dumps(data)\nprint(data)\nprint(json_str)\n\n# 如果你要处理的是文件而不是字符串,你可以使用 json.dump() 和 json.load() 来编码和解码JSON数据\n# 写入 JSON 数据\n# with open('data.json', 'w') as f:\n# # json.dump(data, f)\n# #\n# # # 读取数据\n# # with open('data.json', 'r') as f:\n# # data = json.load(f)", "repo_name": "lizhilizhi/python-study", "sub_path": "12_json.py", "file_name": "12_json.py", "file_ext": "py", "file_size_in_byte": 479, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dumps", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "42959734268", "text": "\"\"\"\nDaemonHandler: control daemon server\n\nHyungwon Yang\n23.08.07\n\"\"\"\n\n\nimport sys, os, time, psutil, signal\nimport 
argparse\nimport logging\nimport tornado\nimport yaml\n\nfrom src.handlers.ServerHandler import App\nfrom src.handlers.AlignHandler import AlignHandler\nfrom src.utils.LogUtils import initLog\n\n# config\nwith open(\"conf/server.yaml\") as f:\n server_config = yaml.load(f, Loader=yaml.FullLoader)\nparser = argparse.ArgumentParser()\nargs, unknwon = parser.parse_known_args(namespace=argparse.Namespace(**server_config))\n# args = parser.parse_args(namespace=argparse.Namespace(**server_config))\n_version = args.version\nCURRENT_PATH = \"/\".join([os.path.dirname(os.path.abspath(__file__)), \"../..\"])\n\n# variables\npauseRunLoop = 0 # 0 means none pause between the calling of run() method.\nrestartPause = 1 # 0 means without a pause between stop and start during the restart of the daemon\nwaitToHardKill = 3 # when terminate a process, wait until kill the process with SIGTERM signal\n\nisReloadSignal = False\ncanDaemonRun = True\nprocessName = os.path.basename(sys.argv[0])\nstdin = '/dev/null'\nstdout = '/dev/null'\nstderr = '/dev/null'\n\n# pid\npid_dir = args.server_pid_path\npid_path = os.path.join(CURRENT_PATH, pid_dir)\nserver_pidfile = args.server_pidfile\nif not os.path.exists(pid_path):\n os.makedirs(pid_path)\n# Create the pid file only if the pid folder exists.\npid_file = os.path.join(pid_path, server_pidfile)\n\n# data : save uploaded and processed data(wav, txt, TextGrid)\ndata_dir = args.data_path\ndata_path = os.path.join(CURRENT_PATH, data_dir)\nif not os.path.exists(data_path):\n os.makedirs(data_path)\n\n# history log\nhistory_dir = args.history_path\nhistory_path = os.path.join(CURRENT_PATH, history_dir)\nif not os.path.exists(history_path):\n os.makedirs(history_path)\n\nprint(\"open server port: {}\".format(args.server_port))\ninitLog(args.log_path, args.log_in_date, args.log_file_name, args.log_format)\n\n# make pid file.\ndef savePidFile(pid, saveName: str):\n if \".pid\" in saveName:\n savePath = os.path.join(pid_path, saveName)\n else:\n savePath = os.path.join(pid_path, saveName + \".pid\")\n with open(savePath, 'w', encoding='utf-8') as wrt:\n wrt.write(str(pid)+\"\\n\")\n\n# remove pid file.\ndef removePidFile(fileName: str):\n if \".pid\" in fileName:\n filePath = os.path.join(pid_path, fileName)\n else:\n filePath = os.path.join(pid_path, fileName + \".pid\")\n if os.path.exists(filePath):\n os.remove(filePath)\n \ndef run():\n os.chdir(CURRENT_PATH)\n # open connection.\n app = App()\n app.listen(args.server_port)\n logging.info(\"open server port: {}\".format(args.server_port))\n server_pid = os.getpid()\n savePidFile(server_pid, server_pidfile)\n \n alignHandler = AlignHandler()\n alignHandler.getServerPort(args.server_port)\n \n # main job.\n main_fa_callback = tornado.ioloop.PeriodicCallback(alignHandler.process, args.running_time)\n main_fa_callback.start()\n\n tornado.ioloop.IOLoop.current().start()\n\n\ndef on_terminate(process):\n m = f\"The daemon process with PID {process.pid} has ended correctly.\"\n logging.info(m)\n print(m)\n\ndef _sigterm_handler(signum, frame):\n canDaemonRun = False\n\ndef _reload_handler(signum, frame):\n isReloadSignal = True\n\ndef _makeDaemon():\n \"\"\"\n Make a daemon, do double-fork magic.\n \"\"\"\n try:\n pid = os.fork()\n if pid > 0:\n # Exit first parent.\n sys.exit(0)\n except OSError as e:\n m = f\"Fork #1 failed: {e}\"\n logging.error(m)\n print(m)\n sys.exit(1)\n\n # Decouple from the parent environment.\n os.chdir(\"/\")\n os.setsid()\n os.umask(0)\n\n # Do second fork.\n try:\n pid = os.fork()\n if pid > 0:\n # Exit from second 
parent.\n sys.exit(0)\n except OSError as e:\n m = f\"Fork #2 failed: {e}\"\n logging.error(m)\n print(m)\n sys.exit(1)\n\n m = \"The daemon process is going to background.\"\n logging.info(m)\n print(m)\n\n # Redirect standard file descriptors.\n sys.stdout.flush()\n sys.stderr.flush()\n si = open(stdin, 'r')\n so = open(stdout, 'a+')\n se = open(stderr, 'a+')\n os.dup2(si.fileno(), sys.stdin.fileno())\n os.dup2(so.fileno(), sys.stdout.fileno())\n os.dup2(se.fileno(), sys.stderr.fileno())\n \ndef _infiniteLoop():\n try:\n if pauseRunLoop:\n time.sleep(pauseRunLoop)\n\n while canDaemonRun:\n run()\n time.sleep(pauseRunLoop)\n else:\n while canDaemonRun:\n run()\n\n except Exception as e:\n m = f\"Run method failed: {e}\"\n sys.stderr.write(m)\n sys.exit(1)\n\ndef getPidFiles():\n dir_list = os.listdir(pid_path)\n pid_file_list = [each_list for each_list in dir_list if \".pid\" in each_list]\n return pid_file_list\n\ndef getPids(pid_file_list):\n pid_list = dict()\n for each_file in pid_file_list:\n with open(os.path.join(pid_path, each_file), 'r', encoding='utf-8') as txt:\n pid_line = txt.read().strip()\n pid_list[each_file] = pid_line\n return pid_list\n\ndef start():\n # Handle signals\n signal.signal(signal.SIGINT, _sigterm_handler)\n signal.signal(signal.SIGTERM, _sigterm_handler)\n signal.signal(signal.SIGHUP, _reload_handler)\n\n # Check if the daemon is already running.\n pid_file_list = getPidFiles()\n\n if server_pidfile in pid_file_list:\n pid_list = getPids(pid_file_list)\n server_pid = pid_list[server_pidfile]\n print(f\"Find a previous daemon processes with PIDs {server_pid}. Is not already the daemon running?\")\n sys.exit(1)\n else:\n print(f\"Start the daemon version {_version}\")\n\n # Daemonize the main process\n _makeDaemon()\n # Start a infinitive loop that periodically runs run() method\n _infiniteLoop()\n\ndef stop():\n pid = 999999\n logging.info(\"stop daemon.\")\n print(\"stop daemon.\")\n try:\n # find pid file list.\n pid_file_list = getPidFiles()\n if pid_file_list == []:\n print(\"Cannot find any daemon process.\")\n return None\n \n # get process id\n logging.info(f\"Found process ids: {pid_file_list}\")\n print(f\"Found process ids: {pid_file_list}\")\n pid_list = getPids(pid_file_list)\n # kill process\n if pid_list:\n for pid_k in pid_list.keys():\n pid = int(pid_list[pid_k])\n if psutil.pid_exists(pid):\n p = psutil.Process(pid)\n p.terminate()\n gone, alive = psutil.wait_procs([p], timeout=3, callback=on_terminate)\n \n for p in alive:\n p.kill()\n print(f\"The daemon process with PID {p.pid} was killed with SIGTERM!\")\n else:\n m = f\"{pid} pid does not exist.\"\n logging.warning(m)\n print(m)\n removePidFile(pid_k)\n else:\n print(\"Cannot find any daemon process.\")\n except Exception as e:\n print(e)\n \ndef status():\n \"\"\"\n Get status of the daemon.\n \"\"\"\n\n pid_list = getPidFiles()\n\n if server_pidfile in pid_list:\n server_pids = getPids([server_pidfile])\n logging.info(f\"The daemon is running with PID {server_pids[server_pidfile]}.\")\n print(f\"The daemon is running with PID {server_pids[server_pidfile]}.\")\n else:\n logging.info(\"The daemon is not running!\")\n print(\"The daemon is not running!\")\n\ndef version():\n print(f\"The daemon version: {_version}\")\n", "repo_name": "hyung8758/Korean_FA", "sub_path": "src/handlers/DaemonHandler.py", "file_name": "DaemonHandler.py", "file_ext": "py", "file_size_in_byte": 7859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "50", 
"api": [{"api_name": "yaml.load", "line_number": 21, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 21, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 59, "usage_type": "call"}, {"api_name": "src.utils.LogUtils.initLog", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 80, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 83, "usage_type": "call"}, {"api_name": "src.handlers.ServerHandler.App", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 87, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 88, "usage_type": "call"}, {"api_name": "src.handlers.AlignHandler.AlignHandler", "line_number": 91, "usage_type": "call"}, {"api_name": "tornado.ioloop.PeriodicCallback", "line_number": 95, "usage_type": "call"}, {"api_name": "tornado.ioloop", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tornado.ioloop.IOLoop.current", "line_number": 98, "usage_type": "call"}, {"api_name": "tornado.ioloop", "line_number": 98, "usage_type": "attribute"}, 
{"api_name": "logging.info", "line_number": 103, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 117, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 120, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 123, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 125, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 128, "usage_type": "call"}, {"api_name": "os.setsid", "line_number": 129, "usage_type": "call"}, {"api_name": "os.umask", "line_number": 130, "usage_type": "call"}, {"api_name": "os.fork", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 137, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 140, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 142, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 145, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 149, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 150, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.dup2", "line_number": 154, "usage_type": "call"}, {"api_name": "sys.stdin.fileno", "line_number": 154, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.dup2", "line_number": 155, "usage_type": "call"}, {"api_name": "sys.stdout.fileno", "line_number": 155, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 155, "usage_type": "attribute"}, {"api_name": "os.dup2", "line_number": 156, "usage_type": "call"}, {"api_name": "sys.stderr.fileno", "line_number": 156, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 156, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 161, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 165, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 172, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 172, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 173, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 190, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 190, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 191, "usage_type": "call"}, {"api_name": "signal.SIGTERM", "line_number": 191, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 192, "usage_type": "call"}, {"api_name": "signal.SIGHUP", "line_number": 192, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 201, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 212, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 222, "usage_type": "call"}, {"api_name": "psutil.pid_exists", "line_number": 229, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 230, "usage_type": "call"}, {"api_name": "psutil.wait_procs", "line_number": 232, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 239, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 256, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 259, "usage_type": "call"}]} 
+{"seq_id": "42208783112", "text": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QPushButton, QFileDialog\nimport pandas as pd\n\nclass CSVViewer(QWidget):\n def __init__(self):\n super().__init__()\n\n self.init_ui()\n\n def init_ui(self):\n self.df = None\n self.current_index = 0\n\n self.layout = QVBoxLayout()\n\n self.result_label = QLabel()\n self.counter_label = QLabel()\n\n self.layout.addWidget(self.result_label)\n self.layout.addWidget(self.counter_label)\n\n self.prev_button = QPushButton('Previous')\n self.next_button = QPushButton('Next')\n\n self.prev_button.clicked.connect(self.prev_result)\n self.next_button.clicked.connect(self.next_result)\n\n self.layout.addWidget(self.prev_button)\n self.layout.addWidget(self.next_button)\n\n self.setLayout(self.layout)\n\n self.load_csv()\n\n self.show_result()\n\n def load_csv(self):\n options = QFileDialog.Options()\n file_name, _ = QFileDialog.getOpenFileName(self, \"Open CSV File\", \"\", \"CSV Files (*.csv);;All Files (*)\", options=options)\n\n if file_name:\n self.df = pd.read_csv(file_name)\n\n def show_result(self):\n if self.df is not None and not self.df.empty:\n current_row = self.df.iloc[self.current_index]\n result_text = f\"SMILES: {current_row['SMILES']}\\nName: {current_row['Name']}\\nMW: {current_row['MW']}\\nmp: {current_row['mp']}\\nProject: {current_row['Project']}\"\n self.result_label.setText(result_text)\n self.counter_label.setText(f\"Result {self.current_index + 1} of {len(self.df)}\")\n\n def prev_result(self):\n if self.current_index > 0:\n self.current_index -= 1\n self.show_result()\n\n def next_result(self):\n if self.df is not None and self.current_index < len(self.df) - 1:\n self.current_index += 1\n self.show_result()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n viewer = CSVViewer()\n viewer.setGeometry(100, 100, 800, 600)\n viewer.setWindowTitle('CSV Viewer')\n viewer.show()\n sys.exit(app.exec_())\n", "repo_name": "Tomisall/ProcessSafteyDB", "sub_path": "csvSearch.py", "file_name": "csvSearch.py", "file_ext": "py", "file_size_in_byte": 2162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 5, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 17, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.Options", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 39, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 40, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 40, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "492290210", "text": "import pygame\nimport random\nfrom actor.baselife import BaseLife\nfrom actor.nourishment import 
Nourishment\nclass Plant(BaseLife):\n def __init__(self, x: int, y: int,color=None) -> None:\n if color==None:\n color=self.calc_first_color()\n self.splittime=int(pygame.math.lerp(30,5,color[0]/255))\n self.splitrange=int(pygame.math.lerp(400,900,color[2]/255))\n super().__init__(x, y,pygame.math.lerp(50,200,color[1]/255),1,color)\n def step(self,screen:pygame.surface,actors:list):\n pygame.draw.polygon(screen,self.color,((self.pos.x-20,self.pos.y+30),(self.pos.x,self.pos.y-40),(self.pos.x+20,self.pos.y+30)))\n self.splittime-=1\n if self.splittime==0:\n n=list(filter(lambda x:isinstance(x,Nourishment) and(self.pos.x-x.pos.x)**2+(self.pos.y-x.pos.y)**2<=self.splitrange**2 ,actors))\n if len(n)>0:\n item=n[random.randint(0,len(n)-1)]\n item.alive=False\n actors.append(Plant(item.pos.x,item.pos.y,self.calc_next_color(self.color)))\n self.splittime=int(pygame.math.lerp(30,5,self.color[0]/255))\n super().step(screen)\n", "repo_name": "Fuses-Garage/LifeSimulator", "sub_path": "actor/plant.py", "file_name": "plant.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "actor.baselife.BaseLife", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.math.lerp", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.math.lerp", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.math.lerp", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.surface", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.draw.polygon", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 13, "usage_type": "attribute"}, {"api_name": "actor.nourishment.Nourishment", "line_number": 16, "usage_type": "argument"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.math.lerp", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.math", "line_number": 21, "usage_type": "attribute"}]} +{"seq_id": "8426038930", "text": "from facialRecog import calc_local\nfrom remoteCalc import calc_remote_single, calc_remote_multi\nimport time \nimport resource\nimport json\nimport os\n\ndef compare_single_picture(image_path):\n res, cost_1 = calc_local(image_path)\n print(\"单次本地计算时间: \", cost_1, \"ms\")\n print(\"单次本地人脸计数结果: \", res)\n start_time = int(round(time.time() * 1000))\n res = calc_remote_single(image_path)\n # print(res)\n for r in res:\n # print(r.get())\n tmp = json.loads(r.get()[-1])\n # print(tmp[\"result\"])\n print(\"单次远程人脸计数结果: \", tmp[\"result\"][0])\n cost_2 = tmp[\"result\"][1]\n end_time = int(round(time.time() * 1000))\n\n total_time_2 = end_time - start_time\n transfer_time_2 = total_time_2 - cost_2\n print(\"单次远程计算时间: \", cost_2, \"ms\")\n print(\"单次远程传输时间: \", transfer_time_2, \"ms\")\n\n\ndef compare_multi_picture(image_source): \n start_time = int(round(time.time() * 1000))\n for image_name in os.listdir(image_source):\n image_path = os.path.join(image_source, image_name)\n res1, cost_1 = calc_local(image_path)\n print(\"并行本地计算时间: \", cost_1, \"ms\")\n print(\"并行本地人脸计数结果: \", res1)\n # print(\"result_local: \", res1, cost_1)\n end_time = int(round(time.time() * 1000))\n total_time_1 = end_time - start_time\n print(\"并行本地计算总时间: \", 
total_time_1, \"ms\")\n \n start_time = int(round(time.time() * 1000))\n res = calc_remote_multi(image_source)\n cost_2 = 0\n for r in res:\n # print(r.get())\n tmp = json.loads(r.get()[-1])\n # print(tmp)\n cost_2 += int(tmp[\"result\"][1])\n end_time = int(round(time.time() * 1000))\n total_time_2 = end_time - start_time\n print(\"Total parallel remote computation time: \", total_time_2, \"ms\")\n transfer_time_2 = total_time_2 - cost_2\n print(\"Total parallel remote transfer time: \", transfer_time_2, \"ms\")\n\n\nif __name__ == '__main__':\n image_source = \"./image\"\n for image_name in os.listdir(image_source):\n image_path = os.path.join(image_source, image_name)\n compare_single_picture(image_path)\n print('--- Single-run measurement finished, starting parallel measurement ---')\n time.sleep(5)\n compare_multi_picture(image_source)\n print('--- Parallel measurement finished ---')", "repo_name": "skyerguo/django_transfer", "sub_path": "client/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "facialRecog.calc_local", "line_number": 9, "usage_type": "call"}, {"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "remoteCalc.calc_remote_single", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "facialRecog.calc_local", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "remoteCalc.calc_remote_multi", "line_number": 42, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "21578554428", "text": "from django.urls import path\nfrom .views import ContactUsCreateView, ContactUsUpdateView, ContactUsDeleteView, contact_us_submission\n\napp_name = 'contact_us'\n\nurlpatterns = [\n path('contact_us/', ContactUsCreateView.as_view(), name='contact_us_submission'),\n path('contact_us/<int:pk>/', ContactUsUpdateView.as_view(), name='update_contact'),\n path('contact_us/<int:pk>/delete/', ContactUsDeleteView.as_view(), name='delete_contact'),\n]\n", "repo_name": "Isaiah-Essien/Eden_Nasa", "sub_path": "backend/contact_us/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 446, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ContactUsCreateView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "views.ContactUsCreateView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ContactUsUpdateView.as_view", "line_number": 8, "usage_type": 
"call"}, {"api_name": "views.ContactUsUpdateView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ContactUsDeleteView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "views.ContactUsDeleteView", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "3081296760", "text": "import datetime\nimport json\nimport time\nfrom asyncio import CancelledError\n\nfrom loguru import logger\nfrom spade.agent import Agent\nfrom spade.behaviour import TimeoutBehaviour\nfrom spade.message import Message\nfrom spade.template import Template\n\nfrom .helpers import random_position\nfrom .protocol import (\n REQUEST_PROTOCOL,\n REGISTER_PROTOCOL,\n ACCEPT_PERFORMATIVE,\n REFUSE_PERFORMATIVE,\n REQUEST_PERFORMATIVE,\n TRAVEL_PROTOCOL,\n CANCEL_PERFORMATIVE,\n INFORM_PERFORMATIVE,\n)\nfrom .utils import (\n StrategyBehaviour,\n CyclicBehaviour,\n FREE_STATION,\n BUSY_STATION,\n TRANSPORT_MOVING_TO_STATION,\n TRANSPORT_IN_STATION_PLACE,\n TRANSPORT_CHARGED,\n)\n\n\nclass StationAgent(Agent):\n def __init__(self, agentjid, password):\n super().__init__(jid=agentjid, password=password)\n self.agent_id = None\n self.icon = None\n self.strategy = None\n self.running_strategy = False\n self.directory_id = None\n self.registration = False\n self.station_name = None\n self.station_type = None\n self.current_pos = None\n self.available_places = None\n self.status = None\n self.power = None\n self.stopped = False\n self.ready = False\n\n # waiting waiting_list\n self.waiting_list = list()\n\n # statistics\n self.charged_transports = 0\n self.queue_length = 0\n self.max_queue_length = 0\n\n self.transports_in_queue_time = None\n self.empty_queue_time = None\n self.total_busy_time = None # total time with some transport waiting in queue\n\n def is_ready(self):\n return self.ready\n\n async def setup(self):\n self.total_busy_time = 0.0\n logger.info(\"Station agent {} running\".format(self.name))\n self.set_type(\"station\")\n self.set_status()\n try:\n template = Template()\n template.set_metadata(\"protocol\", REGISTER_PROTOCOL)\n register_behaviour = RegistrationBehaviour()\n self.add_behaviour(register_behaviour, template)\n while not self.has_behaviour(register_behaviour):\n logger.warning(\n \"Station {} could not create RegisterBehaviour. Retrying...\".format(\n self.agent_id\n )\n )\n self.add_behaviour(register_behaviour, template)\n except Exception as e:\n logger.error(\n \"EXCEPTION creating RegisterBehaviour in Station {}: {}\".format(\n self.agent_id, e\n )\n )\n try:\n template = Template()\n template.set_metadata(\"protocol\", TRAVEL_PROTOCOL)\n travel_behaviour = TravelBehaviour()\n self.add_behaviour(travel_behaviour, template)\n while not self.has_behaviour(travel_behaviour):\n logger.warning(\n \"Customer {} could not create TravelBehaviour. 
Retrying...\".format(\n self.agent_id\n )\n )\n self.add_behaviour(travel_behaviour, template)\n except Exception as e:\n logger.error(\n \"EXCEPTION creating TravelBehaviour in Station {}: {}\".format(\n self.agent_id, e\n )\n )\n self.ready = True\n\n async def send(self, msg):\n if not msg.sender:\n msg.sender = str(self.jid)\n logger.debug(f\"Adding agent's jid as sender to message: {msg}\")\n aioxmpp_msg = msg.prepare()\n await self.client.send(aioxmpp_msg)\n msg.sent = True\n self.traces.append(msg, category=str(self))\n\n def set_id(self, agent_id):\n \"\"\"\n Sets the agent identifier\n\n Args:\n agent_id (str): The new Agent Id\n \"\"\"\n self.agent_id = agent_id\n\n def set_icon(self, icon):\n self.icon = icon\n\n def run_strategy(self):\n \"\"\"\n Sets the strategy for the station agent.\n \"\"\"\n if not self.running_strategy:\n template = Template()\n template.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n self.add_behaviour(self.strategy(), template)\n self.running_strategy = True\n\n def set_registration(self, status):\n \"\"\"\n Sets the status of registration\n Args:\n status (boolean): True if the station agent has registered or False if not\n \"\"\"\n self.registration = status\n\n def set_directory(self, directory_id):\n \"\"\"\n Sets the directory JID address\n Args:\n directory_id (str): the DirectoryAgent jid\n \"\"\"\n self.directory_id = directory_id\n\n def set_type(self, station_type):\n self.station_type = station_type\n\n def set_position(self, coords=None):\n \"\"\"\n Sets the position of the station. If no position is provided it is located in a random position.\n\n Args:\n coords (list): a list of coordinates (longitude and latitude)\n \"\"\"\n if coords:\n self.current_pos = coords\n else:\n self.current_pos = random_position()\n\n logger.debug(\n \"Station {} position is {}\".format(self.agent_id, self.current_pos)\n )\n\n def get_position(self):\n \"\"\"\n Returns the current position of the station.\n\n Returns:\n list: the coordinates of the current position of the station (lon, lat)\n \"\"\"\n return self.current_pos\n\n def set_status(self, state=FREE_STATION):\n self.status = state\n\n def get_status(self):\n return self.status\n\n def set_available_places(self, places):\n self.available_places = places\n\n def get_available_places(self):\n return self.available_places\n\n def set_power(self, charge):\n self.power = charge\n\n def get_power(self):\n return self.power\n\n def to_json(self):\n \"\"\"\n Serializes the main information of a station agent to a JSON format.\n It includes the id of the agent, its current position, its current status,\n the number of available places and its charging power.\n\n Returns:\n dict: a JSON doc with the main information of the station.\n\n Example::\n\n {\n \"id\": \"cphillips\",\n \"position\": [ 39.461327, -0.361839 ],\n \"status\": True,\n \"places\": 10,\n \"power\": 10\n }\n \"\"\"\n return {\n \"id\": self.agent_id,\n \"position\": self.current_pos,\n \"status\": self.status,\n \"places\": self.available_places,\n \"power\": self.power,\n \"icon\": self.icon,\n }\n\n async def assigning_place(self):\n \"\"\"\n Set a space in the charging station for the transport that has been accepted; when the available spaces reach zero,\n the status will change to BUSY_STATION\n \"\"\"\n p = self.get_available_places()\n if p - 1 <= 0:\n self.set_status(BUSY_STATION)\n self.set_available_places(p - 1)\n logger.info(\n \"Station {} assigned place. 
Available places are now {}.\".format(\n self.name, self.get_available_places()\n )\n )\n\n async def deassigning_place(self):\n \"\"\"\n Leave a space of the charging station; when the station has free spaces, the status will change to FREE_STATION\n \"\"\"\n if self.waiting_list:\n transport_id = self.waiting_list.pop(0)\n # time statistics update\n if len(self.waiting_list) == 0:\n self.empty_queue_time = time.time()\n self.total_busy_time += (\n self.empty_queue_time - self.transports_in_queue_time\n )\n\n logger.debug(\n \"Station {} has a place to charge transport {}\".format(\n self.agent_id, transport_id\n )\n )\n # confirm EXPLICITLY to transport it can start charging\n reply = Message()\n reply.to = str(transport_id)\n reply.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n reply.set_metadata(\"performative\", ACCEPT_PERFORMATIVE)\n content = {\"station_id\": self.agent_id}\n reply.body = json.dumps(content)\n await self.send(reply)\n # await send_confirmation_to_transport(transport_id)\n\n else:\n p = self.get_available_places()\n if p + 1:\n self.set_status(FREE_STATION)\n self.set_available_places(p + 1)\n\n async def charging_transport(self, need, transport_id):\n total_time = need / self.get_power()\n now = datetime.datetime.now()\n start_at = now + datetime.timedelta(seconds=total_time)\n logger.info(\n \"Station {} started charging transport {} for {} seconds. From {} to {}.\".format(\n self.name, transport_id, total_time, now, start_at\n )\n )\n # charged transports update\n self.charged_transports += 1\n\n charge_behaviour = ChargeBehaviour(start_at=start_at, transport_id=transport_id)\n self.add_behaviour(charge_behaviour)\n\n\nclass ChargeBehaviour(TimeoutBehaviour):\n def __init__(self, start_at, transport_id):\n self.transport_id = transport_id\n super().__init__(start_at)\n\n async def charging_complete(self):\n \"\"\"\n Send a message to the transport agent that the vehicle charging has been completed\n \"\"\"\n reply = Message()\n reply.to = str(self.transport_id)\n reply.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n reply.set_metadata(\"performative\", INFORM_PERFORMATIVE)\n content = {\"status\": TRANSPORT_CHARGED}\n reply.body = json.dumps(content)\n await self.send(reply)\n\n async def run(self):\n logger.debug(\"Station {} finished charging.\".format(self.agent.name))\n self.set(\"current_station\", None)\n await self.agent.deassigning_place()\n await self.charging_complete()\n\n\nclass RegistrationBehaviour(CyclicBehaviour):\n async def on_start(self):\n logger.debug(\"Strategy {} started in directory\".format(type(self).__name__))\n\n def set_registration(self, decision):\n self.agent.registration = decision\n\n async def send_registration(self):\n \"\"\"\n Send a ``spade.message.Message`` with a proposal to the directory to register.\n \"\"\"\n logger.info(\n \"Station {} sent proposal to register to directory {}\".format(\n self.agent.name, self.agent.directory_id\n )\n )\n\n content = {\n \"jid\": str(self.agent.jid),\n \"type\": self.agent.station_type,\n \"status\": self.agent.status,\n \"position\": self.agent.get_position(),\n \"charge\": self.agent.power,\n }\n msg = Message()\n msg.to = str(self.agent.directory_id)\n msg.set_metadata(\"protocol\", REGISTER_PROTOCOL)\n msg.set_metadata(\"performative\", REQUEST_PERFORMATIVE)\n msg.body = json.dumps(content)\n await self.send(msg)\n\n async def run(self):\n try:\n if not self.agent.registration:\n await self.send_registration()\n msg = await self.receive(timeout=10)\n if msg:\n performative = 
msg.get_metadata(\"performative\")\n if performative == ACCEPT_PERFORMATIVE:\n self.set_registration(True)\n logger.debug(\"Registration in the directory\")\n except CancelledError:\n logger.debug(\"Cancelling async tasks...\")\n except Exception as e:\n logger.error(\n \"EXCEPTION in RegisterBehaviour of Station {}: {}\".format(\n self.agent.name, e\n )\n )\n\n\nclass TravelBehaviour(CyclicBehaviour):\n \"\"\"\n This is the internal behaviour that manages the inform messages sent to the station.\n It is triggered when the transport informs the station that it is going to the\n customer's position until the customer is dropped off at its destination.\n \"\"\"\n\n async def on_start(self):\n logger.debug(\"Station {} started TravelBehavior.\".format(self.agent.name))\n\n async def run(self):\n try:\n msg = await self.receive(timeout=5)\n if not msg:\n return\n content = json.loads(msg.body)\n transport_id = msg.sender\n logger.debug(\"Station {} informed of: {}\".format(self.agent.name, content))\n if \"status\" in content:\n status = content[\"status\"]\n if status == TRANSPORT_MOVING_TO_STATION:\n logger.info(\n \"Transport {} coming to station {}.\".format(\n transport_id, self.agent.name\n )\n )\n elif status == TRANSPORT_IN_STATION_PLACE:\n logger.info(\n \"Station {} is going to start charging transport {}\".format(\n self.agent.name, transport_id\n )\n )\n\n await self.agent.charging_transport(content[\"need\"], transport_id)\n except CancelledError:\n logger.debug(\"Cancelling async tasks...\")\n except Exception as e:\n logger.error(\n \"EXCEPTION in Travel Behaviour of Station {}: {}\".format(\n self.agent.name, e\n )\n )\n\n\nclass StationStrategyBehaviour(StrategyBehaviour):\n \"\"\"\n Class from which to inherit to create a station strategy.\n You must overload the :func:`run` method\n\n Helper functions:\n * :func:`get_transport_agents`\n \"\"\"\n\n async def on_start(self):\n logger.debug(\"Strategy {} started in station\".format(type(self).__name__))\n\n async def accept_transport(self, transport_id):\n \"\"\"\n Sends a ``spade.message.Message`` to a transport to accept a travel proposal for charge.\n It uses the REQUEST_PROTOCOL and the ACCEPT_PERFORMATIVE.\n\n Args:\n transport_id (str): The Agent JID of the transport\n \"\"\"\n reply = Message()\n reply.to = str(transport_id)\n reply.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n reply.set_metadata(\"performative\", INFORM_PERFORMATIVE)\n content = {\"station_id\": str(self.agent.jid), \"dest\": self.agent.current_pos}\n reply.body = json.dumps(content)\n await self.send(reply)\n logger.debug(\n \"Station {} accepted proposal for charge from transport {}\".format(\n self.agent.name, transport_id\n )\n )\n\n async def refuse_transport(self, transport_id):\n \"\"\"\n Sends a ``spade.message.Message`` to a transport to refuse a travel proposal for charge.\n It uses the REQUEST_PROTOCOL and the REFUSE_PERFORMATIVE.\n\n Args:\n transport_id (str): The Agent JID of the transport\n \"\"\"\n reply = Message()\n reply.to = str(transport_id)\n reply.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n reply.set_metadata(\"performative\", REFUSE_PERFORMATIVE)\n content = {}\n reply.body = json.dumps(content)\n\n await self.send(reply)\n logger.debug(\n \"Station {} refused proposal for charge from transport {}\".format(\n self.agent.name, transport_id\n )\n )\n\n async def run(self):\n msg = await self.receive(timeout=5)\n\n if msg:\n performative = msg.get_metadata(\"performative\")\n transport_id = msg.sender\n if performative == CANCEL_PERFORMATIVE:\n 
logger.warning(\n \"Station {} received a CANCEL from Transport {}.\".format(\n self.agent.name, transport_id\n )\n )\n await self.agent.deassigning_place()\n elif (\n performative == ACCEPT_PERFORMATIVE\n ): # comes from send_confirmation_travel\n if self.agent.get_status() == FREE_STATION:\n logger.info(\n \"Station {} has a place to charge transport {}\".format(\n self.agent.name, transport_id\n )\n )\n # confirm EXPLICITLY to transport it can start charging\n reply = Message()\n reply.to = str(transport_id)\n reply.set_metadata(\"protocol\", REQUEST_PROTOCOL)\n reply.set_metadata(\"performative\", ACCEPT_PERFORMATIVE)\n content = {\"station_id\": self.agent.name}\n reply.body = json.dumps(content)\n await self.send(reply)\n await self.agent.assigning_place()\n # self.agent.assigning_place()\n\n else: # self.agent.get_status() == BUSY_STATION\n # time statistics update\n if len(self.agent.waiting_list) == 0:\n self.agent.transports_in_queue_time = time.time()\n # transport waits in a waiting_list until it is available to charge\n self.agent.waiting_list.append(str(transport_id))\n # list length statistics update\n self.agent.queue_length = len(self.agent.waiting_list)\n if self.agent.queue_length > self.agent.max_queue_length:\n self.agent.max_queue_length = self.agent.queue_length\n logger.info(\n \"{} is waiting at {}, whose waiting list is {}\".format(\n transport_id, self.agent.name, self.agent.waiting_list\n )\n )\n", "repo_name": "0x1F602/simfleet", "sub_path": "simfleet/station.py", "file_name": "station.py", "file_ext": "py", "file_size_in_byte": 18130, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "spade.agent.Agent", "line_number": 34, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 69, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 69, "usage_type": "name"}, {"api_name": "spade.template.Template", "line_number": 73, "usage_type": "call"}, {"api_name": "protocol.REGISTER_PROTOCOL", "line_number": 74, "usage_type": "argument"}, {"api_name": "loguru.logger.warning", "line_number": 78, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 78, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 85, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 85, "usage_type": "name"}, {"api_name": "spade.template.Template", "line_number": 91, "usage_type": "call"}, {"api_name": "protocol.TRAVEL_PROTOCOL", "line_number": 92, "usage_type": "argument"}, {"api_name": "loguru.logger.warning", "line_number": 96, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 96, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 103, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 103, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 113, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 113, "usage_type": "name"}, {"api_name": "spade.template.Template", "line_number": 136, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 137, "usage_type": "argument"}, {"api_name": "helpers.random_position", "line_number": 170, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 172, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 172, "usage_type": "name"}, {"api_name": "utils.FREE_STATION", "line_number": 185, "usage_type": "name"}, {"api_name": "utils.BUSY_STATION", "line_number": 
238, "usage_type": "argument"}, {"api_name": "loguru.logger.info", "line_number": 240, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 240, "usage_type": "name"}, {"api_name": "time.time", "line_number": 254, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 259, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 259, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 265, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 267, "usage_type": "argument"}, {"api_name": "protocol.ACCEPT_PERFORMATIVE", "line_number": 268, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 270, "usage_type": "call"}, {"api_name": "utils.FREE_STATION", "line_number": 277, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 282, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 282, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 283, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 284, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 284, "usage_type": "name"}, {"api_name": "spade.behaviour.TimeoutBehaviour", "line_number": 296, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 305, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 307, "usage_type": "argument"}, {"api_name": "protocol.INFORM_PERFORMATIVE", "line_number": 308, "usage_type": "argument"}, {"api_name": "utils.TRANSPORT_CHARGED", "line_number": 309, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 310, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 314, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 314, "usage_type": "name"}, {"api_name": "utils.CyclicBehaviour", "line_number": 320, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 322, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 322, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 331, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 331, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 344, "usage_type": "call"}, {"api_name": "protocol.REGISTER_PROTOCOL", "line_number": 346, "usage_type": "argument"}, {"api_name": "protocol.REQUEST_PERFORMATIVE", "line_number": 347, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 348, "usage_type": "call"}, {"api_name": "protocol.ACCEPT_PERFORMATIVE", "line_number": 358, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 360, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 360, "usage_type": "name"}, {"api_name": "asyncio.CancelledError", "line_number": 361, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 362, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 362, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 364, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 364, "usage_type": "name"}, {"api_name": "utils.CyclicBehaviour", "line_number": 371, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 379, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 379, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 386, "usage_type": "call"}, {"api_name": "loguru.logger.debug", 
"line_number": 388, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 388, "usage_type": "name"}, {"api_name": "utils.TRANSPORT_MOVING_TO_STATION", "line_number": 391, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 392, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 392, "usage_type": "name"}, {"api_name": "utils.TRANSPORT_IN_STATION_PLACE", "line_number": 397, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 398, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 398, "usage_type": "name"}, {"api_name": "asyncio.CancelledError", "line_number": 405, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 406, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 406, "usage_type": "name"}, {"api_name": "loguru.logger.error", "line_number": 408, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 408, "usage_type": "name"}, {"api_name": "utils.StrategyBehaviour", "line_number": 415, "usage_type": "name"}, {"api_name": "loguru.logger.debug", "line_number": 425, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 425, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 435, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 437, "usage_type": "argument"}, {"api_name": "protocol.INFORM_PERFORMATIVE", "line_number": 438, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 440, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 442, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 442, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 456, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 458, "usage_type": "argument"}, {"api_name": "protocol.REFUSE_PERFORMATIVE", "line_number": 459, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 461, "usage_type": "call"}, {"api_name": "loguru.logger.debug", "line_number": 464, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 464, "usage_type": "name"}, {"api_name": "protocol.CANCEL_PERFORMATIVE", "line_number": 476, "usage_type": "name"}, {"api_name": "loguru.logger.warning", "line_number": 477, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 477, "usage_type": "name"}, {"api_name": "protocol.ACCEPT_PERFORMATIVE", "line_number": 484, "usage_type": "name"}, {"api_name": "utils.FREE_STATION", "line_number": 486, "usage_type": "name"}, {"api_name": "loguru.logger.info", "line_number": 487, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 487, "usage_type": "name"}, {"api_name": "spade.message.Message", "line_number": 493, "usage_type": "call"}, {"api_name": "protocol.REQUEST_PROTOCOL", "line_number": 495, "usage_type": "argument"}, {"api_name": "protocol.ACCEPT_PERFORMATIVE", "line_number": 496, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 498, "usage_type": "call"}, {"api_name": "time.time", "line_number": 506, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 513, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 513, "usage_type": "name"}]} +{"seq_id": "23760238558", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 19 10:14:57 2018\n\nMonitor the residuals of flow variables during a run of COOLFluiD.\n\nOnce the log of residual has fallen enough, the 
CFL should be modified\naccording to CFL*=2 in the .inter file.\n\nMight-do list: \n * Play an alarm sound when the CFL should be changed. For this, the necessary residual\n fall-off (for instance one order of magnitude) has to be specified\n * Automatically change the CFL parameter according to CFL *= 2 by overwriting\n the appropriate line in the .inter file on the server\n\n@author: Peter Leitner\n\"\"\"\n\n\n#========================== SPECIFY RUNTIME PARAMETERS ==================================\n# Specify time to wait (in minutes) until the next check for an update:\nwaiting_time_for_updates = 0.2 # in minutes\n# Convergence file to be monitored on the server where the computation is running:\nmyPath = \"/users/cpa/peterl/COOLFluiD/OPENMPI/optim/apps/Solver\" \\\n \"/RESULTS_Corona_Restart/\"\nmyFn = \"convergence.plt-P0.FlowNamespace\"\n# SSH connection:\ncomputation = \"local\" # Choose either \"local\" or \"remote\"\nmyHost,myUser,myPassword,myPort = \"helium.esat.kuleuven.be\",\"?\",\"?\",22\n# Choose residual to monitor:\nres = \"T\" # \"rho\"\n# Save updated figure to eps and pdf format:\nsave_figs = False\n#========================================================================================\n\n\nimport matplotlib.pylab as plt\n# import StringManipulationTools as Tools # personal module of tools containing methods for string manipulation\n# ... No longer needed: function find_numbers_in_string included in order to make\n# the script portable\n\ndef find_numbers_in_string(a_string_containing_numbers):\n \"Find either integers, floats or numbers in scientific notation.\"\n import re\n numeric_const_pattern = r\"\"\"\n[-+]? # optional sign\n(?:\n(?: \\d* \\. \\d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc\n|\n(?: \\d+ \\.? ) # 1. 12. 123. etc 1 12 123 etc\n)\n# followed by optional exponent part if desired\n(?: [Ee] [+-]? \\d+ ) ?\n\"\"\"\n rx = re.compile(numeric_const_pattern, re.VERBOSE)\n listofnums = rx.findall(a_string_containing_numbers) # read the numbers out of the string\n return listofnums\n\n\n# Check whether directory for figure output exists. 
If not, the directory is created\nimport os\nif computation==\"local\":\n if not os.path.exists(myPath + \"Figures-convergence/\"):\n os.mkdir(myPath + \"Figures-convergence\")\nif computation==\"remote\":\n import paramiko\n transport = paramiko.Transport((myHost, 22))\n transport.connect(username=myUser,password=myPassword)\n sftp = paramiko.SFTPClient.from_transport(transport)\n try:\n sftp.chdir(myPath + \"Figures-convergence/\") # Test if remote_path exists\n except IOError:\n print(\"Path to save figures does not exist yet, I'm creating it for you.\")\n sftp.mkdir(myPath + \"Figures-convergence/\") # Create remote_path\n sftp.chdir(myPath + \"Figures-convergence/\")\n sftp.close()\n\n \nmyFile = myPath + myFn\n\n# Initialize the convergence parameter lists\niterations = []\n# Residuals\nBx_res = []; By_res = []; Bz_res = []\nEx_res = []; Ey_res = []; Ez_res = []\nrho_res = []\nvx_res = []; vy_res = []; vz_res = []\nT_res = []\nCFL = []\nPhysTime = []\nWallTime = []\n\nif computation == \"remote\":\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(myHost,myPort,myUser,myPassword)\n \n sftp = client.open_sftp() \n fileObject = sftp.file(myFile,\"rb\").readlines()\n sftp.close()\nelif computation == \"local\":\n fileObject = open(myFile,\"rb\").readlines()\n \n\nlenfileObject = len(fileObject) # Number of iterations already computed\n\nfor i in range(2,len(fileObject)): # 2 header lines\n nums = find_numbers_in_string(str(fileObject[i]))\n # There are 19 numbers in the string:\n # 0...number of iterations\n # 9...rho\n # 13...T\n # 14...CFL\n # 15...PhysTime\n # 17...WallTime\n iterations.append(int(nums[0]))\n Bx_res.append(float(nums[1])); By_res.append(float(nums[2])); Bz_res.append(float(nums[3]))\n Ex_res.append(float(nums[4])); Ey_res.append(float(nums[5])); Ez_res.append(float(nums[6]))\n rho_res.append(float(nums[9]))\n vx_res.append(float(nums[10])); vy_res.append(float(nums[11])); vz_res.append(float(nums[12]))\n T_res.append(float(nums[13]))\n CFL.append(int(nums[14]))\n PhysTime.append(float(nums[15]))\n WallTime.append(float(nums[17]))\n \nWallTime = list(map(lambda x: x/60**2, WallTime)) # convert to hours\n\n\n# Convert into list of floats - NO LONGER NEEDED\n# iterations = [int(i) for i in iterations]\n# rho_res = [float(i) for i in rho_res]\n# T_res = [float(i) for i in T_res]\n# CFL = [int(i) for i in CFL]\n# PhysTime = [float(i) for i in PhysTime]\n# WallTime = [float(i) for i in WallTime]\n\n\n# Generate graph that is updated later periodically\nplt.rc(\"text\", usetex=True)\nplt.rcParams[\"text.latex.preamble\"]=[r\"\\usepackage{amsmath}\"]\nplt.close(0)\nfig0, ax0 = plt.subplots(num=0)\nplt.title(\"COOLFluiD Convergence Monitoring\")\nif res==\"T\":\n lh0, = ax0.plot(iterations,T_res,\"+\",color=\"#1f77b4\")\n ax0.set_ylabel(r\"$\\log\\, T$ Residual\",color=\"#1f77b4\")\nelif res==\"rho\":\n lh0, = ax0.plot(iterations,rho_res,\"+\",color=\"#1f77b4\")\n ax0.set_ylabel(r\"$\\log\\, \\varrho$ Residual\",color=\"#1f77b4\") \nax0.set_xlabel(r\"Number of iterations\")\nax0.tick_params(\"y\",colors=\"#1f77b4\")\nax0.grid(True)\nax1 = ax0.twinx()\nlh1, = ax1.plot(iterations,CFL,\".\",color=\"#d62728\")\nax1.set_ylabel(r\"CFL\",color=\"#d62728\")\nax1.tick_params(\"y\",colors=\"#d62728\")\nplt.pause(1) # flush the figure\n\nplt.close(1)\nfig1, ax2 = plt.subplots(num=1)\nplt.title(\"COOLFluiD Convergence Monitoring\")\nlh2, = 
ax2.plot(iterations,Bx_res,\"+\",color=\"#ff7f0e\",label=r\"$\\log\\,B$ residual\")\nlh3, = ax2.plot(iterations,Ex_res,\"+\",color=\"#2ca02c\",label=r\"$\\log\\,E$ residual\")\nlh4, = ax2.plot(iterations,vx_res,\"+\",color=\"#7f7f7f\",label=r\"$\\log\\,U$ residual\")\nif res==\"T\":\n # Plot the residual that is not already monitored in Fig. 1\n lh5, = ax2.plot(iterations,rho_res,\"+\",color=\"#9467bd\",label=r\"$\\log\\,\\varrho$ residual\")\nelif res==\"rho\":\n lh5, = ax2.plot(iterations,T_res,\"+\",color=\"#9467bd\",label=r\"$\\log\\,T$ residual\")\nplt.legend(loc=1)\nax2.set_xlabel(r\"Number of iterations\")\nax2.tick_params(\"y\",colors=\"k\")\nax2.grid(True)\nax3 = ax2.twinx()\nlh6, = ax3.plot(iterations,CFL,\".\",color=\"#d62728\")\nax3.set_ylabel(r\"CFL\",color=\"#d62728\")\nax3.tick_params(\"y\",colors=\"#d62728\")\nplt.pause(1) # flush the figure\n\nplt.close(2)\nfig2, ax4 = plt.subplots(num=2)\nplt.title(\"COOLFluiD Runtime\")\nlh7, = ax4.plot(iterations,WallTime,\"+\",color=\"#1f77b4\")\nax4.set_xlabel(r\"Number of iterations\")\nax4.set_ylabel(r\"Wall time (hrs)\",color=\"#1f77b4\")\nax4.tick_params(\"y\",colors=\"#1f77b4\")\nax4.grid(True)\nax5 = ax4.twinx()\nlh8, = ax5.plot(iterations,CFL,\".\",color=\"#d62728\")\nax5.set_ylabel(r\"CFL\",color=\"#d62728\")\nax5.tick_params(\"y\",colors=\"#d62728\")\nplt.pause(1) # flush the figure\n\n\n# Check every waiting_time_for_updates minutes whether the plot should be updated\nimport time\nwhile True:\n time.sleep(waiting_time_for_updates*60) # check for update on the server\n print(\"Looking for an update...\")\n \n if computation == \"remote\":\n sftp = client.open_sftp()\n fileObject = sftp.file(myFile).readlines()\n sftp.close()\n elif computation == \"local\":\n fileObject = open(myFile,\"rb\").readlines()\n \n if len(fileObject) != lenfileObject:\n print(\"Got an update, I replot.\")\n nums = find_numbers_in_string(str(fileObject[len(fileObject)-1]))\n iterations.append(int(nums[0]))\n Bx_res.append(float(nums[1])); By_res.append(float(nums[2])); Bz_res.append(float(nums[3]))\n Ex_res.append(float(nums[4])); Ey_res.append(float(nums[5])); Ez_res.append(float(nums[6]))\n rho_res.append(float(nums[9]))\n vx_res.append(float(nums[10])); vy_res.append(float(nums[11])); vz_res.append(float(nums[12]))\n T_res.append(float(nums[13]))\n CFL.append(int(nums[14]))\n PhysTime.append(float(nums[15]))\n WallTime.append(float(nums[17])/60**2)\n\n # Replot\n lh0.set_xdata(iterations)\n lh2.set_xdata(iterations)\n if res==\"T\":\n lh0.set_ydata(T_res)\n lh5.set_xdata(iterations); lh5.set_ydata(rho_res)\n elif res==\"rho\":\n lh0.set_ydata(rho_res)\n lh5.set_xdata(iterations); lh5.set_ydata(T_res)\n lh1.set_xdata(iterations); lh1.set_ydata(CFL)\n lh2.set_xdata(iterations); lh2.set_ydata(Bx_res)\n lh3.set_xdata(iterations); lh3.set_ydata(Ex_res)\n lh4.set_xdata(iterations); lh4.set_ydata(vx_res)\n lh6.set_xdata(iterations); lh6.set_ydata(CFL)\n lh7.set_xdata(iterations); lh7.set_ydata(WallTime)\n lh8.set_xdata(iterations); lh8.set_ydata(CFL)\n ax0.relim(); ax0.autoscale_view(True,True,True)\n ax1.relim(); ax1.autoscale_view(True,True,True)\n ax2.relim(); ax2.autoscale_view(True,True,True)\n ax3.relim(); ax3.autoscale_view(True,True,True)\n ax4.relim(); ax4.autoscale_view(True,True,True)\n ax5.relim(); ax5.autoscale_view(True,True,True)\n plt.draw()\n plt.pause(1.)\n if save_figs:\n plt.savefig(myPath + \"Figures-convergence/varCFL-\" + res + \"conv.eps\",bbox_inches=\"tight\")\n plt.savefig(myPath + \"Figures-convergence/varCFL-\" + res + 
\"conv.pdf\",bbox_inches=\"tight\")\n\n lenfileObject = len(fileObject)\n \n", "repo_name": "andrealani/COOLFluiD", "sub_path": "tools/tecplot/convergence-monitoring.py", "file_name": "convergence-monitoring.py", "file_ext": "py", "file_size_in_byte": 9513, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 203, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.compile", "line_number": 56, "usage_type": "call"}, {"api_name": "re.VERBOSE", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 65, "usage_type": "call"}, {"api_name": "paramiko.Transport", "line_number": 68, "usage_type": "call"}, {"api_name": "paramiko.SFTPClient.from_transport", "line_number": 70, "usage_type": "call"}, {"api_name": "paramiko.SFTPClient", "line_number": 70, "usage_type": "attribute"}, {"api_name": "paramiko.SSHClient", "line_number": 95, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rc", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 142, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pylab.close", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pylab.subplots", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pylab.pause", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pylab.close", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pylab.subplots", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pylab.legend", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pylab.pause", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pylab.close", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pylab.subplots", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pylab.title", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pylab.pause", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 194, "usage_type": "name"}, {"api_name": "time.sleep", 
"line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pylab.draw", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pylab.pause", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 248, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 248, "usage_type": "name"}, {"api_name": "matplotlib.pylab.savefig", "line_number": 249, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 249, "usage_type": "name"}]} +{"seq_id": "43647914384", "text": "import logging\nimport operator\n\nimport sublime\n\nfrom . import cache, runners, settings, status, test_frameworks\nfrom .context import Context\nfrom .errors import EmptyHistory, Error, FrameworkNotFound, handle_errors\nfrom .history import History\nfrom .quick_panel_item import QuickPanelItem\nfrom .view_callbacks import ViewCallbacks\n\nSCOPE_LAST = \"last\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef _build_runner_quick_panel_item(runner):\n trigger = (\n runner.cmd\n if runner.scope == test_frameworks.TestFramework.SCOPE_SUITE\n else runner.relpath\n )\n if runner.scope == test_frameworks.TestFramework.SCOPE_LINE:\n trigger += \":{}\".format(runner.line)\n\n scope = \"modified\" if runner.modified else runner.scope\n\n return QuickPanelItem(\n trigger,\n \"[{}] in '{}' with {}\".format(scope, runner.dir, runner.name),\n runner.framework,\n )\n\n\nclass Plugin:\n @classmethod\n @handle_errors\n def show_last_output(cls, focus=True):\n History.current().last().show_output(focus=focus)\n\n @classmethod\n @handle_errors\n def edit_last(cls):\n runner = History.current().last()\n sublime.active_window().open_file(\n \"{}:{}\".format(runner.file, runner.line), sublime.ENCODED_POSITION\n )\n\n @classmethod\n @handle_errors\n def clear_history(cls):\n History.current().clear()\n\n def __init__(self, view):\n self.view = view\n\n @handle_errors\n def select_test_framework(self, scope, edit=False):\n cache.clear()\n\n items = sorted(\n test_frameworks.items(), key=operator.attrgetter(\"language\", \"framework\")\n )\n\n if len(items) == 1 and not settings.get(\"always_show_test_framework_selection\"):\n self.run_test(scope, edit, items[0])\n else:\n self.view.window().show_quick_panel(\n [QuickPanelItem(item.framework, \"\", item.language) for item in items],\n lambda index: index != -1 and self.run_test(scope, edit, items[index]),\n )\n\n @handle_errors\n def run_test(self, scope, edit=False, test_framework=None):\n cache.clear()\n\n try:\n runner = self.build_runner(scope, test_framework=test_framework)\n\n if edit and runner.editable:\n self.view.window().show_input_panel(\n \"Command\",\n runner.cmd,\n lambda cmd: self.process_runner(runner, cmd),\n lambda _: None,\n lambda: None,\n )\n elif edit:\n raise Error(\"Runner '{}' is not editable\".format(runner.name))\n else:\n self.process_runner(runner)\n except FrameworkNotFound as exc:\n if (\n settings.get(\"select_test_framework_when_not_found\")\n and scope != SCOPE_LAST\n ):\n status.update(\"Couldn't find a test framework, please select one\")\n self.view.run_command(\n \"any_test_run\", {\"scope\": scope, \"edit\": edit, \"select\": True}\n )\n else:\n raise exc\n\n @handle_errors\n def show_history(self):\n runners = list(History.current().runners)\n\n if not bool(runners):\n raise EmptyHistory\n\n 
self.view.window().show_quick_panel(\n [_build_runner_quick_panel_item(runner) for runner in runners],\n lambda index: index > -1 and self.process_runner(runners[index]),\n )\n\n def build_runner(self, scope, test_framework=None):\n if scope == SCOPE_LAST:\n return History.current().last()\n\n context = Context(self.view)\n if test_framework is None:\n test_framework = test_frameworks.find(context.file)\n\n runner = runners.find(test_framework)\n\n return runner.build(test_framework(context), scope)\n\n @handle_errors\n def process_runner(self, runner, cmd=\"\"):\n if runner is None:\n raise Error(\"Runner is not set\")\n\n if bool(cmd) and runner.cmd != cmd:\n runner = runner._replace(cmd=cmd, modified=True)\n\n ViewCallbacks(self.view).run()\n\n logger.debug(\"Running '%s' from '%s'\", runner.cmd, runner.dir)\n runner.run()\n History.current().add(runner)\n", "repo_name": "timfjord/AnyTest", "sub_path": "plugin/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4331, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "quick_panel_item.QuickPanelItem", "line_number": 29, "usage_type": "call"}, {"api_name": "history.History.current", "line_number": 40, "usage_type": "call"}, {"api_name": "history.History", "line_number": 40, "usage_type": "name"}, {"api_name": "errors.handle_errors", "line_number": 38, "usage_type": "name"}, {"api_name": "history.History.current", "line_number": 45, "usage_type": "call"}, {"api_name": "history.History", "line_number": 45, "usage_type": "name"}, {"api_name": "sublime.active_window", "line_number": 46, "usage_type": "call"}, {"api_name": "sublime.ENCODED_POSITION", "line_number": 47, "usage_type": "attribute"}, {"api_name": "errors.handle_errors", "line_number": 43, "usage_type": "name"}, {"api_name": "history.History.current", "line_number": 53, "usage_type": "call"}, {"api_name": "history.History", "line_number": 53, "usage_type": "name"}, {"api_name": "errors.handle_errors", "line_number": 51, "usage_type": "name"}, {"api_name": "operator.attrgetter", "line_number": 63, "usage_type": "call"}, {"api_name": "quick_panel_item.QuickPanelItem", "line_number": 70, "usage_type": "call"}, {"api_name": "errors.handle_errors", "line_number": 58, "usage_type": "name"}, {"api_name": "errors.Error", "line_number": 90, "usage_type": "call"}, {"api_name": "errors.FrameworkNotFound", "line_number": 93, "usage_type": "name"}, {"api_name": "errors.handle_errors", "line_number": 74, "usage_type": "name"}, {"api_name": "history.History.current", "line_number": 107, "usage_type": "call"}, {"api_name": "history.History", "line_number": 107, "usage_type": "name"}, {"api_name": "errors.EmptyHistory", "line_number": 110, "usage_type": "name"}, {"api_name": "errors.handle_errors", "line_number": 105, "usage_type": "name"}, {"api_name": "history.History.current", "line_number": 119, "usage_type": "call"}, {"api_name": "history.History", "line_number": 119, "usage_type": "name"}, {"api_name": "context.Context", "line_number": 121, "usage_type": "call"}, {"api_name": "context.file", "line_number": 123, "usage_type": "attribute"}, {"api_name": "errors.Error", "line_number": 132, "usage_type": "call"}, {"api_name": "view_callbacks.ViewCallbacks", "line_number": 137, "usage_type": "call"}, {"api_name": "history.History.current", "line_number": 141, "usage_type": "call"}, {"api_name": "history.History", 
"line_number": 141, "usage_type": "name"}, {"api_name": "errors.handle_errors", "line_number": 129, "usage_type": "name"}]} +{"seq_id": "709316721", "text": "\nimport os\nimport time\nfrom doit.action import CmdAction\nimport shutil\nimport pylab as pl\nimport pandas as pd\nimport numpy as np\nimport opensim as osm\n\nimport task\nimport utilities as util\nimport postprocessing as pp\n\n\nclass TaskCopyGenericModelFilesToResults(task.StudyTask):\n REGISTRY = []\n def __init__(self, study):\n super(TaskCopyGenericModelFilesToResults, self).__init__(study)\n self.name = '%s_copy_generic_model_files' % study.name\n self.doc = 'Copy generic model to the results directory.'\n self.add_action(\n [study.source_generic_model_fpath],\n [study.generic_model_fpath],\n self.copy_file)\n\n self.add_action(\n [study.source_reserve_actuators_fpath],\n [study.reserve_actuators_fpath],\n self.copy_file)\n\n if study.source_rra_actuators_fpath:\n self.add_action(\n [study.source_rra_actuators_fpath],\n [study.rra_actuators_fpath],\n self.copy_file)\n\n if study.source_cmc_actuators_fpath:\n self.add_action(\n [study.source_cmc_actuators_fpath],\n [study.cmc_actuators_fpath],\n self.copy_file)\n\n\nclass TaskCopyMotionCaptureData(task.StudyTask):\n \"\"\"This a very generic task for copying motion capture data (marker\n trajectories, ground reaction, electromyography) and putting it in\n place for creating simulations.\n\n You may want to create your own custom task(s) that is tailored to the\n organization of your experimental data.\n\n The other tasks expect an `expdata` folder in the condition folder (for\n treadmill trials) that contains `marker_trajectories.trc` and\n `ground_reaction.mot`.\n \n Task name: `_copy_data`\n \"\"\"\n REGISTRY = [] # TODO Find a way to make this unnecessary.\n def __init__(self, study, regex_replacements):\n \"\"\"Do not use this constructor directly; use `study.add_task()`.\n\n Parameters\n ----------\n study : \n This argument is provided internally by `study.add_task()`.\n regex_replacements : list of tuples\n Each tuple should have two elements: (a) the pattern to match with\n the path (relative to the motion capture data path) of any file\n within the `motion_capture_data_path`, and (b) the replacement that\n provides the path to where the file should be copied (relative to\n the `results_path`). The list contains as many of these tuples as\n you'd like. 
The regular expression replacements are performed with\n Python's `re.sub()`.\n\n Examples\n --------\n ```\n study.add_task(TaskCopyMotionCaptureData, [\n ('subject01/Data/Walk_100 02.trc',\n 'subject01/walk1/expdata/marker_trajectories.trc')])\n ```\n \"\"\"\n super(TaskCopyMotionCaptureData, self).__init__(study)\n self.name = '_'.join([study.name, 'copy_data'])\n self.doc = 'Copy and organize motion capture data.'\n self.regex_replacements = regex_replacements\n self.register_files()\n\n # self.add_action(self.registry.keys(), self.registry.values(),\n # self.copy_files)\n # May want to copy over files repeatedly during data processing,\n # so get rid of dependencies for now.\n self.actions += [self.copy_files]\n\n def register_files(self):\n # Keys are source file paths (file_dep), values are destination paths\n # (targets).\n self.registry = dict()\n mocap_dir = self.study.config['motion_capture_data_path']\n results_dir = self.study.config['results_path']\n # Use regular expressions to copy/rename files.\n import re\n\n # Check if each file in the mocap_dir matches any of the regular\n # expressions given to this task.\n for dirpath, dirnames, filenames in os.walk(mocap_dir):\n for fname in filenames:\n fpath = os.path.join(dirpath, fname)\n # Form path relative to the mocap directory.\n fpath_rel_to_mocap = os.path.relpath(fpath, mocap_dir)\n for pattern, replacement in self.regex_replacements:\n match = re.search(pattern, fpath_rel_to_mocap)\n if match != None:\n # Found at least one match.\n destination = os.path.join(results_dir, re.sub(pattern,\n replacement, fpath_rel_to_mocap))\n self.registry[fpath] = destination\n\n def copy_files(self):\n for source, destination in self.registry.items():\n fname = os.path.split(source)[1]\n to_dir = os.path.split(destination)[0]\n if not os.path.exists(to_dir): os.makedirs(to_dir)\n\n if os.path.exists(destination):\n overwriting = '(overwriting)'\n else:\n overwriting = ''\n\n print('%-30s -> %s %s' % (fname, to_dir, overwriting))\n\n import shutil\n shutil.copyfile(source, destination)\n\nclass TaskScaleSetup(task.SubjectTask):\n \"\"\"Create a setup file for the OpenSim Scale tool. You must place a\n template model markerset located at\n `templates/scale/prescale_markerset.xml`. This task creates a copy of this\n file for each subject, since you may need to tweak the markerset for\n individual subjects. The fields @STUDYNAME@ and @SUBJECTNAME@ in the\n template will be replaced by the correct values. You can find an example\n template in osimpipeline's templates directory.\n \n Task name: `subject_scale_setup`\n \"\"\"\n REGISTRY = []\n def __init__(self, subject, init_time, final_time,\n mocap_trial, edit_setup_function, addtl_file_dep=[]):\n \"\"\"\n Parameters\n ----------\n init_time : float\n The initial time from the motion capture trial at which to start\n averaging the marker frames.\n final_time : float\n The final time from the motion capture trial at which to stop\n averaging the marker frames.\n mocap_trial : study.Trial\n The Trial whose marker trajectories to use for both the model\n scaling and marker placer steps. This is usually a static trial\n but it does not need to be.\n addtl_file_dep : list of str\n Any other files that should be added as file dependencies, changes\n to which would make this task out of date. 
Usually, you might\n specify the dodo file and whichever file contains the\n `edit_setup_function`.\n\n \"\"\"\n super(TaskScaleSetup, self).__init__(subject)\n self.subj_mass = self.subject.mass\n self.init_time = init_time\n self.final_time = final_time\n self.mocap_trial = mocap_trial\n self.name = '%s_scale_setup' % (self.subject.name)\n self.doc = \"Create a setup file for OpenSim's Scale Tool.\"\n self.edit_setup_function = edit_setup_function\n self.results_scale_path = os.path.join(\n self.study.config['results_path'], 'experiments',\n self.subject.rel_path, 'scale')\n self.output_model_fpath = os.path.join(\n self.study.config['results_path'], 'experiments',\n self.subject.rel_path, '%s.osim' % self.subject.name)\n self.output_motion_fpath = os.path.join(self.results_scale_path,\n '%s_%s_ik_solution.mot' % (self.study.name, self.subject.name))\n self.output_markerset_fpath = os.path.join(self.results_scale_path,\n '%s_%s_markerset.xml' % (self.study.name, self.subject.name))\n\n # setup.xml\n # ---------\n self.setup_fpath = os.path.join(self.results_scale_path, 'setup.xml')\n self.add_action(\n {'marker_traj':\n self.mocap_trial.marker_trajectories_fpath,\n 'generic_model': self.study.generic_model_fpath,\n },\n {'setup': self.setup_fpath\n },\n self.create_scale_setup)\n\n # MarkerSet for the Scale Tool.\n # -----------------------------\n self.source_scale_path = os.path.join(self.subject.rel_path, 'scale')\n self.prescale_template_fpath = 'templates/scale/prescale_markerset.xml'\n self.prescale_markerset_fname = '%s_prescale_markerset.xml' % (\n self.subject.name)\n self.source_prescale_markerset_fpath = os.path.join(\n self.source_scale_path, self.prescale_markerset_fname)\n self.results_prescale_markerset_fpath = os.path.join(\n self.results_scale_path, self.prescale_markerset_fname)\n if not os.path.exists(self.source_prescale_markerset_fpath):\n # The user does not yet have a markerset in place; fill out the\n # template.\n self.add_action(\n {'template': self.prescale_template_fpath},\n {'subjspecific': self.source_prescale_markerset_fpath},\n self.fill_prescale_markerset_template)\n self.actions.append((self.copy_file,\n [[self.source_prescale_markerset_fpath],\n [self.results_prescale_markerset_fpath]]))\n else:\n # We have already filled out the template prescale markerset,\n # and the user might have made changes to it.\n self.file_dep.append(self.source_prescale_markerset_fpath)\n self.add_action(\n [self.source_prescale_markerset_fpath],\n [self.results_prescale_markerset_fpath],\n self.copy_file)\n\n self.file_dep += addtl_file_dep\n\n def fill_prescale_markerset_template(self, file_dep, target):\n if not os.path.exists(target['subjspecific']):\n ft = open(file_dep['template'])\n content = ft.read()\n content = content.replace('@STUDYNAME@', self.study.name)\n content = content.replace('@SUBJECTNAME@', self.subject.name)\n ft.close()\n if not os.path.exists(self.source_scale_path):\n os.makedirs(self.source_scale_path)\n f = open(target['subjspecific'], 'w')\n f.write(content)\n f.close()\n\n def create_scale_setup(self, file_dep, target):\n\n # EDIT THESE FIELDS IN PARTICULAR.\n # --------------------------------\n time_range = osm.ArrayDouble()\n time_range.append(self.init_time)\n time_range.append(self.final_time)\n\n tool = osm.ScaleTool()\n tool.setName('%s_%s' % (self.study.name, self.subject.name))\n tool.setSubjectMass(self.subject.mass)\n\n # GenericModelMaker\n # =================\n gmm = tool.getGenericModelMaker()\n 
gmm.setModelFileName(os.path.relpath(file_dep['generic_model'],\n self.results_scale_path))\n gmm.setMarkerSetFileName(os.path.relpath(self.prescale_markerset_fname))\n\n # ModelScaler\n # ===========\n scaler = tool.getModelScaler()\n scaler.setPreserveMassDist(True)\n marker_traj_rel_fpath = os.path.relpath(file_dep['marker_traj'],\n self.results_scale_path)\n scaler.setMarkerFileName(marker_traj_rel_fpath)\n\n scale_order_str = osm.ArrayStr()\n scale_order_str.append('manualScale')\n scale_order_str.append('measurements')\n scaler.setScalingOrder(scale_order_str)\n\n scaler.setTimeRange(time_range)\n\n mset = scaler.getMeasurementSet()\n\n # Manual scalings\n # ---------------\n sset = scaler.getScaleSet()\n\n # MarkerPlacer\n # ============\n placer = tool.getMarkerPlacer()\n placer.setStaticPoseFileName(marker_traj_rel_fpath)\n placer.setTimeRange(time_range)\n placer.setOutputModelFileName(os.path.relpath(\n self.output_model_fpath, self.results_scale_path))\n placer.setOutputMotionFileName(os.path.relpath(\n self.output_motion_fpath, self.results_scale_path))\n placer.setOutputMarkerFileName(os.path.relpath(\n self.output_markerset_fpath, self.results_scale_path))\n ikts = util.IKTaskSet(placer.getIKTaskSet())\n\n self.edit_setup_function(util, mset, sset, ikts)\n\n # Validate Scales\n # ===============\n model = osm.Model(file_dep['generic_model'])\n bset = model.getBodySet()\n for iscale in range(sset.getSize()):\n segment_name = sset.get(iscale).getSegmentName()\n if not bset.contains(segment_name):\n raise Exception(\"You specified a Scale for \"\n \"body %s but it's not in the model.\" % segment_name)\n\n if not os.path.exists(self.results_scale_path):\n os.makedirs(self.results_scale_path)\n tool.printToXML(target['setup'])\n \n\nclass TaskScale(task.SubjectTask):\n REGISTRY = []\n residual_actuators_template = 'templates/residual_actuators.xml'\n def __init__(self, subject, scale_setup_task, \n ignore_nonexistant_data=False,\n ):\n super(TaskScale, self).__init__(subject)\n self.name = '%s_scale' % (self.subject.name)\n self.doc = \"Run OpenSim's Scale Tool.\"\n self.ignore_nonexistant_data = ignore_nonexistant_data\n\n # file_dep\n # --------\n setup_fname = 'setup.xml'\n self.setup_fpath = scale_setup_task.setup_fpath\n self.generic_model_fpath = self.study.generic_model_fpath\n self.marker_trajectories_fpath = \\\n scale_setup_task.mocap_trial.marker_trajectories_fpath\n self.prescale_markerset_fpath = \\\n scale_setup_task.results_prescale_markerset_fpath\n self.file_dep = [\n self.setup_fpath,\n self.generic_model_fpath,\n self.prescale_markerset_fpath,\n self.marker_trajectories_fpath,\n self.residual_actuators_template,\n ]\n\n # actions\n # -------\n self.actions += [\n self.check_tasks,\n CmdAction(\n '\"' + os.path.join(self.study.config['opensim_home'],\n 'bin','scale') + '\" -S %s' % (setup_fname),\n cwd=scale_setup_task.results_scale_path),\n self.create_residual_actuators,\n ]\n\n # targets\n # -------\n self.output_model_fpath = scale_setup_task.output_model_fpath\n self.residual_actuators_fpath = os.path.join(\n self.study.config['results_path'], 'experiments',\n self.subject.rel_path, '%s_residual_actuators.xml' %\n self.subject.name)\n self.targets += [\n self.output_model_fpath,\n scale_setup_task.output_motion_fpath,\n scale_setup_task.output_markerset_fpath,\n self.residual_actuators_fpath,\n ]\n\n def check_tasks(self):\n \"\"\"Lists tasks that are <apply>'d for markers that either\n don't exist in the model or are not in the TRC file.\n\n Also lists tasks for 
which there is data, but that are either not in\n the model or do not have an IK task.\n\n \"\"\"\n scale = osm.ScaleTool(self.setup_fpath)\n tasks = scale.getMarkerPlacer().getIKTaskSet()\n trc = util.TRCFile(self.marker_trajectories_fpath)\n trc_names = trc.marker_names\n model = osm.Model(self.generic_model_fpath)\n markerset = osm.MarkerSet(self.prescale_markerset_fpath)\n\n # Markers with IK tasks but without data.\n # ---------------------------------------\n markers_without_data = []\n for i in range(tasks.getSize()):\n task = tasks.get(i)\n name = task.getName()\n applied = task.getApply()\n\n if applied:\n if (not name in trc_names) or (not markerset.contains(name)):\n if task.getConcreteClassName() != 'IKCoordinateTask':\n markers_without_data.append(name)\n\n if markers_without_data != [] and not self.ignore_nonexistant_data:\n raise Exception('There are IK tasks for the following markers, '\n 'yet data does not exist for them: {}'.format(\n markers_without_data))\n del name\n\n # Markers for which there is data but they're not specified elsewhere.\n # --------------------------------------------------------------------\n unused_markers = []\n for name in trc.marker_names:\n if (not markerset.contains(name)) or (not tasks.contains(name)):\n unused_markers.append(name)\n if unused_markers != []:\n raise Exception(\"You have data for the following markers, but \"\n \"you are not using them in Scale's IK: {}\".format(\n unused_markers))\n\n # No data for these markers in the model or prescale markerset.\n # -------------------------------------------------------------\n excess_model_markers = []\n for im in range(markerset.getSize()):\n name = markerset.get(im).getName()\n if not tasks.contains(name):\n excess_model_markers.append(name)\n if excess_model_markers != []:\n raise Exception(\"The following model markers do not have tasks or \"\n \"experimental data: {}\".format(excess_model_markers))\n\n def create_residual_actuators(self):\n ft = open(self.residual_actuators_template)\n content = ft.read()\n content = content.replace('@STUDYNAME@', self.study.name)\n content = content.replace('@SUBJECTNAME@', self.subject.name)\n\n def com_in_pelvis():\n import opensim\n m = opensim.Model(self.output_model_fpath)\n init_state = m.initSystem()\n com_in_ground = m.calcMassCenterPosition(init_state)\n com_in_pelvis = opensim.Vec3()\n simbody_engine = m.getSimbodyEngine()\n simbody_engine.transformPosition(init_state,\n m.getBodySet().get('ground'), com_in_ground,\n m.getBodySet().get('pelvis'), com_in_pelvis)\n com_xmeas = str(com_in_pelvis.get(0))\n com_ymeas = str(com_in_pelvis.get(1))\n com_zmeas = str(com_in_pelvis.get(2))\n return com_xmeas, com_ymeas, com_zmeas\n\n com_xmeas, com_ymeas, com_zmeas = com_in_pelvis()\n\n content = content.replace('@SYSTEM_COM_GLOBAL_X_MEAS@', com_xmeas)\n content = content.replace('@SYSTEM_COM_GLOBAL_Y_MEAS@', com_ymeas)\n content = content.replace('@SYSTEM_COM_GLOBAL_Z_MEAS@', com_zmeas)\n\n ft.close()\n\n f = open(self.residual_actuators_fpath, 'w')\n f.write(content)\n f.close()\n\n\nclass TaskGRFGaitLandmarks(task.TrialTask):\n # TODO not actually a trial task if for treadmill...\n REGISTRY = []\n def __init__(self, trial,\n right_grfy_column_name='ground_force_r_vy',\n left_grfy_column_name='ground_force_l_vy',\n threshold=5,\n **kwargs):\n super(TaskGRFGaitLandmarks, self).__init__(trial)\n self.name = '%s_gait_landmarks' % trial.id\n self.doc = 'Plot vertical ground reaction force.'\n self.right_grfy_column_name = right_grfy_column_name\n 
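# Aside (illustrative sketch, not part of osimpipeline): the three marker
# consistency checks in TaskScale.check_tasks above reduce to set arithmetic
# over the marker-name collections, assuming the names are gathered as sets:
#     trc, model, tasks = set(trc_names), set(model_names), set(task_names)
#     tasks - (trc & model)   # IK tasks lacking data or a model marker
#     trc - (model & tasks)   # recorded markers that nothing uses
#     model - tasks           # model markers with no IK task at all
# Each nonempty difference corresponds to one of the exceptions raised there.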
self.left_grfy_column_name = left_grfy_column_name\n self.kwargs = kwargs\n self.threshold = threshold\n self.add_action(\n [trial.ground_reaction_fpath],\n [os.path.join(trial.expdata_path, '..', '%s.pdf' % self.name)],\n self.save_gait_landmarks_fig)\n\n def save_gait_landmarks_fig(self, file_dep, target):\n util.gait_landmarks_from_grf(file_dep[0],\n right_grfy_column_name=self.right_grfy_column_name,\n left_grfy_column_name=self.left_grfy_column_name,\n threshold=self.threshold,\n do_plot=True,\n **self.kwargs)\n pl.gcf().savefig(target[0])\n\n\nclass TaskIKSetup(task.SetupTask):\n REGISTRY = []\n def __init__(self, trial, **kwargs):\n super(TaskIKSetup, self).__init__('ik', trial, **kwargs)\n self.doc = 'Create a setup file for Inverse Kinematics.'\n self.solution_fpath = os.path.join(self.path, \n '%s_%s_ik_solution.mot' % (self.study.name, self.tricycle.id))\n self.model_markers_fpath = os.path.join(self.path, \n 'ik_model_marker_locations.sto')\n\n # Fill out tasks.xml template and copy over to results directory\n self.create_tasks_action()\n\n # Fill out setup.xml template and write to results directory\n self.create_setup_action()\n\n def fill_setup_template(self, file_dep, target,\n init_time=None, final_time=None):\n with open(file_dep[0]) as ft:\n content = ft.read()\n content = content.replace('@STUDYNAME@', self.study.name)\n content = content.replace('@NAME@', self.tricycle.id)\n content = content.replace('@MODEL@', \n os.path.relpath(self.subject.scaled_model_fpath, self.path))\n content = content.replace('@MARKER_FILE@',\n os.path.relpath(self.trial.marker_trajectories_fpath, \n self.path))\n content = content.replace('@TASKS@', os.path.relpath(\n self.results_tasks_fpath, self.path))\n content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n \n with open(target[0], 'w') as f:\n f.write(content)\n\n\nclass TaskIK(task.ToolTask):\n REGISTRY = []\n def __init__(self, trial, ik_setup_task, **kwargs):\n super(TaskIK, self).__init__(ik_setup_task, trial, **kwargs)\n self.doc = \"Run OpenSim's Inverse Kinematics tool.\"\n \n self.file_dep += [\n self.subject.scaled_model_fpath,\n ik_setup_task.results_tasks_fpath,\n ik_setup_task.results_setup_fpath\n ]\n self.targets += [\n ik_setup_task.solution_fpath,\n ik_setup_task.model_markers_fpath\n ]\n\n\nclass TaskIKPost(task.PostTask):\n REGISTRY=[]\n def __init__(self, trial, ik_setup_task, error_markers=None, side=None,\n **kwargs):\n super(TaskIKPost, self).__init__(ik_setup_task, trial, **kwargs)\n self.doc = 'Create plots from the results of Inverse Kinematics.'\n self.joint_angles_plotpath = '%s/joint_angles.pdf' % self.path\n self.marker_errors_plotpath = '%s/marker_error.pdf' % self.path\n self.error_markers = error_markers\n self.side = side\n\n self.add_action([ik_setup_task.solution_fpath],\n [self.joint_angles_plotpath],\n self.joint_angle_plots)\n\n if self.error_markers:\n self.add_action([self.subject.scaled_model_fpath, \n ik_setup_task.model_markers_fpath,\n self.trial.marker_trajectories_fpath],\n [self.marker_errors_plotpath],\n self.marker_error_plots)\n\n def joint_angle_plots(self, file_dep, target):\n # if os.path.exists(self.fig_fpath):\n # os.rename(self.fig_fpath,\n # self.fig_fpath.replace('.pdf', '_backup.pdf'))\n fig = pp.plot_lower_limb_kinematics(file_dep[0], self.gl, \n side=self.side)\n fig.savefig(target[0])\n pl.close(fig)\n\n def marker_error_plots(self, file_dep, target):\n # if os.path.exists(self.errorplot_fpath):\n # 
os.rename(self.errorplot_fpath,\n # self.errorplot_fpath.replace('.pdf', '_backup.pdf'))\n pp.plot_marker_error(target[0], self.error_markers, \n 10, self.gl, file_dep[0], file_dep[1], file_dep[2])\n\n \nclass TaskIDSetup(task.SetupTask):\n REGISTRY = []\n def __init__(self, trial, ik_setup_task, **kwargs):\n super(TaskIDSetup, self).__init__('id', trial, **kwargs)\n self.doc = 'Create a setup file for Inverse Dynamics.'\n self.ik_setup_task = ik_setup_task\n self.rel_kinematics_fpath = os.path.relpath(\n ik_setup_task.solution_fpath, self.path)\n self.solution_fpath = os.path.join(\n self.path, 'results','%s_%s_id_solution.sto' % (\n self.study.name, trial.id))\n\n # Fill out external_loads.xml template and copy over to results \n # directory\n self.create_external_loads_action(self.rel_kinematics_fpath)\n\n # Fill out setup.xml template and write to results directory\n self.create_setup_action()\n\n def fill_setup_template(self, file_dep, target,\n init_time=None, final_time=None):\n with open(file_dep[0]) as ft:\n content = ft.read()\n content = content.replace('@STUDYNAME@', self.study.name)\n content = content.replace('@NAME@', self.tricycle.id)\n content = content.replace('@MODEL@', \n os.path.relpath(self.subject.scaled_model_fpath, self.path))\n content = content.replace('@COORDINATES_FILE@',\n self.rel_kinematics_fpath)\n content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n \n with open(target[0], 'w') as f:\n f.write(content)\n\nclass TaskID(task.ToolTask):\n REGISTRY = []\n def __init__(self, trial, id_setup_task, **kwargs):\n super(TaskID, self).__init__(id_setup_task, trial, **kwargs)\n self.doc = \"Run OpenSim's Inverse Dynamics tool.\"\n self.ik_setup_task = id_setup_task.ik_setup_task\n self.file_dep += [\n self.subject.scaled_model_fpath,\n id_setup_task.results_extloads_fpath,\n id_setup_task.results_setup_fpath,\n self.ik_setup_task.solution_fpath\n ]\n self.targets += [\n id_setup_task.solution_fpath\n ]\n\nclass TaskIDPost(task.PostTask):\n REGISTRY = []\n def __init__(self, trial, id_setup_task, **kwargs):\n super(TaskIDPost, self).__init__(id_setup_task, trial, **kwargs)\n self.doc = 'Create plots from the results of Inverse Dynamics.'\n self.trial = trial\n self.ik_setup_task = id_setup_task.ik_setup_task\n self.id_solution_fpath = id_setup_task.solution_fpath\n self.file_dep += [\n self.id_solution_fpath\n ]\n self.actions += [\n self.cycle_joint_torque_plots\n ]\n\n def cycle_joint_torque_plots(self):\n\n id_array = util.storage2numpy(self.id_solution_fpath)\n\n for cycle in self.trial.cycles:\n fname = 'joint_torques_cycle%02d.pdf' % cycle.num\n output_filepath = os.path.join(self.path, fname)\n\n pp.plot_gait_torques(output_filepath, id_array, \n self.trial.primary_leg, cycle.start, cycle.end,\n cycle.gl.right_strike, cycle.gl.left_strike, \n toeoff_time=cycle.gl.right_toeoff)\n\nclass TaskSOSetup(task.SetupTask):\n REGISTRY = []\n def __init__(self, trial, ik_setup_task, **kwargs):\n super(TaskSOSetup, self).__init__('so', trial, **kwargs)\n self.doc = 'Create a setup file for Static Optimization.'\n self.ik_setup_task = ik_setup_task\n self.kinematics_fpath = ik_setup_task.solution_fpath\n self.rel_kinematics_fpath = os.path.relpath(\n self.kinematics_fpath, self.path)\n self.solution_fpath = os.path.join(\n self.path, 'results','%s_%s_so_StaticOptimization_activation.sto' % (\n self.study.name, self.tricycle.id))\n\n # Fill out external_loads.xml template and copy over to results \n # 
directory\n        self.create_external_loads_action(self.rel_kinematics_fpath)\n\n        # Fill out setup.xml template and write to results directory\n        self.create_setup_action()\n\n    def fill_setup_template(self, file_dep, target,\n                            init_time=None, final_time=None):\n        with open(file_dep[0]) as ft:\n            content = ft.read()\n            content = content.replace('@STUDYNAME@', self.study.name)\n            content = content.replace('@NAME@', self.tricycle.id)\n            content = content.replace('@MODEL@', \n                os.path.relpath(self.subject.scaled_model_fpath, self.path))\n            content = content.replace('@COORDINATES_FILE@',\n                self.rel_kinematics_fpath)\n            content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n            content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n            force_set_files = [\n                os.path.relpath(\n                    os.path.join(self.study.config['results_path'],\n                        'experiments',\n                        self.subject.rel_path, \n                        '%s_residual_actuators.xml' % self.subject.name),\n                    self.path),\n                os.path.relpath(\n                    self.study.reserve_actuators_fpath, self.path),\n            ]\n            force_set_str = ' '.join(force_set_files)\n            content = content.replace('@FORCE_SET_FILES@', force_set_str)\n        \n        with open(target[0], 'w') as f:\n            f.write(content)\n\nclass TaskSO(task.ToolTask):\n    REGISTRY = []\n    def __init__(self, trial, so_setup_task, **kwargs):\n        super(TaskSO, self).__init__(so_setup_task, trial, exec_name='analyze',\n            **kwargs)\n        self.doc = \"Run OpenSim's Static Optimization tool.\"\n\n        self.file_dep += [\n            self.subject.scaled_model_fpath,\n            self.study.reserve_actuators_fpath,\n            so_setup_task.kinematics_fpath,\n            so_setup_task.results_extloads_fpath,\n        ]\n        self.targets += [\n            so_setup_task.solution_fpath,\n        ]\n\nclass TaskSOPost(task.PostTask):\n    REGISTRY = []\n    def __init__(self, trial, so_setup_task, **kwargs):\n        super(TaskSOPost, self).__init__(so_setup_task, trial, **kwargs)\n        self.doc = \"Create plots from the results of Static Optimization.\"\n        self.cycle = so_setup_task.cycle\n\n        # Generate muscle activations plots from SO\n        self.add_action([so_setup_task.solution_fpath],\n                        [os.path.join(self.path, 'activations.pdf')],\n                        self.plot_activations)\n\n    def plot_activations(self, file_dep, target):\n        import matplotlib.pyplot as plt\n        import matplotlib.gridspec as gridspec\n        from copy import copy\n\n        residuals = ['FX', 'FY', 'FZ', 'MX', 'MY', 'MZ']\n\n        act = util.storage2numpy(file_dep[0])\n        names = copy(residuals)\n        # To help with plotting left and right actuators on same axes.\n        for name in act.dtype.names:\n            if name.endswith('_r'):\n                names.append(name[:-2])\n        n_subplots = len(names)\n        n_cols = 6\n        # Integer division: GridSpec requires an int row count (plain '/'\n        # would yield a float under Python 3).\n        n_rows = n_subplots // n_cols + 1\n\n        fig = plt.figure(figsize=(4.5 * n_cols, 4 * n_rows))\n        grid = gridspec.GridSpec(n_rows, n_cols)\n        i_row = 0\n        i_col = 0\n\n        for name in names:\n            ax = plt.Subplot(fig, grid[i_row, i_col])\n            fig.add_subplot(ax)\n\n            if name in residuals:\n                pp.plot_pgc(act['time'], act[name], self.cycle.gl,\n                            side=self.cycle.gl.primary_leg,\n                            axes=ax)\n            else:\n                for s in ['left', 'right']:\n                    pp.plot_pgc(act['time'], act['%s_%s' % (name, s[0])],\n                                self.cycle.gl, side=s, axes=ax)\n\n            ax.set_title(name)\n            ax.set_xlim(0, 100)\n\n            if not (name in residuals or name.startswith('reserve_')):\n                # This is a muscle; set the ylims to [0, 1].\n                ax.set_ylim(0, 1)\n\n            if i_col < (n_cols - 1):\n                i_col += 1\n            else:\n                i_col = 0\n                i_row += 1\n        plt.tight_layout()\n        fig.savefig(target[0])\n        plt.close(fig)\n\nclass TaskRRAModelSetup(task.SetupTask):\n    REGISTRY = []\n    def __init__(self, trial, adjust_body='torso', **kwargs):\n        super(TaskRRAModelSetup, self).__init__('rramodel', trial, 
**kwargs)\n        self.doc = \"Create a setup file for the Residual Reduction Algorithm, to create an adjusted model.\"\n        self.adjust_body = adjust_body\n\n        # Fill out external_loads.xml template and copy over to results \n        # directory\n        self.create_external_loads_action()\n\n        # Fill out tasks.xml template and copy over to results directory\n        self.create_tasks_action()\n\n        # Fill out setup.xml template and write to results directory\n        self.create_setup_action()\n\n    def fill_setup_template(self, file_dep, target,\n                            init_time=None, final_time=None):\n        with open(file_dep[0]) as ft:\n            content = ft.read()\n            content = content.replace('@STUDYNAME@', self.study.name)\n            content = content.replace('@NAME@', self.tricycle.id)\n            content = content.replace('@MODEL@', \n                os.path.relpath(self.trial.model_to_adjust_fpath, self.path))\n            content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n            content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n            force_set_files = '%s %s' % (\n                os.path.relpath(self.study.rra_actuators_fpath, self.path), \n                os.path.relpath(self.subject.residual_actuators_fpath, \n                    self.path))\n            content = content.replace('@FORCESETFILES@', force_set_files)\n            content = content.replace('@ADJUSTCOMBODY@', self.adjust_body)\n            # We always compute the mass change, but we just don't always USE\n            # the resulting model.\n            content = content.replace('@MODELADJUSTED@',\n                self.adjusted_model)\n        \n        with open(target[0], 'w') as f:\n            f.write(content)\n\nclass TaskRRAKinSetup(task.SetupTask):\n    REGISTRY = []\n    def __init__(self, trial, **kwargs):\n        super(TaskRRAKinSetup, self).__init__('rrakin', trial, **kwargs)\n        self.doc = \"Create a setup file for the Residual Reduction Algorithm tool to adjust kinematics.\"\n\n        # Fill out external_loads.xml template and copy over to results \n        # directory\n        self.create_external_loads_action()\n\n        # Fill out tasks.xml template and copy over to results directory\n        self.create_tasks_action()\n\n        # Fill out setup.xml template and write to results directory\n        self.create_setup_action()\n\n    def fill_setup_template(self, file_dep, target,\n                            init_time=None, final_time=None):\n        with open(file_dep[0]) as ft:\n            content = ft.read()\n            content = content.replace('@STUDYNAME@', self.study.name)\n            content = content.replace('@NAME@', self.tricycle.id)\n            content = content.replace('@MODEL@', \n                os.path.relpath(self.adjusted_model_fpath, self.path))\n            content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n            content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n            force_set_files = '%s %s' % (\n                os.path.relpath(self.study.rra_actuators_fpath, self.path), \n                os.path.relpath(self.subject.residual_actuators_fpath, \n                    self.path))\n            content = content.replace('@FORCESETFILES@', force_set_files)\n\n        with open(target[0], 'w') as f:\n            f.write(content)\n\nclass TaskRRA(task.ToolTask):\n    REGISTRY = []\n    def __init__(self, setup_task, trial, **kwargs):\n        kwargs['exec_name'] = 'rra'\n        super(TaskRRA, self).__init__(setup_task, trial, **kwargs)\n        self.doc = \"Abstract class for OpenSim's RRA tool.\"\n        self.des_kinematics_fpath = '%s/ik/%s_%s_ik_solution.mot' % (\n            trial.results_exp_path, self.study.name, setup_task.tricycle.id)\n        self.des_kinetics_fpath = \\\n            '%s/expdata/ground_reaction_orig.mot' % trial.results_exp_path\n\n        # Set file dependencies\n        self.file_dep += [\n            self.des_kinematics_fpath,\n            self.des_kinetics_fpath,\n            self.subject.residual_actuators_fpath,\n            self.study.rra_actuators_fpath\n        ]\n\n        # Set targets for all RRA outputs\n        for rra_output in ['Actuation_force.sto',\n            
'Actuation_power.sto', 'Actuation_speed.sto',\n            'avgResiduals.txt', 'controls.sto', 'controls.xml',\n            'Kinematics_dudt.sto', 'Kinematics_q.sto', 'Kinematics_u.sto',\n            'pErr.sto', 'states.sto']:\n            self.targets += ['%s/results/%s_%s_%s_%s' % (\n                self.path, self.study.name, setup_task.tricycle.id,\n                setup_task.tool, rra_output)]\n\nclass TaskRRAModel(TaskRRA):\n    REGISTRY = []\n    def __init__(self, trial, rramodel_setup_task, reenable_probes=False, \n                 **kwargs):\n        super(TaskRRAModel, self).__init__(rramodel_setup_task, trial, \n            **kwargs)\n        self.doc = \"Run OpenSim's RRA tool to create an adjusted model.\"\n        # Keep a reference to the setup task; reenable_probes (below) needs it\n        # because doit invokes actions with no arguments.\n        self.rramodel_setup_task = rramodel_setup_task\n\n        # Set common file dependencies\n        self.file_dep += [\n            trial.model_to_adjust_fpath,\n            rramodel_setup_task.results_setup_fpath,\n            rramodel_setup_task.results_tasks_fpath,\n            rramodel_setup_task.results_extloads_fpath, \n        ]\n\n        self.targets += [rramodel_setup_task.adjusted_model_fpath]\n\n        # Deal with the fact that this operation would otherwise overwrite\n        # the valuable RRA log.\n        cur_log = '%s/out.log' % self.path\n        rra_log = '%s/out_rra.log' % self.path\n\n        def copylog():\n            if os.path.exists(cur_log): shutil.copyfile(cur_log, rra_log)\n        def removelog():\n            if os.path.exists(cur_log): os.remove(cur_log)\n        def renamelog():\n            if os.path.exists(rra_log): os.rename(rra_log, cur_log)\n        self.actions += [\n            copylog,\n            removelog,\n            renamelog,\n        ]\n\n        if reenable_probes:\n            self.actions += [self.reenable_probes]\n\n    def reenable_probes(self):\n        # RRA disables the probes; let's re-enable them. (Takes no arguments:\n        # the setup task is stored on self in __init__.)\n        import opensim\n        m = opensim.Model(self.rramodel_setup_task.adjusted_model_fpath)\n        util.enable_probes(self.rramodel_setup_task.adjusted_model_fpath)\n\nclass TaskRRAKin(TaskRRA):\n    REGISTRY = []\n    def __init__(self, trial, rrakin_setup_task, **kwargs):\n        super(TaskRRAKin, self).__init__(rrakin_setup_task, trial, **kwargs)\n        self.doc = \"Run OpenSim's RRA tool to adjust model kinematics\"\n        \n        # Set file dependencies\n        self.file_dep += [\n            rrakin_setup_task.adjusted_model_fpath,\n            rrakin_setup_task.results_setup_fpath,\n            rrakin_setup_task.results_tasks_fpath,\n            rrakin_setup_task.results_extloads_fpath\n        ]\n\nclass TaskCMCSetup(task.SetupTask):\n    REGISTRY = []\n    def __init__(self, trial, des_kinematics='rrakin', \n                 control_constraints=None, **kwargs):\n        super(TaskCMCSetup, self).__init__('cmc', trial, **kwargs)\n        self.doc = \"Create a setup file for Computed Muscle Control.\"\n        self.des_kinematics = des_kinematics\n        self.control_constraints = control_constraints\n\n        # Set desired kinematics path\n        if self.des_kinematics=='rrakin':\n            self.des_kinematics_fpath = os.path.join(trial.results_exp_path, \n                'rrakin', 'results',\n                '%s_%s_rrakin_Kinematics_q.sto' % (self.study.name, \n                    self.tricycle.id))\n        elif self.des_kinematics=='ik':\n            self.des_kinematics_fpath = os.path.join(trial.results_exp_path,\n                'ik', '%s_%s_ik_solution.mot' % (self.study.name, \n                    self.tricycle.id))\n        else:\n            raise Exception(\"TaskCMCSetup: %s is not a valid kinematics task \"\n                \"source, please choose 'rrakin' or 'ik'.\" % des_kinematics)\n        \n        # Set control constraints path (if any)\n        if self.control_constraints:\n            raise Exception(\"TaskCMCSetup: control constraints have yet to be \"\n                \"implemented.\")\n        else:\n            self.control_constraints_fpath = ''\n\n        # TODO\n        # self.coord_act_fpath\n\n        # Fill out external_loads.xml template and copy over to results \n        # directory\n        self.create_external_loads_action()\n\n        # Fill out tasks.xml template and copy over to results directory\n        self.create_tasks_action()\n\n        # Fill out setup.xml template 
and write to results directory\n        self.create_setup_action()\n\n    # Override derived action method since different desired kinematics\n    # may be specified \n    def fill_external_loads_template(self, file_dep, target):\n        with open(file_dep[0]) as ft:\n            content = ft.read()\n            content = content.replace('@STUDYNAME@', self.study.name)\n            content = content.replace('@NAME@', self.tricycle.id)\n            content = content.replace('@DESKINEMATICS@', \n                os.path.relpath(self.des_kinematics_fpath, self.path))\n\n        with open(target[0], 'w') as f:\n            f.write(content)\n\n    def fill_setup_template(self, file_dep, target, \n                            init_time=None, final_time=None):\n        with open(file_dep[0]) as ft:\n            content = ft.read()\n            content = content.replace('@STUDYNAME@', self.study.name)\n            content = content.replace('@NAME@', self.tricycle.id)\n            content = content.replace('@MODEL@', \n                os.path.relpath(self.adjusted_model_fpath, self.path))\n            content = content.replace('@INIT_TIME@', '%.4f' % init_time)\n            content = content.replace('@FINAL_TIME@', '%.4f' % final_time)\n            force_set_files = '%s %s' % (\n                os.path.relpath(self.study.cmc_actuators_fpath, self.path), \n                os.path.relpath(self.subject.residual_actuators_fpath, \n                    self.path))\n            content = content.replace('@FORCESETFILES@', force_set_files)\n            content = content.replace('@DESKINEMATICS@',\n                os.path.relpath(self.des_kinematics_fpath, self.path))\n            content = content.replace('@CONTROLCONSTRAINTS@', \n                os.path.relpath(self.control_constraints_fpath, self.path))\n        \n        with open(target[0], 'w') as f:\n            f.write(content)\n\nclass TaskCMC(task.ToolTask):\n    REGISTRY = []\n    def __init__(self, trial, cmc_setup_task, **kwargs):\n        super(TaskCMC, self).__init__(cmc_setup_task, trial, **kwargs)\n        self.doc = \"Run OpenSim's Computed Muscle Control tool.\"\n        self.des_kinetics_fpath = \\\n            '%s/expdata/ground_reaction_orig.mot' % trial.results_exp_path\n\n        self.file_dep += [\n            cmc_setup_task.adjusted_model_fpath,\n            cmc_setup_task.results_setup_fpath,\n            cmc_setup_task.results_extloads_fpath,\n            cmc_setup_task.results_tasks_fpath,\n            cmc_setup_task.des_kinematics_fpath,\n            self.des_kinetics_fpath,\n            self.subject.residual_actuators_fpath,\n            self.study.cmc_actuators_fpath\n        ]\n\n        if cmc_setup_task.control_constraints:\n            self.file_dep += [cmc_setup_task.control_constraints_fpath]\n\n        # Set targets for all CMC outputs\n        for cmc_output in ['Actuation_force.sto',\n            'Actuation_power.sto', 'Actuation_speed.sto',\n            'avgResiduals.txt', 'controls.sto', 'controls.xml',\n            'Kinematics_dudt.sto', 'Kinematics_q.sto', 'Kinematics_u.sto',\n            'pErr.sto', 'states.sto']:\n\n            self.targets += ['%s/results/%s_%s_%s_%s' % (\n                self.path, self.study.name, cmc_setup_task.tricycle.id, \n                'cmc', cmc_output)]", "repo_name": "stanfordnmbl/osimpipeline", "sub_path": "osimpipeline/vital_tasks.py", "file_name": "vital_tasks.py", "file_ext": "py", "file_size_in_byte": 44511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "task.StudyTask", "line_number": 16, "usage_type": "attribute"}, {"api_name": "task.StudyTask", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 113, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 134, "usage_type": "call"}, {"api_name": "task.SubjectTask", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path", "line_number": 233, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 239, "usage_type": "call"}, {"api_name": "os.path", "line_number": 239, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 240, "usage_type": "call"}, {"api_name": "opensim.ArrayDouble", "line_number": 249, "usage_type": "call"}, {"api_name": "opensim.ScaleTool", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 268, "usage_type": "call"}, {"api_name": "os.path", "line_number": 268, "usage_type": "attribute"}, {"api_name": "opensim.ArrayStr", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 292, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "utilities.IKTaskSet", "line_number": 296, "usage_type": "call"}, {"api_name": "opensim.Model", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path", "line_number": 310, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 311, "usage_type": "call"}, {"api_name": "task.SubjectTask", "line_number": 315, "usage_type": "attribute"}, {"api_name": "doit.action.CmdAction", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path", "line_number": 348, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 357, "usage_type": "call"}, {"api_name": "os.path", "line_number": 357, "usage_type": "attribute"}, {"api_name": "opensim.ScaleTool", "line_number": 376, "usage_type": "call"}, {"api_name": "utilities.TRCFile", "line_number": 378, "usage_type": "call"}, {"api_name": "opensim.Model", "line_number": 380, "usage_type": "call"}, {"api_name": "opensim.MarkerSet", "line_number": 381, "usage_type": "call"}, {"api_name": "task.getName", "line_number": 388, "usage_type": "call"}, {"api_name": "task.getApply", "line_number": 389, "usage_type": "call"}, {"api_name": "task.getConcreteClassName", "line_number": 393, "usage_type": "call"}, {"api_name": "opensim.Model", "line_number": 432, "usage_type": "call"}, {"api_name": "opensim.Vec3", "line_number": 435, "usage_type": "call"}, {"api_name": "task.TrialTask", "line_number": 458, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 475, "usage_type": "call"}, {"api_name": "os.path", "line_number": 475, "usage_type": "attribute"}, {"api_name": "utilities.gait_landmarks_from_grf", "line_number": 479, "usage_type": "call"}, {"api_name": "pylab.gcf", "line_number": 485, "usage_type": "call"}, {"api_name": "task.SetupTask", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 493, "usage_type": "call"}, {"api_name": "os.path", "line_number": 493, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 495, "usage_type": "call"}, {"api_name": "os.path", "line_number": 495, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 511, "usage_type": "call"}, {"api_name": "os.path", "line_number": 511, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 513, "usage_type": "call"}, {"api_name": "os.path", "line_number": 513, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 515, "usage_type": "call"}, {"api_name": "os.path", "line_number": 515, "usage_type": "attribute"}, {"api_name": "task.ToolTask", "line_number": 524, "usage_type": "attribute"}, {"api_name": "task.PostTask", "line_number": 541, "usage_type": "attribute"}, {"api_name": "postprocessing.plot_lower_limb_kinematics", "line_number": 567, "usage_type": "call"}, {"api_name": "pylab.close", "line_number": 570, "usage_type": "call"}, {"api_name": "postprocessing.plot_marker_error", "line_number": 576, "usage_type": "call"}, {"api_name": "task.SetupTask", "line_number": 580, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path", "line_number": 586, 
"usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 588, "usage_type": "call"}, {"api_name": "os.path", "line_number": 588, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 606, "usage_type": "call"}, {"api_name": "os.path", "line_number": 606, "usage_type": "attribute"}, {"api_name": "task.ToolTask", "line_number": 615, "usage_type": "attribute"}, {"api_name": "task.PostTask", "line_number": 631, "usage_type": "attribute"}, {"api_name": "utilities.storage2numpy", "line_number": 648, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 652, "usage_type": "call"}, {"api_name": "os.path", "line_number": 652, "usage_type": "attribute"}, {"api_name": "postprocessing.plot_gait_torques", "line_number": 654, "usage_type": "call"}, {"api_name": "task.SetupTask", "line_number": 659, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 666, "usage_type": "call"}, {"api_name": "os.path", "line_number": 666, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 668, "usage_type": "call"}, {"api_name": "os.path", "line_number": 668, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 686, "usage_type": "call"}, {"api_name": "os.path", "line_number": 686, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 692, "usage_type": "call"}, {"api_name": "os.path", "line_number": 692, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 693, "usage_type": "call"}, {"api_name": "os.path", "line_number": 693, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 698, "usage_type": "call"}, {"api_name": "os.path", "line_number": 698, "usage_type": "attribute"}, {"api_name": "task.ToolTask", "line_number": 707, "usage_type": "attribute"}, {"api_name": "task.PostTask", "line_number": 724, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 733, "usage_type": "call"}, {"api_name": "os.path", "line_number": 733, "usage_type": "attribute"}, {"api_name": "utilities.storage2numpy", "line_number": 743, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 744, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 753, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 753, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 754, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 754, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Subplot", "line_number": 759, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 759, "usage_type": "name"}, {"api_name": "postprocessing.plot_pgc", "line_number": 763, "usage_type": "call"}, {"api_name": "postprocessing.plot_pgc", "line_number": 768, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 783, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 783, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 785, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 785, "usage_type": "name"}, {"api_name": "task.SetupTask", "line_number": 787, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 811, "usage_type": "call"}, {"api_name": "os.path", "line_number": 811, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 815, "usage_type": "call"}, {"api_name": "os.path", "line_number": 815, "usage_type": 
"attribute"}, {"api_name": "os.path.relpath", "line_number": 816, "usage_type": "call"}, {"api_name": "os.path", "line_number": 816, "usage_type": "attribute"}, {"api_name": "task.SetupTask", "line_number": 828, "usage_type": "attribute"}, {"api_name": "os.join.relpath", "line_number": 851, "usage_type": "call"}, {"api_name": "os.join", "line_number": 851, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 855, "usage_type": "call"}, {"api_name": "os.path", "line_number": 855, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 856, "usage_type": "call"}, {"api_name": "os.path", "line_number": 856, "usage_type": "attribute"}, {"api_name": "task.ToolTask", "line_number": 863, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 916, "usage_type": "call"}, {"api_name": "os.path", "line_number": 916, "usage_type": "attribute"}, {"api_name": "shutil.copyfile", "line_number": 916, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 918, "usage_type": "call"}, {"api_name": "os.path", "line_number": 918, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 918, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 920, "usage_type": "call"}, {"api_name": "os.path", "line_number": 920, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 920, "usage_type": "call"}, {"api_name": "opensim.Model", "line_number": 933, "usage_type": "call"}, {"api_name": "utilities.enable_probes", "line_number": 934, "usage_type": "call"}, {"api_name": "task.SetupTask", "line_number": 950, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 961, "usage_type": "call"}, {"api_name": "os.path", "line_number": 961, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 966, "usage_type": "call"}, {"api_name": "os.path", "line_number": 966, "usage_type": "attribute"}, {"api_name": "os.join.relpath", "line_number": 1001, "usage_type": "call"}, {"api_name": "os.join", "line_number": 1001, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 1013, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1013, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 1017, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1017, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 1018, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1018, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 1022, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1022, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 1024, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1024, "usage_type": "attribute"}, {"api_name": "task.ToolTask", "line_number": 1029, "usage_type": "attribute"}]} +{"seq_id": "11726437502", "text": "from django.urls import path\nfrom api.views \\\nimport student, curriculum, \\\ncourse, university, section, \\\ngrade_stats, feedback, grade_stats, \\\ndepartment, staff_member, utils\n\n\nurlpatterns = [\n path('login', student.StudentLogin.as_view()),\n path('students', student.StudentList.as_view()),\n path('students/', student.StudentDetail.as_view()),\n path('students//sections/', section.EnrollStudent.as_view()),\n path('send-activation-email', student.send_activation_email),\n path('departments', department.DepartmentList.as_view()),\n path('instructors/', 
staff_member.StaffMemberDetail.as_view()),\n \n path('universities', university.UniversityList.as_view()),\n path('universities/', university.UniversityDetail.as_view()),\n path('universities//curriculums', curriculum.CurriculumList.as_view()),\n path('universities//courses', course.CourseList.as_view()),\n path('universities//sections-terms', section.get_sections_terms_by_university),\n path('universities//departments', department.get_departments_by_university),\n path('universities//instructors', staff_member.StaffMemberList.as_view()),\n\n path('departments//courses', course.get_courses_by_department_id),\n path('courses', course.CourseList.as_view()),\n path('courses/', course.CourseDetail.as_view()),\n\n path('sections', section.SectionList.as_view()),\n path('sections/', section.SectionDetail.as_view()),\n path('sections//feedbacks', feedback.FeedbackList.as_view()),\n path('sections//grade-stats', grade_stats.GradeStatsDetail.as_view()),\n\n path('loaderio-2cfac6fc22e5d24034475d0fcb53e6e1/', utils.get_loader_io),\n path('feedbacks/', feedback.FeedbackDetail.as_view()),\n path('students//feedbacks//like', feedback.FeedbackLike.as_view()),\n path('students//feedbacks//report', feedback.report_feedback),\n\n\n path('curriculums/', curriculum.CurriculumDetail.as_view()),\n path('curriculums', curriculum.CurriculumList.as_view()),\n path('activate-user//', student.activate_student, name='activate')\n]\n", "repo_name": "felixdasta/MyUniPerformance", "sub_path": "backend/api/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "api.views.student.StudentLogin.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "api.views.student.StudentLogin", "line_number": 10, "usage_type": "attribute"}, {"api_name": "api.views.student", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "api.views.student.StudentList.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "api.views.student.StudentList", "line_number": 11, "usage_type": "attribute"}, {"api_name": "api.views.student", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "api.views.student.StudentDetail.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "api.views.student.StudentDetail", "line_number": 12, "usage_type": "attribute"}, {"api_name": "api.views.student", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "api.views.section.EnrollStudent.as_view", "line_number": 13, "usage_type": "call"}, {"api_name": "api.views.section.EnrollStudent", "line_number": 13, "usage_type": "attribute"}, {"api_name": "api.views.section", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "api.views.student.send_activation_email", "line_number": 14, "usage_type": "attribute"}, {"api_name": "api.views.student", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "api.views.department.DepartmentList.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "api.views.department.DepartmentList", 
"line_number": 15, "usage_type": "attribute"}, {"api_name": "api.views.department", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "api.views.staff_member.StaffMemberDetail.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "api.views.staff_member.StaffMemberDetail", "line_number": 16, "usage_type": "attribute"}, {"api_name": "api.views.staff_member", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "api.views.university.UniversityList.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "api.views.university.UniversityList", "line_number": 18, "usage_type": "attribute"}, {"api_name": "api.views.university", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "api.views.university.UniversityDetail.as_view", "line_number": 19, "usage_type": "call"}, {"api_name": "api.views.university.UniversityDetail", "line_number": 19, "usage_type": "attribute"}, {"api_name": "api.views.university", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumList.as_view", "line_number": 20, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumList", "line_number": 20, "usage_type": "attribute"}, {"api_name": "api.views.curriculum", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "api.views.course.CourseList.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "api.views.course.CourseList", "line_number": 21, "usage_type": "attribute"}, {"api_name": "api.views.course", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "api.views.section.get_sections_terms_by_university", "line_number": 22, "usage_type": "attribute"}, {"api_name": "api.views.section", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "api.views.department.get_departments_by_university", "line_number": 23, "usage_type": "attribute"}, {"api_name": "api.views.department", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "api.views.staff_member.StaffMemberList.as_view", "line_number": 24, "usage_type": "call"}, {"api_name": "api.views.staff_member.StaffMemberList", "line_number": 24, "usage_type": "attribute"}, {"api_name": "api.views.staff_member", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "api.views.course.get_courses_by_department_id", "line_number": 26, "usage_type": "attribute"}, {"api_name": "api.views.course", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "api.views.course.CourseList.as_view", "line_number": 27, "usage_type": "call"}, {"api_name": "api.views.course.CourseList", "line_number": 27, "usage_type": "attribute"}, {"api_name": "api.views.course", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "api.views.course.CourseDetail.as_view", "line_number": 28, "usage_type": 
"call"}, {"api_name": "api.views.course.CourseDetail", "line_number": 28, "usage_type": "attribute"}, {"api_name": "api.views.course", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "api.views.section.SectionList.as_view", "line_number": 30, "usage_type": "call"}, {"api_name": "api.views.section.SectionList", "line_number": 30, "usage_type": "attribute"}, {"api_name": "api.views.section", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "api.views.section.SectionDetail.as_view", "line_number": 31, "usage_type": "call"}, {"api_name": "api.views.section.SectionDetail", "line_number": 31, "usage_type": "attribute"}, {"api_name": "api.views.section", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackList.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackList", "line_number": 32, "usage_type": "attribute"}, {"api_name": "api.views.feedback", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "api.views.grade_stats.GradeStatsDetail.as_view", "line_number": 33, "usage_type": "call"}, {"api_name": "api.views.grade_stats.GradeStatsDetail", "line_number": 33, "usage_type": "attribute"}, {"api_name": "api.views.grade_stats", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "api.views.utils.get_loader_io", "line_number": 35, "usage_type": "attribute"}, {"api_name": "api.views.utils", "line_number": 35, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackDetail.as_view", "line_number": 36, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackDetail", "line_number": 36, "usage_type": "attribute"}, {"api_name": "api.views.feedback", "line_number": 36, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackLike.as_view", "line_number": 37, "usage_type": "call"}, {"api_name": "api.views.feedback.FeedbackLike", "line_number": 37, "usage_type": "attribute"}, {"api_name": "api.views.feedback", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 38, "usage_type": "call"}, {"api_name": "api.views.feedback.report_feedback", "line_number": 38, "usage_type": "attribute"}, {"api_name": "api.views.feedback", "line_number": 38, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumDetail.as_view", "line_number": 41, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumDetail", "line_number": 41, "usage_type": "attribute"}, {"api_name": "api.views.curriculum", "line_number": 41, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumList.as_view", "line_number": 42, "usage_type": "call"}, {"api_name": "api.views.curriculum.CurriculumList", "line_number": 42, "usage_type": "attribute"}, {"api_name": "api.views.curriculum", "line_number": 42, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, 
{"api_name": "api.views.student.activate_student", "line_number": 43, "usage_type": "attribute"}, {"api_name": "api.views.student", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "23463208958", "text": "import os\nimport json\nimport base64\nimport requests\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom svglib.svglib import svg2rlg\nfrom reportlab.graphics import renderPM\n\n\ndef check_output_folder(folder):\n if os.path.exists(folder) is False:\n os.makedirs(folder)\n\n\ndef save_to_file(filename, data):\n with open(filename, \"w\") as f:\n f.write(json.dumps(data, indent=4))\n\n\ndef save_to_json(func):\n def wrapped(obj, *args, **kwargs):\n obj_name = obj.__class__.__name__\n func_name = func.__name__\n\n result = func(obj, *args, **kwargs)\n\n check_output_folder(f\"results/{obj_name}\")\n save_to_file(f\"results/{obj_name}/{func_name}.json\", result)\n\n return result\n return wrapped\n\n\ndef get_pokemon_sprite(sprite_name, shiny=False):\n check_output_folder(f\"sprites/pokemon/shiny\")\n\n is_shiny = \"-shiny\" if shiny else \"\"\n url = f\"https://dev.bframework.de/static/pokedex/sprites/front{is_shiny}/{sprite_name}.gif\"\n\n res = requests.get(url)\n content = res.content\n\n shiny_prefix = \"shiny/\" if shiny else \"\"\n file_path = f\"sprites/pokemon/{shiny_prefix}{sprite_name}.gif\"\n\n with open(file_path, \"wb\") as o:\n o.write(content)\n\n return open(file_path, \"rb\")\n\n\ndef get_sprite(sprite_type, sprite_name, shiny=False):\n try:\n if sprite_type == \"pokemon\":\n return get_pokemon_sprite(sprite_name, shiny)\n elif sprite_type == \"streamer\":\n return get_streamer_avatar(sprite_name)\n return get_item_sprite(sprite_type, sprite_name)\n except:\n pass\n return None\n\n\ndef get_streamer_avatar(streamer):\n check_output_folder(f\"sprites/avatars\")\n file_path = f\"sprites/avatars/{streamer}.png\"\n\n if os.path.isfile(file_path):\n return open(file_path, \"rb\")\n\n res = requests.get(f\"https://www.twitch.tv/{streamer}\")\n soup = BeautifulSoup(res.text, \"html.parser\")\n avatar = soup.find(\"meta\", {\"name\": \"twitter:image\"})\n\n res = requests.get(avatar[\"content\"])\n with open(file_path, \"wb\") as o:\n o.write(res.content)\n\n im = Image.open(file_path)\n im = im.resize((64, 64))\n im.save(file_path)\n\n return open(file_path, \"rb\")\n\n\ndef get_item_sprite(sprite_type, sprite_name):\n check_output_folder(f\"sprites/{sprite_type}\")\n\n file_path = f\"sprites/{sprite_type}/{sprite_name}\"\n\n if os.path.isfile(file_path + \".png\"):\n return open(file_path + \".png\", \"rb\")\n\n url = f\"https://poketwitch.bframework.de/static/twitchextension/items/{sprite_type}/{sprite_name}\"\n try:\n\n res = requests.get(url + \".svg\")\n\n soup = BeautifulSoup(res.text, \"html.parser\")\n svg = soup.find(\"image\")\n\n if svg is None:\n with open(file_path + \".svg\", \"wb\") as o:\n o.write(res.content)\n\n drawing = svg2rlg(file_path + \".svg\")\n renderPM.drawToFile(drawing, file_path + \".png\", fmt=\"PNG\")\n\n else:\n href = svg[\"href\"]\n\n s = href.split(\"base64,\")[1]\n\n img_data = s.encode()\n content = base64.b64decode(img_data)\n with open(file_path + \".png\", \"wb\") as o:\n o.write(content)\n except:\n url = url + \".png\"\n\n res = requests.get(url)\n content = res.content\n\n if res.status_code != 200:\n return None\n\n with open(file_path + \".png\", \"wb\") as o:\n o.write(content)\n\n file_path = file_path + \".png\"\n\n if sprite_type != \"pokemon\":\n im = Image.open(file_path)\n im = im.resize((64, 64))\n 
im.save(file_path)\n\n return open(file_path, \"rb\")\n", "repo_name": "Dominic-Santos/twitch_bot", "sub_path": "TwitchChannelPointsMinerLocal/classes/entities/Pokemon/Utils.py", "file_name": "Utils.py", "file_ext": "py", "file_size_in_byte": 3627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 73, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 80, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 98, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 100, "usage_type": "call"}, {"api_name": "svglib.svglib.svg2rlg", "line_number": 107, "usage_type": "call"}, {"api_name": "reportlab.graphics.renderPM.drawToFile", "line_number": 108, "usage_type": "call"}, {"api_name": "reportlab.graphics.renderPM", "line_number": 108, "usage_type": "name"}, {"api_name": "base64.b64decode", "line_number": 116, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 134, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 134, "usage_type": "name"}]} +{"seq_id": "35415847265", "text": "from flask import Flask, render_template, flash, request, session, redirect, url_for\nfrom pony.orm import Database, Optional, Required, PrimaryKey, db_session, sql_debug, select\nimport datetime as dt\nfrom pathlib import Path\n#import statistics as st\nimport os\nimport pony\nimport pprint\nimport logging\nfrom logging.handlers import RotatingFileHandler\nfrom forms import DataEntryForm, SelectReadingForm, EditReadingForm\nfrom functions import decimalAverage\nfrom collections import namedtuple\n\n# App config.\n##DEBUG = True\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = os.urandom(512)\n\n@app.errorhandler(405)\ndef methodNotAllowed(e):\n f = request.full_path.split('?', 1)[0]\n return render_template('405.jinja2', **locals()), 405\n\ndbFile = \"/home/bill/glucose2/glucose.db\"\ndbPath = Path(dbFile)\ndb = Database()\n\nclass Readings(db.Entity):\n date = PrimaryKey(str) #(dt.datetime)\n average = Optional(float)\n comment = Optional(str)\n hold = Optional(float)\n\ndb.bind(provider='sqlite', filename=str(dbPath), create_db=False)\ndb.generate_mapping(create_tables=False)\n\n@app.route(\"/\", methods=['GET'])\ndef home():\n flash(dbPath)\n with db_session:\n numberOfHeldReadings = len(Readings.select(lambda c: c.hold is not None))\n if numberOfHeldReadings == 0:\n flash('There are no partial readings.')\n elif numberOfHeldReadings == 1:\n flash('There is one partial reading.')\n 
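# Aside (sketch, not in the original app): len(Readings.select(...)) above
# materializes every matching row just to count it; Pony can count in SQL:
#     held = Readings.select(lambda c: c.hold is not None).count()
# Query.count() issues a SELECT COUNT(*) and returns an int directly.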
else:\n            flash(f'There are {numberOfHeldReadings} partial readings.')\n    return render_template('Home.jinja2', **locals())\n\n@app.route(\"/enter\", methods=['GET', 'POST'])\ndef enter():\n\n    flash(dbPath)\n\n    if request.method == 'GET':\n        form = DataEntryForm(request.form)\n        return render_template('EnterReading.jinja2', **locals())\n\n    elif request.method == 'POST':\n        form = DataEntryForm(request.form)\n        reqdate = request.form['ddate']\n        comment = request.form['annotation']\n        morning = request.form['amreading']\n        evening = request.form['pmreading']\n        if evening == '':\n            average = None\n            hold = morning\n        else:\n            average = decimalAverage(morning, evening)\n            hold = None\n        try:\n            with db_session:\n                Readings(date = reqdate, average = average, comment = comment, hold = hold)\n        except Exception as e:\n            flash(f'ERROR (DataEntry.py:64): {e}')\n        return render_template('EnterReading.jinja2', **locals())\n\n    else:\n        return \"fall through\"\n\n@app.route(\"/select\", methods=['GET'])\ndef select():\n\n    flash(dbPath)\n\n    with db_session:\n        heldReadings = Readings.select(lambda c: c.hold is not None).order_by(1)\n        numberOfHeldReadings = len(heldReadings)\n        heldReadingsList = list(heldReadings)\n        if numberOfHeldReadings > 0:\n            heldReadingDates = []\n            index = 1\n            for heldReading in heldReadingsList:\n                heldReadingDates.append((f'D{index}', heldReading.date))\n                index += 1\n            form = SelectReadingForm()\n            form.helddateslist.choices = heldReadingDates\n            session['heldDates'] = heldReadingDates\n            return render_template('SelectReading.jinja2', **locals()) # form=heldForm)\n        else:\n            return render_template('NoneHeld.jinja2', **locals())\n\n@app.route(\"/edit\", methods=['POST'])\ndef edit():\n\n    flash(dbPath)\n\n    form = SelectReadingForm(request.form)\n    FormIndex = form.data['helddateslist']\n    heldReadingDates = session['heldDates']\n    # session.pop('heldDates')\n    heldReadingDates = dict(heldReadingDates)\n    WorkingDate = heldReadingDates[FormIndex]\n    session['WorkingDate'] = WorkingDate\n    with db_session:\n        reading = Readings[WorkingDate]\n        heldReading = namedtuple('heldReading', ['readingDate', 'amreading', 'annotation'])\n        hr = heldReading(WorkingDate, reading.hold, reading.comment)\n        form = EditReadingForm(obj=hr)\n    return render_template('EditReading.jinja2', **locals())\n\n@app.route(\"/update\", methods=['POST'])\ndef update():\n    WorkingDate = session['WorkingDate']\n    session.pop('WorkingDate')\n    form = EditReadingForm(request.form)\n    evening = form.data['pmreading']\n    if evening is None:\n        return render_template('NoEvening.jinja2', **locals())\n    with db_session:\n        reading = Readings[WorkingDate]\n        morning = reading.hold\n        reading.hold = None\n        reading.average = decimalAverage(morning, evening)\n        reading.comment = form.data['annotation']\n    return redirect(url_for('home'))\n\n\nif __name__ == \"__main__\":\n\n    # handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)\n    # handler.setLevel(logging.DEBUG)\n    # handler.setLevel(logging.INFO)\n    # app.logger.addHandler(handler)\n    # app.logger.setLevel(logging.DEBUG)\n    # l = app.logger.info\n\n    app.debug = True\n    app.logger = True\n\n    app.run(host='wtrenker.com', port=7000, debug = True, use_reloader=False)\n# use_reloader=False is the key to getting multi-threaded debug working in PyCharm\n", "repo_name": "wtrenker/DataEntry", "sub_path": "DataEntry.py", "file_name": "DataEntry.py", "file_ext": "py", "file_size_in_byte": 5135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.full_path.split", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.full_path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "pony.orm.Database", "line_number": 28, "usage_type": "call"}, {"api_name": "pony.orm.PrimaryKey", "line_number": 31, "usage_type": "call"}, {"api_name": "pony.orm.Optional", "line_number": 32, "usage_type": "call"}, {"api_name": "pony.orm.Optional", "line_number": 33, "usage_type": "call"}, {"api_name": "pony.orm.Optional", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 41, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "forms.DataEntryForm", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "forms.DataEntryForm", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "functions.decimalAverage", "line_number": 71, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 86, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 88, "usage_type": "name"}, {"api_name": "forms.SelectReadingForm", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 
101, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 108, "usage_type": "call"}, {"api_name": "forms.SelectReadingForm", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 116, "usage_type": "name"}, {"api_name": "pony.orm.db_session", "line_number": 117, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 119, "usage_type": "call"}, {"api_name": "forms.EditReadingForm", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 126, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 127, "usage_type": "name"}, {"api_name": "forms.EditReadingForm", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 131, "usage_type": "call"}, {"api_name": "pony.orm.db_session", "line_number": 132, "usage_type": "name"}, {"api_name": "functions.decimalAverage", "line_number": 136, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 138, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "29925118708", "text": "import pygame\nfrom Constantes import *\nimport base64\nimport json\nimport csv\nimport gzip\nfrom Jugador import *\nfrom nudo import *\nfrom Escalera import *\nfrom Bloque import *\nfrom Gema import *\nimport sys\nfrom pygame.locals import*\nfrom Nivel1 import *\n\nwidth=100\nheight=720\nblack = (0,0,0)\nblue = (0, 0, 255)\nwhite = (255,255,255)\nred = (180,0,0)\ngreen = (0,180,0)\n\nbackgroundColor = (205,69,159)\n\nredSelected = (255,0,0)\ngreenSelected = (0,255,0)\n\nscreen = pygame.display.set_mode((widht, height))\n\ndef uploadMap(name):\n\n global mapWidth,mapHeight,tileHeight,tileWidth,matrizMap\n\n f = open(name+\".json\", \"r\")\n data = json.load(f)\n f.close()\n\n tileWidth=data[\"tilewidth\"]\n tileHeight=data[\"tileheight\"]\n\n mapWidth=data[\"width\"]\n mapHeight=data[\"height\"]\n\n #obtener mapa\n for item in data[\"layers\"]:\n mapa=item[\"data\"]\n\n #print (mapa)\n\n for i in range(0, len(mapa), mapWidth):\n matrizMap.append(mapa[i:i+mapWidth])\n #for i in range(mapHeight):\n # print (matrizMap[i])\n\ndef arrayTileset(img):\n x=0\n y=0\n\n hojaTiles=[]\n\n for i in range(8):\n for h in range(8):\n imagen=cut(img,(x,y,64,64))\n hojaTiles.append(imagen)\n x+=64\n x=0\n y+=64\n\n return hojaTiles\n\ndef cut (img, rectangle):\n rect = pygame.Rect(rectangle)\n image = pygame.Surface(rect.size).convert()\n image.blit(img,(0,0), rect)\n return image\n\ndef nivel2():\n #pygame.quit()\n screen = pygame.display.set_mode((1200, 1000))\n Cinematica2()\n pygame.display.set_caption(\"sprite\")\n reloj = pygame.time.Clock()\n game_over = False\n jugador = Jugador([600, 300])\n desplazamientoX = 0 #-100\n desplazamientoY = 0 #-1400\n jugador.gemas=0\n #inicializamos pygame\n pygame.init()\n #imagen = pygame.image.load(\"fondo.png\")\n 
#screen.blit(imagen ,(0,0))\n pygame.display.set_caption(\"Mapa\")\n clock = pygame.time.Clock()\n img = pygame.image.load(\"juego.png\")\n uploadMap(\"mapa2\")\n hoja = arrayTileset(img)\n\n #Load the sprite images\n Bloque1 = img.subsurface(0,0,64,64)\n Bloque2 = img.subsurface(64,0,64,64)\n Bloque3 = img.subsurface(128,0,64,64)\n #BloqueL = img.subsurface(384,448,64,64)\n Caja = img.subsurface(384,0,64,64)\n Escalera1 = img.subsurface(256,320,64,64)\n Escalera2 = img.subsurface(320,320,64,64)\n imgGema = img.subsurface(128,384,64,64)\n Bloque4 = img.subsurface(256,0,64,64)\n\n #Load the music\n pygame.mixer.music.load(\"Musica.mp3\")\n pygame.mixer.music.play(-1)\n\n #Load the sounds\n #sonidoGema = pygame.mixer.Sound(\"Gema.mp3\")\n #sonidoMuerte = pygame.mixer.Sound(\"Muerte.mp3\")\n\n #Create the sprite groups\n jugadores = pygame.sprite.Group()\n gemas = pygame.sprite.Group()\n escaleras = pygame.sprite.Group()\n bloques = pygame.sprite.Group()\n enemigos = pygame.sprite.Group()\n\n #Create the sprites\n jugadores.add(jugador)\n\n for i in range(mapHeight):\n for j in range(mapWidth):\n minum = matrizMap[i][j]\n if minum == 1 :\n b = Bloque([j*64,i*64],Bloque1)\n bloques.add(b)\n elif minum == 2 :\n b = Bloque([j*64,i*64],Bloque2)\n bloques.add(b)\n #elif minum == 63:\n # b = Bloque([j*64,i*64],BloqueL)\n # bloques.add(b)\n elif minum == 3 :\n b = Bloque([j*64,i*64],Bloque3)\n bloques.add(b)\n elif minum == 7 :\n b = Bloque([j*64,i*64],Caja)\n bloques.add(b)\n elif minum == 45 :\n e = Escalera([j*64,i*64],Escalera1,True)\n escaleras.add(e)\n elif minum == 46 :\n e = Escalera([j*64,i*64],Escalera2,True)\n escaleras.add(e)\n elif minum == 51 :\n g = Gema([j*64,i*64],imgGema)\n gemas.add(g)\n elif minum == 5 :\n b = Bloque([j*64,i*64],Bloque4)\n bloques.add(b)\n\n while not game_over:\n\n screen.fill((112, 145, 241))\n clock.tick(10)\n for event in pygame.event.get():\n if event.type == QUIT:\n game_over = True\n sys.exit()\n\n jugador.bajar = False\n jugador.escalar = False\n jugador.escalando = False\n jugador.bajando = False\n\n ls = pygame.sprite.spritecollide(jugador,escaleras,False)\n if len(ls) > 0 :\n jugador.escalar = True\n for e in ls:\n if e.bajar:\n jugador.bajar = True\n\n temp = jugador.evento(event)\n bloques.update(temp)\n escaleras.update(temp)\n gemas.update(temp)\n\n\n ls = pygame.sprite.spritecollide(jugador,escaleras,False)\n if len(ls) > 0 :\n jugador.escalar = True\n\n\n ls = pygame.sprite.spritecollide(jugador,bloques,False)\n for e in ls:\n if jugador.rect.top < e.rect.bottom and temp[1] < 0 and not jugador.escalando:\n temp = [0,(e.rect.bottom-jugador.rect.top)]\n bloques.update(temp)\n escaleras.update(temp)\n gemas.update(temp)\n jugador.contGravedad = 0\n break\n elif jugador.rect.bottom > e.rect.top and temp[1] > 0 and not jugador.bajando:\n temp = [0,(e.rect.top-jugador.rect.bottom)]\n jugador.contGravedad = 0\n bloques.update(temp)\n escaleras.update(temp)\n gemas.update(temp)\n break\n\n ls = pygame.sprite.spritecollide(jugador,bloques,False)\n for e in ls:\n if jugador.rect.right > e.rect.left and temp[0] > 0 :\n temp = [(e.rect.left-jugador.rect.right),0]\n bloques.update(temp)\n escaleras.update(temp)\n gemas.update(temp)\n break\n elif jugador.rect.left < e.rect.right and temp[0] < 0:\n temp = [(e.rect.right-jugador.rect.left),0]\n bloques.update(temp)\n escaleras.update(temp)\n gemas.update(temp)\n break\n\n ls = pygame.sprite.spritecollide(jugador,gemas,True)\n for e in ls:\n jugador.gemas += 1\n print(jugador.gemas)\n 
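# render pass: draw the blocks, ladders and gems first, then the player on top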
\n for b in bloques :\n screen.blit(b.image,b.rect)\n for e in escaleras :\n screen.blit(e.image,e.rect)\n for g in gemas :\n screen.blit(g.image,g.rect)\n screen.blit(jugador.imagen,jugador.rect)\n pygame.display.flip()", "repo_name": "Cesar-2/Juego", "sub_path": "Nivel2.py", "file_name": "Nivel2.py", "file_ext": "py", "file_size_in_byte": 6652, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.display.set_mode", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 80, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 90, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 93, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 94, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.load", "line_number": 111, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 112, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 121, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 123, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 163, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 163, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 186, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 186, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", 
"line_number": 191, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 191, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 208, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 223, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 235, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 235, "usage_type": "attribute"}]} +{"seq_id": "13115225412", "text": "from sys import stdin\nfrom collections import deque\n\nn, k = map(int, stdin.readline().split())\nvirus = []\nd = [[] for _ in range(k + 1)]\ncnt = 0\n\nfor i in range(n): # 지도 입력 받음\n input = list(map(int, stdin.readline().split()))\n virus.append(input)\n for j in range(n):\n if input[j]:\n d[input[j]].append((i, j))\n\ns, x, y = map(int, stdin.readline().split())\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef bfs(k, tuple):\n queue = deque()\n while tuple:\n queue.append(tuple.pop())\n while queue:\n x, y = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or ny < 0 or nx >= n or ny >= n: # 범위 넘는 경우 패스\n continue\n if virus[nx][ny] == 0:\n virus[nx][ny] = k\n tuple.append((nx, ny))\n\n\nfor _ in range(s):\n for i in range(1, k + 1):\n bfs(i, d[i])\n\nprint(virus[x-1][y-1])\n", "repo_name": "olive-su/1day_1Algorithm", "sub_path": "22.02_PS/0220_경쟁적_전염.py", "file_name": "0220_경쟁적_전염.py", "file_ext": "py", "file_size_in_byte": 970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin.readline", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 4, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 16, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "73993507686", "text": "import os\nimport requests\nimport json\nimport ast\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom flask import Flask, redirect, render_template, request, session, url_for\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\n#loading enviorement virables \nload_dotenv()\nauth_string= os.getenv(\"auth_string\")\nauth_data = os.getenv(\"auth_data\")\nauth_data= ast.literal_eval(auth_data)\nauth_string=ast.literal_eval(auth_string)\nDBNAME = os.getenv(\"DBNAME\")\nDATABASEURI = os.getenv(\"URI\")\n\n#calling api for spotify token\ntoken_response = requests.post('https://accounts.spotify.com/api/token',\n data = auth_data,\n headers = auth_string,) \ntoken_response_data = token_response.json()\n#assiging token to a virable\nthe_token = token_response_data['access_token']\n\napp = Flask(__name__)\n\n#Connecting to database\napp.config[\"MONGO_DBNANE\"] = DBNAME\napp.config[\"MONGO_URI\"] = DATABASEURI\n\nmongo = PyMongo(app)\n\n\n@app.route('/')\n@app.route('/home')\n#Display function is responsible for getting images from Spotify API and displaying records from the database to a flask template\ndef display():\n #getting images from spotify api\n def get_playlist_image(id):\n image_response = 
requests.get('https://api.spotify.com/v1/playlists/' + id + '/images',\n headers={'Authorization':'Bearer '+ the_token})\n #checking if the connection is successful\n if(image_response.status_code == 200):\n image_response_data = image_response.json()\n image_url = image_response_data[0]['url']\n return image_url\n else:\n #displaying a placeholder image if the connection failed\n image_url='./static/img/missing.jpg'\n return image_url\n #database requests\n playlists=mongo.db.playlists.find() \n is_empty = playlists.count()\n all_categories = mongo.db.categories.find() \n #sending variables to be used in the flask template file\n return render_template(\"home.html\", is_empty=is_empty, playlists=playlists, get_playlist_image=get_playlist_image, categories=all_categories)\n\n@app.route('/category/<category_id>')\n#displaying all items from the database under the category table\ndef display_category(category_id):\n #finding the category by id\n the_categories = mongo.db.categories.find_one({'_id': ObjectId(category_id)})\n # we want to display an error message if the category is empty\n #for this reason we are counting records found by find()\n #then we are passing is_empty to flask which will display an error if the value is 0\n filter_query = { \"category\": the_categories['category_name']}\n filtered_playlists = mongo.db.playlists.find(filter_query)\n is_empty = filtered_playlists.count()\n #as above getting the images from spotify API\n def get_playlist_image(id):\n image_response = requests.get('https://api.spotify.com/v1/playlists/' + id + '/images',\n headers={'Authorization':'Bearer '+ the_token})\n if(image_response.status_code == 200):\n image_response_data = image_response.json()\n image_url = image_response_data[0]['url']\n return image_url\n else:\n image_url='./static/img/missing.jpg'\n return image_url\n all_categories = mongo.db.categories.find()\n\n return render_template(\"category.html\", is_empty=is_empty, playlists=filtered_playlists, get_playlist_image=get_playlist_image, category=the_categories, categories=all_categories)\n\n@app.route('/add')\ndef add():\n return render_template('add.html',\n categories=mongo.db.categories.find())\n\n@app.route('/insert', methods=['POST','GET'])\n#inserting the newly added record from the flask form into the database\ndef insert_playlist():\n playlists=mongo.db.playlists\n playlists.insert_one(request.form.to_dict())\n return redirect(url_for('display'))\n\n\n@app.route('/edit/<playlist_id>')\ndef edit_playlist(playlist_id):\n the_playlist = mongo.db.playlists.find_one({'_id': ObjectId(playlist_id)})\n all_categories = mongo.db.categories.find()\n return render_template('edit.html', playlist=the_playlist,\n categories=all_categories)\n\n@app.route('/update_playlist/<playlist_id>',methods=['POST'])\n#updating the edited record in the database\ndef update_playlist(playlist_id):\n playlist = mongo.db.playlists \n playlist.update(\n {'_id': ObjectId(playlist_id)},\n {\n 'name':request.form.get('name'),\n 'category':request.form.get('category_name'),\n 'description': request.form.get('description'),\n 'spotify_id':request.form.get('spotify_id'),\n 'spotify_link':request.form.get('spotify_link'),\n }\n )\n return redirect(url_for('display'))\n\n@app.route('/delete_playlist/<playlist_id>')\n#deleting the chosen record\ndef delete_playlist(playlist_id):\n mongo.db.playlists.remove({'_id': ObjectId(playlist_id)})\n return redirect(url_for('display'))\n\n@app.route('/playlist/<playlist_id>')\ndef display_tracks(playlist_id):\n the_playlist = mongo.db.playlists.find_one({'_id': ObjectId(playlist_id)})\n def status_check(id):\n
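# lightweight availability probe: a 200 from the playlist endpoint means the playlist still exists\n 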
status_response = requests.get('https://api.spotify.com/v1/playlists/'+ id +'/', headers={'Authorization':'Bearer '+ the_token})\n if(status_response.status_code == 200):\n return True\n else:\n return False\n\n def get_playlist_tracklist(id):\n tracklist_response = requests.get('https://api.spotify.com/v1/playlists/'+ id +'/tracks', headers={'Authorization':'Bearer '+ the_token})\n track_list = []\n count = 0\n #checking if the connection is established\n #if not, flask will display an error message to the user\n if(tracklist_response.status_code==200):\n tracklist_response_data = tracklist_response.json()\n maximum_of_values = len(tracklist_response_data['items'])\n #writing data to the variable\n while count < maximum_of_values:\n track_list.append(tracklist_response_data['items'][count]['track'])\n count+=1\n else:\n return track_list\n else:\n track_list = False\n return render_template('playlist_details.html', playlist=the_playlist, get_playlist_tracklist=get_playlist_tracklist, status_check=status_check)\n\n\nif __name__ == '__main__':\n app.run(host=os.environ.get('IP'),\n port=(os.environ.get('PORT')),\n debug=True)", "repo_name": "delegacz/md-milestone-project-3", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 12, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 13, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 14, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "flask_pymongo.PyMongo", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 58, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 64, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.form.to_dict", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 96, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 103, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 114, "usage_type": "call"}, {"api_name": "flask.request.form",
"line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 117, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 120, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 125, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 126, "usage_type": "call"}, {"api_name": "bson.objectid.ObjectId", "line_number": 130, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 132, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 155, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 159, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 160, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 160, "usage_type": "attribute"}]} +{"seq_id": "31943490450", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport simplejson as json\nimport time\nimport requests\nfrom api.controllers.tracking_controllers import tracking_lab_order_controller\nfrom vendor.models import WxMetaProductRelationship, LensSpecmap\nfrom pg_oms.settings import WX_META_SERVICE, WX_META_PURCHASE\nfrom oms.models.order_models import PurchaseOrderRecords\n\nclass wx_meta_purchase_controller:\n '''\n WX Meta Purchase Controller class\n '''\n def __init__(self):\n self.orderType = 'meta'\n self.deliverType = '顺丰寄付' #快递方式指定圆通\n self.customerLinkman = '李莲英' #联系人\n self.customerTel = '15518639392' #联系电话\n self.customerProvince ='上海市' #省\n self.customerCity = '上海市' #市\n self.customerCounty = '奉贤区' #区\n self.customerAddress='大叶公路4601号伟星工业园' #地址\n\n self.name = 'A100311' # 智镜客户码 固定的 A100311\n self.pwd = 'zhijin123' # 获取令牌密码\n\n self.host = WX_META_SERVICE if isinstance(WX_META_SERVICE, str) else WX_META_SERVICE[0]#伟星系统地址,写入配置文件\n self.token_url = WX_META_PURCHASE.get('TOKEN_URL') #'/all/account/login'#获取令牌URL\n self.wx_meta_prd_url = WX_META_PURCHASE.get('WX_META_PRD_URL') #'/api/product/listCustomerProducts' #获取现片产品清单\n self.add_order_url = WX_META_PURCHASE.get('ADD_ORDER_URL') #'/api/order/addOrder'添加订单URL\n self.order_status_url= WX_META_PURCHASE.get('ORDER_STATUS_URL')#'/api/order/getOrderStatus' #获取订单状态URL\n\n #库存片产品对应关系,需写入对应关系表中\n self.meta_product_relation = {\n 'KD56L': '00000000000000008736',\n 'KD56': '00000000000000002750',\n 'KDB56-C': '00000000000000002443',\n 'KD61L': '00000000000000004648',\n 'KDB61-H-SHMC': '00000000000000004048',\n 'KD61': '00000000000000002549',\n\n }\n # 获取令牌\n '''\n ### 参数\n * `name`:名字\n * `pwd`:密码\n '''\n def 
get_headers(self,content_type='application/json'):\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8'\n }\n try:\n url = self.host + self.token_url + \"?name=%s&pwd=%s\"%(self.name,self.pwd)\n logging.debug(url)\n result = requests.post(url, headers=headers, timeout=60)\n account = json.loads(result.text)\n if account['code'] == '200' and account['map']['data']['token']:\n headers = {\n \"Content-Type\": content_type,\n \"X-Auth-Token\": account['map']['data']['token']\n }\n return {\"code\":0,\"headers\":headers,\"msg\":\"token fetched successfully\"}\n else:\n return {\"code\":-1,\"token\":\"\",\"msg\":\"unexpected response from the Weixing API\"}\n except Exception as e:\n return {\"code\": -1, \"token\": \"\", \"msg\": \"get token failed%s\"%str(e)}\n\n #fetch the stock-lens product list\n '''\n /api/product/listCustomerProducts\n ### Parameters\n None\n\n ### Response\n ```json\n {\n \"success\": true,\n \"code\": \"200\",\n \"message\": null,\n \"map\": {\n \"data\": [{\n productId: `'xxx'`, sales product id\n brand: `'xxx'`, brand\n lenType: `'xxx'`, lens type\n sphStart: `0.00`, sphere start value\n sphEnd: `0.00`, sphere end value\n cylStart: `0.00`, cylinder start value\n cylEnd: `0.00`, cylinder end value\n addStart: `0.00`, addition start value\n addEnd: `0.00`, addition end value\n\n productName: `'xxx'`, sales product name\n zsl: `'xxx'`, refractive index\n dl: `'xxx'`, major category\n price: `100.0`, unit price\n rate: `1.0`, discount rate\n }]\n }\n }\n ```\n '''\n def list_wx_meta_products(self):\n content_type = 'application/x-www-form-urlencoded;'\n try:\n result = self.get_headers(content_type)\n logging.debug(result)\n if (result['code'] == -1): # return the error info\n return result\n headers = result['headers']\n url = self.host + self.wx_meta_prd_url\n result = requests.post(url, headers=headers, timeout=60)\n response = json.loads(result.text)\n if response['code'] == \"200\":\n return {\"code\": 0, \"data\": response['map']['data'], \"msg\": \"success\"}\n else:\n return {\"code\": -1, \"msg\": response['message']}\n except Exception as e:\n return {\"code\": -1, \"msg\": \"failed to fetch the product list, exception: %s\" % str(e)}\n\n\n\n\n #create an order\n '''\n * `addOrderDTOStr`: JSON string, with the following fields:\n * orderType: order type, `meta/house` meaning stock lens / Rx (surfaced) lens\n * apiOrderNo: third-party order number\n * deliveryDate: `'yyyy-MM-dd'`, requested ship date\n * deliverType: `'圆通'`, courier method\n * customerLinkman: `'张三'`, contact person\n * customerTel: `'152xxxx'`, contact phone\n * customerProvince: `'浙江省'`, province\n * customerCity: `'台州市'`, city\n * customerCounty: `'xxx'`, district\n * customerAddress: `'xxx'`, address\n * note: `'xxx'`, remarks\n\n * items: list of order line items\n * type: `'meta'`, one of meta/house/frame, meaning stock lens / Rx lens / frame\n * productId: `'J123'`, sales product id\n * quantity: `1`, quantity\n * brand: `'白包装'`, brand\n * lr: `'l'`, left/right eye\n * sphval: `'1.0'`, sphere\n * cylval: `'1.0'`, cylinder\n * addval: `'1.0'`, addition\n * lentype: `'近视'`, lens type\n * axis: `1`, axis\n * cbase: `'1'`, base curve\n * coloring:`'wf-01'`, tint\n * prismA:`'1'`, prism a\n * directionA:`'内'`, direction a\n * prismB:`'1'`, prism b\n * directionB:`'上'`, direction b\n * isAsse:`'Y'`, whether to assemble\n * isCut:`'N'`, whether to edge-cut\n * framePd:`'65'`, pupillary distance\n * frameIpd:`''`, near pupillary distance\n * framePh:`''`, pupil height\n * glassDistance:`''`, vertex distance\n * frontAngle:`''`, pantoscopic tilt\n * moveIn:`''`, decentration inward\n * moveOut:`''`, decentration outward\n * processes: `{'镀膜': '蓝光'}`, a JSON map of `process type: process id`\n\n ### Response\n ```json\n {\n \"success\": true,\n \"code\": \"200\",\n \"message\": null,\n \"map\": {\n \"data\": null\n }\n }\n ```\n '''\n def add_meta_order(self, dict_data, deliveryDate=\"\", brand='白包装', isAsse=\"N\", isCut=\"N\"):\n result = self.get_headers()\n if(result['code'] == -1): #return the error info\n return result\n headers = result['headers']\n try:\n wx_meta_lens = LensSpecmap.objects.filter(inner_code=dict_data.get('rsku'), active='ACTIVE', vendor=dict_data.get('vendor'))\n
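# map the internal SKU to the vendor's outer product code; exactly one active LensSpecmap row is expected\n #wx_meta_lens = 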
WxMetaProductRelationship.objects.filter(sku=dict_data.get('rsku'))\n if len(wx_meta_lens) == 0 or len(wx_meta_lens) > 1:\n return {\"code\": -1, \"data\": '', \"msg\": \"no product mapping found\"}\n wx_meta_len = wx_meta_lens[0]\n l_product_id = wx_meta_len.outer_code\n r_product_id = wx_meta_len.outer_code\n\n if(not deliveryDate): #default: request same-day shipping\n deliveryDate = time.strftime(\"%Y-%m-%d\", time.localtime())\n\n order_dict = {\n \"orderType\": self.orderType,#defaults to stock lens\n \"apiOrderNo\": dict_data.get('order_number', ''),\n \"deliveryDate\": deliveryDate,\n \"deliverType\": self.deliverType,\n \"customerLinkman\": self.customerLinkman,\n \"customerTel\": self.customerTel,\n \"customerProvince\": self.customerProvince,\n \"customerCity\": self.customerCity,\n \"customerCounty\":self.customerCounty,\n \"customerAddress\": self.customerAddress,\n \"note\": dict_data.get('comments', ''),\n \"items\": [\n {\n \"type\": self.orderType,\n \"productId\": l_product_id,\n \"quantity\": 1,\n \"brand\": brand,\n \"lr\": \"l\",\n \"sphval\": dict_data.get('lsph', '0'),\n \"cylval\": dict_data.get('lcyl', '0'),\n \"lentype\": dict_data.get('l_lens_type', ''),\n \"axis\": dict_data.get('laxis', '0'),\n \"addval\": \"0.00\",\n \"cbase\": \"\",\n \"coloring\": \"\",\n \"prismA\": 0,\n \"directionA\": \"\",\n \"prismB\": 0,\n \"directionB\": \"\",\n \"isAsse\": isAsse, #whether to assemble\n \"isCut\": isCut, #whether to edge-cut\n \"framePd\": 0,\n \"frameIpd\": \"\",\n \"framePh\": \"\",\n \"glassDistance\": \"\",\n \"frontAngle\": \"\", #pantoscopic tilt, none\n \"moveIn\": \"\",\n \"moveOut\": \"\",\n \"processes\": {} #processes, none\n },\n {\n \"type\": self.orderType,\n \"productId\": r_product_id,\n \"quantity\": 1,\n \"brand\": brand,\n \"lr\": \"r\",\n \"sphval\": dict_data.get('rsph', '0'),\n \"cylval\": dict_data.get('rcyl', '0'),\n \"lentype\": dict_data.get('r_lens_type', ''),\n \"axis\": dict_data.get('raxis', '0'),\n \"addval\": \"0.00\",\n \"cbase\": \"\",\n \"coloring\": \"\",\n \"prismA\": 0,\n \"directionA\": \"\",\n \"prismB\": 0,\n \"directionB\": \"\",\n \"isAsse\": isAsse, # whether to assemble\n \"isCut\": isCut, # whether to edge-cut\n \"framePd\": 0,\n \"frameIpd\": \"\",\n \"framePh\": \"\",\n \"glassDistance\": \"\",\n \"frontAngle\": \"\", # pantoscopic tilt, none\n \"moveIn\": \"\",\n \"moveOut\": \"\",\n \"processes\": {} # processes, none\n },\n ]\n }\n #{\"code\":\"500\",\"map\":{\"data\":null},\"message\":\"没有价格!产品ID:00000000000000008736 品牌:白包装 光型:近视 球:1 柱:2 加光:0\",\"success\":false}\n url = self.host + self.add_order_url\n result = requests.post(url, data=json.dumps(order_dict), headers=headers, timeout=60)\n response = json.loads(result.text)\n if response['code'] == \"200\":\n # save the order record\n purchase_order_records = PurchaseOrderRecords.objects.filter(lab_number=dict_data.get('order_number', ''))\n if len(purchase_order_records) > 0:\n pur_order_records = purchase_order_records[0]\n pur_order_records.order_data = json.dumps(order_dict)\n pur_order_records.vendor = '10'\n pur_order_records.save()\n else:\n pur_order_records = PurchaseOrderRecords()\n pur_order_records.lab_number = dict_data.get('order_number', '')\n pur_order_records.order_data = json.dumps(order_dict)\n pur_order_records.vendor = '10'\n pur_order_records.save()\n return {\"code\": 0, \"data\": response['map']['data'], \"msg\": \"success!\"}\n else:\n return {\"code\": -1, \"data\": response['map']['data'], \"msg\": response['message']}\n except Exception as e:\n return {\"code\": -1, \"data\": \"\", \"msg\": \"failed to create the order, exception: %s\"%str(e)}\n\n #get the production status of a Weixing order by its order number\n '''\n ### Parameters\n * `orderNo`: order number (not the order ID)\n\n ### Response\n ```json\n {\n \"success\": true,\n 
\"code\": \"200\",\n \"message\": null,\n \"map\": {\n \"data\": {\n \"status\": \"1\",\n \"deliverNo\": \"123\",\n \"deliverCompany\": \"圆通\"\n }\n }\n }\n ```\n\n #### 订单状态\n * \"-1\": 已删除\n * \"0\": 待引入\n * \"1\": 待审核\n * \"23\": 待确认\n * \"25\": 已终止\n * \"27\": 已取消\n * \"30\": 生产中\n * \"35\": 割边处理\n * \"37\": 单证打印\n * \"38\": 部分发货\n * \"40\": 完成\n '''\n def getOrderStatus(self,orderNo):\n if(orderNo == \"\"):\n return {\"code\": -1,\"msg\": \"订单号不能为空\"}\n content_type ='application/x-www-form-urlencoded;'\n try:\n result = self.get_headers(content_type)\n logging.debug(result)\n if (result['code'] == -1): # 返回出错信息\n return result\n headers = result['headers']\n url = self.host + self.order_status_url + \"?orderNo=%s\"%orderNo\n result = requests.post(url, headers=headers, timeout=60)\n response = json.loads(result.text)\n if response['code'] == \"200\":\n return {\"code\":0,\"data\":response['map']['data'],\"msg\":\"success\"}\n else:\n return {\"code\":-1,\"msg\":response['message']}\n except Exception as e:\n return {\"code\": -1, \"msg\": \"获取订单状态错误,异常信息:%s\"%str(e)}\n\n #封装数据\n def pack_request_value(self, lab):\n data_dict = {}\n try:\n if float(lab.od_sph) <= 0:\n r_lens_type = '近视'\n else:\n r_lens_type = '老花'\n\n if float(lab.os_sph) <= 0:\n l_lens_type = '近视'\n else:\n l_lens_type = '老花'\n\n if int(lab.vendor) > 9:\n act_lens_sku = lab.act_lens_sku[3:]\n else:\n act_lens_sku = lab.act_lens_sku[2:]\n\n data_dict['order_number'] = lab.lab_number\n data_dict['vendor'] = lab.vendor\n data_dict['rsku'] = act_lens_sku\n data_dict['rsph'] = lab.od_sph\n data_dict['rcyl'] = lab.od_cyl\n data_dict['raxis'] = lab.od_axis\n data_dict['r_lens_type'] = r_lens_type\n\n data_dict['lsku'] = act_lens_sku\n data_dict['lsph'] = lab.os_sph\n data_dict['lcyl'] = lab.os_cyl\n data_dict['laxis'] = lab.os_axis\n data_dict['l_lens_type'] = l_lens_type\n data_dict['comments'] = lab.comments\n return data_dict\n except Exception as e:\n return data_dict\n\n #处理返回结果\n def analysis_result(self, request, lbo, purchase_order, res):\n stat_dict = {}\n try:\n if res['code'] == 0:\n purchase_order.vendor_order_reference = res['data']['orderNo']\n purchase_order.save()\n lbo.vendor_order_reference = res['data']['orderNo']\n lbo.save()\n # 记录日志\n tloc = tracking_lab_order_controller()\n tloc.tracking(lbo, request.user, 'LENS_PURCHASE', '镜片采购',\n res['data']['orderNo'])\n stat_dict[lbo.lab_number] = {'Success': True, 'Message': '下单成功'}\n else:\n stat_dict[lbo.lab_number] = {'Success': False, 'Message': res['msg']}\n return stat_dict\n except Exception as e:\n stat_dict[lbo.lab_number] = {'Success': False, 'Message': e}\n return stat_dict\n\n\n\n", "repo_name": "qiaozhizt/OMS", "sub_path": "oms/controllers/wx_meta_purchase_controller.py", "file_name": "wx_meta_purchase_controller.py", "file_ext": "py", "file_size_in_byte": 17349, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pg_oms.settings.WX_META_SERVICE", "line_number": 28, "usage_type": "argument"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE.get", "line_number": 29, "usage_type": "call"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE", "line_number": 29, "usage_type": "name"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE.get", "line_number": 30, "usage_type": "call"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE", "line_number": 30, "usage_type": "name"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE.get", "line_number": 31, "usage_type": "call"}, {"api_name": 
"pg_oms.settings.WX_META_PURCHASE", "line_number": 31, "usage_type": "name"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE.get", "line_number": 32, "usage_type": "call"}, {"api_name": "pg_oms.settings.WX_META_PURCHASE", "line_number": 32, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 57, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 108, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 113, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "vendor.models.LensSpecmap.objects.filter", "line_number": 186, "usage_type": "call"}, {"api_name": "vendor.models.LensSpecmap.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "vendor.models.LensSpecmap", "line_number": 186, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 195, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 195, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 270, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 270, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 271, "usage_type": "call"}, {"api_name": "oms.models.order_models.PurchaseOrderRecords.objects.filter", "line_number": 274, "usage_type": "call"}, {"api_name": "oms.models.order_models.PurchaseOrderRecords.objects", "line_number": 274, "usage_type": "attribute"}, {"api_name": "oms.models.order_models.PurchaseOrderRecords", "line_number": 274, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 277, "usage_type": "call"}, {"api_name": "oms.models.order_models.PurchaseOrderRecords", "line_number": 281, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 283, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 332, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 337, "usage_type": "call"}, {"api_name": "simplejson.loads", "line_number": 338, "usage_type": "call"}, {"api_name": "api.controllers.tracking_controllers.tracking_lab_order_controller", "line_number": 393, "usage_type": "call"}]} +{"seq_id": "14804295146", "text": "#screenshot application by okay.anshu\nimport time \nimport pyautogui\nfrom PIL import Image\nimport tkinter as tk\n\n#creating a function for taking the screenshot\n\ndef screenshot():\n #picking a random name for the ss image\n \n name = int(round(time.time()*1000))\n \n #storing the image file into an address\n #user need to create a folder for storing the clicked img\n #then replace this file address to your file address.\n name = f\"D:/Coding files/pyhton/screenshot/screenshot_saved/{name}.png\"\n \n #calling the predefined screenshot method from pyautogui package \n \n img = pyautogui.screenshot(name)\n #using the Pillow module to show the image\n img.show()\n \n#working for the gui \n\n#creating a basic frame and adding two buttons one is for take ss one is for exit.\n\nroot = tk.Tk()\nframe = tk.Frame(root)\nframe.pack()\n#assigning the screenshot command to the take screenshot button\nbutton = tk.Button(\n frame,\n text=\"Take Screenshot\",\n command=screenshot\n)\n#assigning the exit command to the exit button\nbutton.pack(side=tk.LEFT)\nclose=tk.Button(\n frame,\n text= \"EXIT\",\n command=quit\n)\n\nclose.pack(side=tk.LEFT)\n\nroot.mainloop()\n\n#this code is contributed by 
Himanshu\n", "repo_name": "nycanshu/take-ss", "sub_path": "screenshot .py", "file_name": "screenshot .py", "file_ext": "py", "file_size_in_byte": 1222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 12, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 21, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 29, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 30, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 33, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 40, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "36333301450", "text": "from django.db import models\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils import timezone\n\n\nclass Category(MPTTModel):\n name = models.CharField(\n verbose_name=_(\"Category Name\"),\n help_text=_(\"Required and unique\"),\n max_length=255,\n unique=True,\n )\n slug = models.SlugField(\n verbose_name=_(\"Category safe URL\"),\n unique=True,\n max_length=255,\n )\n parent = TreeForeignKey(\n \"self\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n related_name=\"children\",\n )\n is_active = models.BooleanField(default=False)\n\n class MPTTMeta:\n order_insertion_by = (\"name\",)\n\n class Meta:\n verbose_name = _(\"Category\")\n verbose_name_plural = _(\"Categories\")\n\n def __str__(self):\n return self.name\n\n\nclass Brand(models.Model):\n name = models.CharField(\n verbose_name=_(\"Brand Name\"),\n help_text=_(\"Required and unique\"),\n max_length=255,\n unique=True,\n )\n slug = models.SlugField(\n verbose_name=_(\"Brand safe URL\"),\n unique=True,\n max_length=255,\n )\n is_active = models.BooleanField(default=False)\n\n class Meta:\n verbose_name = _(\"Brand\")\n verbose_name_plural = _(\"Brands\")\n\n def __str__(self):\n return self.name\n\n\nclass ProductType(models.Model):\n name = models.CharField(\n max_length=255,\n verbose_name=_(\"Name\"),\n help_text=_(\"Required\"),\n )\n is_active = models.BooleanField(default=True)\n\n class Meta:\n verbose_name = _(\"Product Type\")\n verbose_name_plural = _(\"Product Types\")\n\n def __str__(self):\n return self.name\n\n\nclass ProductSpecification(models.Model):\n product_type = models.ForeignKey(\n ProductType,\n on_delete=models.RESTRICT,\n )\n name = models.CharField(\n verbose_name=_(\"Name\"),\n help_text=_(\"Required\"),\n unique=True,\n max_length=255,\n )\n\n class Meta:\n verbose_name = _(\"Product Specification\")\n verbose_name_plural = _(\"Product Specifications\")\n\n def __str__(self):\n return self.name\n\n\nclass Product(models.Model):\n brand = models.ForeignKey(\n Brand,\n on_delete=models.RESTRICT,\n null=True,\n blank=True,\n )\n product_type = models.ForeignKey(\n ProductType,\n on_delete=models.RESTRICT,\n )\n category = models.ForeignKey(\n Category,\n on_delete=models.RESTRICT,\n )\n title = models.CharField(\n max_length=255,\n verbose_name=_(\"Title\"),\n help_text=_(\"Required\"),\n )\n short_description = models.CharField(\n max_length=255,\n verbose_name=_(\"Short description\"),\n help_text=_(\"Required\"),\n )\n description = models.TextField(\n blank=True,\n verbose_name=_(\"Description\"),\n 
help_text=_(\"Not Required\"),\n )\n slug = models.SlugField(max_length=255)\n regular_price = models.DecimalField(\n verbose_name=_(\"Regular price\"),\n help_text=_(\"Maximum 4999.99$\"),\n error_messages={\n \"name\": {\n \"max_length\": _(\"The price must be between 0 and 4999.99$\"),\n },\n },\n max_digits=6,\n decimal_places=2,\n )\n discount_price = models.DecimalField(\n verbose_name=_(\"Discount price\"),\n help_text=_(\"Maximum 4999.99$\"),\n error_messages={\n \"name\": {\n \"max_length\": _(\"The price must be between 0 and 4999.99$\"),\n },\n },\n max_digits=6,\n decimal_places=2,\n )\n is_active = models.BooleanField(\n verbose_name=_(\"Product visibility\"),\n help_text=_(\"Change product visibility\"),\n default=True,\n )\n created_at = models.DateTimeField(\n _(\"Created at\"),\n auto_now_add=True,\n editable=False,\n )\n updated_at = models.DateTimeField(\n _(\"Updated at\"),\n auto_now=True,\n )\n\n class Meta:\n ordering = (\"created_at\",)\n verbose_name = _(\"Product\")\n verbose_name_plural = _(\"Products\")\n\n def __str__(self):\n return self.title\n\n\nclass ProductSpecificationValue(models.Model):\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n )\n specification = models.ForeignKey(\n ProductSpecification,\n on_delete=models.RESTRICT,\n )\n value = models.CharField(\n max_length=255,\n verbose_name=_(\"Value\"),\n help_text=_(\"Product specification value (maximum of 255\"),\n )\n\n class Meta:\n verbose_name = _(\"Product Specification Value\")\n verbose_name_plural = _(\"Product Specification Values\")\n\n def __str__(self):\n return self.value\n\n\nclass ProductImage(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n image = models.ImageField(\n verbose_name=_(\"Image\"),\n help_text=_(\"Upload a product image\"),\n upload_to=\"images/products/\",\n default=\"images/products/default.png\",\n )\n alt_text = models.CharField(\n max_length=255,\n null=True,\n blank=True,\n verbose_name=_(\"Alternative text\"),\n help_text=_(\"Please add alternative text\"),\n )\n if_feature = models.BooleanField(default=False)\n created_at = models.DateTimeField(\n _(\"Created at\"),\n auto_now_add=True,\n editable=False,\n )\n updated_at = models.DateTimeField(\n _(\"Updated at\"),\n auto_now=True,\n )\n\n class Meta:\n verbose_name = _(\"Product image\")\n verbose_name_plural = _(\"Product images\")\n\n\nclass ProductComment(models.Model):\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n )\n reply = models.ForeignKey(\n \"ProductComment\",\n on_delete=models.CASCADE,\n related_name=\"children\",\n null=True,\n blank=True,\n )\n text = models.TextField()\n created_at = models.DateTimeField(default=timezone.now, editable=False)\n\n class Meta:\n verbose_name = _(\"Comment\")\n verbose_name_plural = _(\"Comments\")\n\n def __str__(self):\n return \"{} - {}\".format(self.user.__str__(), self.product.title)\n", "repo_name": "SorooshDaryabari/Ecommerce", "sub_path": "products/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 6507, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mptt.models.MPTTModel", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, 
"usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 10, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.SlugField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 16, "usage_type": "call"}, {"api_name": "mptt.models.TreeForeignKey", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models.CASCADE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 33, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 42, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models.SlugField", "line_number": 47, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 47, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 55, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 62, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 65, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 71, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 78, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 79, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.models.RESTRICT", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, 
{"api_name": "django.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 84, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 85, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 91, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 98, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 99, "usage_type": "name"}, {"api_name": "django.db.models.RESTRICT", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 105, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 105, "usage_type": "name"}, {"api_name": "django.db.models.RESTRICT", "line_number": 107, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.RESTRICT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 115, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 116, "usage_type": "call"}, {"api_name": "django.db.models.CharField", "line_number": 118, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 118, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 120, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 121, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 123, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 123, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 125, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models.SlugField", "line_number": 128, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 128, "usage_type": "name"}, {"api_name": "django.db.models.DecimalField", "line_number": 129, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 129, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 130, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 131, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 134, "usage_type": "call"}, {"api_name": "django.db.models.DecimalField", "line_number": 140, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 140, "usage_type": "name"}, {"api_name": 
"django.utils.translation.gettext_lazy", "line_number": 141, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 142, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 151, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 151, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 152, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 156, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 156, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 157, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 161, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 161, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 162, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 168, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 169, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 175, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 175, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 176, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 176, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 178, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 178, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 180, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 180, "usage_type": "name"}, {"api_name": "django.db.models.RESTRICT", "line_number": 182, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 182, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 184, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 184, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 186, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 187, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 191, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 192, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 198, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 198, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 199, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 199, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 199, "usage_type": "attribute"}, {"api_name": "django.db.models.ImageField", "line_number": 200, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 200, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 201, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 202, "usage_type": "call"}, {"api_name": "django.db.models.CharField", 
"line_number": 206, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 206, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 210, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 211, "usage_type": "call"}, {"api_name": "django.db.models.BooleanField", "line_number": 213, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 213, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 214, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 214, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 215, "usage_type": "call"}, {"api_name": "django.db.models.DateTimeField", "line_number": 219, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 219, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 220, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 225, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 226, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 229, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 229, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 230, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 230, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 231, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 231, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 232, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 232, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 234, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 234, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 236, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 236, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 240, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 240, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 242, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 242, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 247, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 247, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 248, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 248, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 248, "usage_type": "attribute"}, {"api_name": "django.utils.timezone", "line_number": 248, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 251, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 252, "usage_type": "call"}]} +{"seq_id": "42091832688", "text": "from pickle import load\nfrom numpy import array\nfrom numpy import argmax\nfrom numpy import asarray\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import load_model\nfrom nltk.translate.bleu_score import 
corpus_bleu\n\n\n# load a clean dataset\ndef load_clean_sentences(filename):\n return load(open('pickle/' + filename, 'rb'))\n\n\n# fit a tokenizer\ndef create_tokenizer(lines):\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(lines)\n return tokenizer\n\n\n# max sentence length\ndef max_length(lines):\n return max(len(line.split()) for line in lines)\n\n\n# encode and pad sequences\ndef encode_sequences(tokenizer, length, lines):\n # integer encode sequences\n X = tokenizer.texts_to_sequences(lines)\n # pad sequences with 0 values\n X = pad_sequences(X, maxlen=length, padding='post')\n return X\n\n\n# map an integer to a word\ndef word_for_id(integer, tokenizer):\n for word, index in tokenizer.word_index.items():\n if index == integer:\n return word\n return None\n\n\n# generate target given source sequence\ndef predict_sequence(model, tokenizer, source):\n prediction = model.predict(source, verbose=0)[0]\n integers = [argmax(vector) for vector in prediction]\n target = list()\n for i in integers:\n word = word_for_id(i, tokenizer)\n if word is None:\n break\n target.append(word)\n return ' '.join(target)\n\n\n# evaluate the skill of the model\ndef evaluate_model(model, tokenizer, sources, raw_dataset):\n actual, predicted = list(), list()\n with open('fr_en_translations2.txt', 'w') as all_translations:\n for i, source in enumerate(sources):\n # translate encoded source text\n source = source.reshape((1, source.shape[0])) # convert vector to array for predict method\n translation = predict_sequence(model, tokenizer, source)\n raw_target, raw_src = raw_dataset[i]\n if i < 10:\n print('src=[%s], target=[%s], predicted=[%s]' %\n (raw_src, raw_target, translation))\n all_translations.write('src=[%s], target=[%s], predicted=[%s]\\n' %\n (raw_src, raw_target, translation))\n actual.append([raw_target.split()])\n predicted.append(translation.split())\n # calculate BLEU score\n print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))\n print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))\n print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))\n print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))\n\n\n# translate a single user-provided phrase\ndef user_evaluate_model(model, tokenizer, source, raw_src):\n # translate encoded source text\n # source = source.reshape((1, source.shape[0])) # convert vector to array for predict method\n translation = predict_sequence(model, tokenizer, source)\n print('src=[%s], predicted=[%s]' % (raw_src, translation))\n\n\ntarget_language = 'french'\n# load datasets\ndataset = asarray(load_clean_sentences('english-%s-both.pkl' % target_language))\ntrain = asarray(load_clean_sentences('english-%s-train.pkl' % target_language))\ntest = asarray(load_clean_sentences('english-%s-test.pkl' % target_language))\n\n# prepare english tokenizer\n# eng_tokenizer = create_tokenizer(dataset[:, 0])\neng_tokenizer = load(open('pickle/english_tokenizer.pkl', 'rb'))\neng_vocab_size = len(eng_tokenizer.word_index) + 1\neng_length = max_length(dataset[:, 0])\n# prepare target tokenizer\n# ger_tokenizer = create_tokenizer(dataset[:, 1])\nger_tokenizer = load(open('pickle/%s_tokenizer.pkl' % target_language, 'rb'))\nger_vocab_size = len(ger_tokenizer.word_index) + 1\nger_length = max_length(dataset[:, 1])\n# prepare data, column 0 is english, column 1 is the target language\ntrainX = encode_sequences(ger_tokenizer, ger_length, train[:, 1])\ntestX = encode_sequences(ger_tokenizer, ger_length, test[:, 1])\n\n# 
load model\nmodel = load_model('models/english_%s_model.h5' % target_language)\n# test on some training sequences\n# print('train')\n# evaluate_model(model, eng_tokenizer, trainX, train)\n# test on some test sequences\nprint('test')\nevaluate_model(model, eng_tokenizer, testX, test)\n# phrase = 'de rien'\n# user_evaluate_model(model, eng_tokenizer, encode_sequences(ger_tokenizer, ger_length, [phrase]), phrase)\n", "repo_name": "Harshwin/Comparison_statistical_and_neural_network_for_machine_translation", "sub_path": "tester.py", "file_name": "tester.py", "file_ext": "py", "file_size_in_byte": 4439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pickle.load", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.text.Tokenizer", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 48, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.corpus_bleu", "line_number": 75, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.corpus_bleu", "line_number": 76, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.corpus_bleu", "line_number": 77, "usage_type": "call"}, {"api_name": "nltk.translate.bleu_score.corpus_bleu", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 93, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 97, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "34078952330", "text": "from mongoengine import connect\n\n__author__ = 'Enis Simsar'\n\nfrom time import sleep\nfrom decouple import config\n\nfrom listen_module.twitter_stream_thread import StreamCreator\nfrom models.Topic import Topic\nfrom models.Tweet import Tweet\nfrom bson.objectid import ObjectId\n\n\nclass TwitterListen:\n def __init__(self):\n self.topic_dic = {}\n self.thread = None\n\n def setup(self, topic_list):\n if len(topic_list) != 0:\n for topic in topic_list:\n if str(topic['id']) not in self.topic_dic:\n self.topic_dic[str(topic['id'])] = topic\n self.thread = StreamCreator(self.topic_dic)\n self.thread.start()\n\n def restart(self, topic_list):\n self.topic_dic = {}\n if self.thread is not None:\n self.kill()\n if len(topic_list) != 0:\n for topic in topic_list:\n if str(topic['id']) not in self.topic_dic:\n self.topic_dic[str(topic['id'])] = topic\n self.thread = StreamCreator(self.topic_dic)\n self.thread.start()\n\n def kill(self):\n self.thread.terminate()\n del self.thread\n self.thread = None\n\n\ndef get_last_sequence_id():\n last_tweet = Tweet.objects.order_by('-_id').first()\n return last_tweet.id if last_tweet is not None else ObjectId()\n\n\ndef main():\n connect(\n config('MONGODB_DB'),\n username=config('MONGODB_USER'),\n password=config('MONGODB_PASSWORD'),\n host=config('MONGODB_HOST'),\n port=config('MONGODB_PORT', cast=int),\n authentication_source='admin',\n connect=False\n )\n\n running_topic_list = [topic.to_dict() for topic in Topic.objects.filter(is_active=True)]\n twitter_module = TwitterListen()\n twitter_module.setup(running_topic_list)\n\n last_sequence_id = get_last_sequence_id()\n\n 
count = 0\n while True:\n print(\"Loop is continuing. count = {0}\".format(count))\n count += 1\n sleep(300)\n new_running_topic_list = [topic.to_dict() for topic in Topic.objects.filter(is_active=True)]\n\n current_keywords = [i['keywords'] for i in running_topic_list]\n current_languages = [i['languages'] for i in running_topic_list]\n\n new_keywords = [i['keywords'] for i in new_running_topic_list]\n new_languages = [i['languages'] for i in new_running_topic_list]\n\n if current_keywords != new_keywords or current_languages != new_languages:\n running_topic_list = new_running_topic_list\n print(\"Restarting Twitter Module!\")\n twitter_module.restart(new_running_topic_list)\n\n if count % 6 == 0:\n new_last_sequence_id = get_last_sequence_id()\n print(\"last_id = {0}, new_last_id = {1}\".format(last_sequence_id, new_last_sequence_id))\n if last_sequence_id == new_last_sequence_id:\n running_topic_list = new_running_topic_list\n print(\"Unexpectedly Stopped Module, Restarting...\")\n twitter_module.restart(new_running_topic_list)\n last_sequence_id = get_last_sequence_id()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "openmaker-eu/watchtower-news", "sub_path": "listen_module/listen_controller.py", "file_name": "listen_controller.py", "file_ext": "py", "file_size_in_byte": 3161, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "listen_module.twitter_stream_thread.StreamCreator", "line_number": 24, "usage_type": "call"}, {"api_name": "listen_module.twitter_stream_thread.StreamCreator", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Tweet.Tweet.objects.order_by", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Tweet.Tweet.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Tweet.Tweet", "line_number": 45, "usage_type": "name"}, {"api_name": "bson.objectid.ObjectId", "line_number": 46, "usage_type": "call"}, {"api_name": "mongoengine.connect", "line_number": 50, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 51, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 52, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 53, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 54, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Topic.Topic.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Topic.Topic.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Topic.Topic", "line_number": 60, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 70, "usage_type": "call"}, {"api_name": "models.Topic.Topic.objects.filter", "line_number": 71, "usage_type": "call"}, {"api_name": "models.Topic.Topic.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.Topic.Topic", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "28450848157", "text": "# -*- coding:utf-8 -*-\r\n#@InnerAc\r\n#getJson.py\r\n\"\"\"\r\nThis program generates the JSON file of nodes and edges.\r\n\"\"\"\r\nimport sqlite3\r\nimport math\r\nimport json\r\nimport re\r\n\r\nconn = None\r\n\r\ndef init(path):\r\n\tglobal conn\r\n\tconn = sqlite3.connect(path)\r\ndef getNodes():\r\n\tglobal conn\r\n\tcursor = conn.execute(\"SELECT url, name, size FROM node\")\r\n\tnodes = {}\r\n\ti = 0\r\n\tfor row in cursor:\r\n\t\turl = row[0]\r\n\t\tname = 
row[1]\r\n\t\tsize = row[2]\r\n\t\tnodes[url] = {\"name\":name,\"size\":size}\r\n\t\ti += 1\r\n\t\tif i == 50:\r\n\t\t\tbreak\r\n\t\t# if i == 10:\r\n\t\t# \tbreak;\r\n\treturn nodes\r\ndef getLinks(nodes):\r\n\tglobal conn\r\n\tcursor = conn.execute(\"SELECT name_one,name_two,value FROM value\")\r\n\tedges = []\r\n\ti = 0\r\n\tfor row in cursor:\r\n\t\tone = row[0]\r\n\t\ttwo = row[1]\r\n\t\tvalue = row[2]\r\n\t\tif one in nodes or two in nodes:\r\n\t\t\tedges.append({\"source\": one,\"target\": two,\"weight\": value})\r\n\t\t# i += 1\r\n\t\t# if i == 10:\r\n\t\t\t# break;\r\n\treturn edges\r\ndef close():\r\n\tglobal conn\r\n\tconn.close()\r\nif __name__ == '__main__':\r\n\tinit(\"person.db\")\r\n\tinfo = {}\r\n\tnodes = getNodes()\r\n\tedges = getLinks(nodes)\r\n\tinfo[\"nodes\"] = nodes\r\n\tinfo[\"edges\"] = edges\r\n\tf = open('info.json','w')\r\n\tf.write(json.dumps(info))\r\n\tf.close()\r\n\tclose()\r\n", "repo_name": "InnerAc/githubRelational", "sub_path": "getJson.py", "file_name": "getJson.py", "file_ext": "py", "file_size_in_byte": 1193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "11041025560", "text": "import hashlib\nimport hmac\nimport json\nimport logging\nimport pprint\nfrom datetime import datetime\n\nfrom odoo import http\nfrom odoo.exceptions import ValidationError\nfrom odoo.http import request\nfrom odoo.tools import consteq\n\nfrom odoo.addons.payment_stripe import utils as stripe_utils\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass StripeController(http.Controller):\n _checkout_return_url = '/payment/stripe/checkout_return'\n _validation_return_url = '/payment/stripe/validation_return'\n _webhook_url = '/payment/stripe/webhook'\n WEBHOOK_AGE_TOLERANCE = 10*60 # seconds\n\n @http.route(_checkout_return_url, type='http', auth='public', csrf=False)\n def stripe_return_from_checkout(self, **data):\n \"\"\" Process the data returned by Stripe after redirection for checkout.\n\n :param dict data: The GET params appended to the URL in `_stripe_create_checkout_session`\n \"\"\"\n # Retrieve the tx and acquirer based on the tx reference included in the return url\n tx_sudo = request.env['payment.transaction'].sudo()._get_tx_from_feedback_data(\n 'stripe', data\n )\n acquirer_sudo = tx_sudo.acquirer_id\n\n # Fetch the PaymentIntent, Charge and PaymentMethod objects from Stripe\n payment_intent = acquirer_sudo._stripe_make_request(\n f'payment_intents/{tx_sudo.stripe_payment_intent}', method='GET'\n )\n _logger.info(\"received payment_intents response:\\n%s\", pprint.pformat(payment_intent))\n self._include_payment_intent_in_feedback_data(payment_intent, data)\n\n # Handle the feedback data crafted with Stripe API objects\n request.env['payment.transaction'].sudo()._handle_feedback_data('stripe', data)\n\n # Redirect the user to the status page\n return request.redirect('/payment/status')\n\n @http.route(_validation_return_url, type='http', auth='public', csrf=False)\n def stripe_return_from_validation(self, **data):\n \"\"\" Process the data returned by Stripe after redirection for validation.\n\n :param dict data: The GET params appended to the URL in `_stripe_create_checkout_session`\n \"\"\"\n # Retrieve the acquirer based on the tx reference included in the return url\n acquirer_sudo = 
request.env['payment.transaction'].sudo()._get_tx_from_feedback_data(\n 'stripe', data\n ).acquirer_id\n\n # Fetch the Session, SetupIntent and PaymentMethod objects from Stripe\n checkout_session = acquirer_sudo._stripe_make_request(\n f'checkout/sessions/{data.get(\"checkout_session_id\")}',\n payload={'expand[]': 'setup_intent.payment_method'}, # Expand all required objects\n method='GET'\n )\n _logger.info(\"received checkout/session response:\\n%s\", pprint.pformat(checkout_session))\n self._include_setup_intent_in_feedback_data(checkout_session.get('setup_intent', {}), data)\n\n # Handle the feedback data crafted with Stripe API objects\n request.env['payment.transaction'].sudo()._handle_feedback_data('stripe', data)\n\n # Redirect the user to the status page\n return request.redirect('/payment/status')\n\n @http.route(_webhook_url, type='json', auth='public')\n def stripe_webhook(self):\n \"\"\" Process the `checkout.session.completed` event sent by Stripe to the webhook.\n\n :return: An empty string to acknowledge the notification with an HTTP 200 response\n :rtype: str\n \"\"\"\n event = json.loads(request.httprequest.data)\n _logger.info(\"event received:\\n%s\", pprint.pformat(event))\n try:\n if event['type'] == 'checkout.session.completed':\n checkout_session = event['data']['object']\n\n # Check the source and integrity of the event\n data = {'reference': checkout_session['client_reference_id']}\n tx_sudo = request.env['payment.transaction'].sudo()._get_tx_from_feedback_data(\n 'stripe', data\n )\n if self._verify_webhook_signature(\n stripe_utils.get_webhook_secret(tx_sudo.acquirer_id)\n ):\n # Fetch the PaymentIntent, Charge and PaymentMethod objects from Stripe\n if checkout_session.get('payment_intent'): # Can be None\n payment_intent = tx_sudo.acquirer_id._stripe_make_request(\n f'payment_intents/{tx_sudo.stripe_payment_intent}', method='GET'\n )\n _logger.info(\n \"received payment_intents response:\\n%s\", pprint.pformat(payment_intent)\n )\n self._include_payment_intent_in_feedback_data(payment_intent, data)\n # Fetch the SetupIntent and PaymentMethod objects from Stripe\n if checkout_session.get('setup_intent'): # Can be None\n setup_intent = tx_sudo.acquirer_id._stripe_make_request(\n f'setup_intents/{checkout_session.get(\"setup_intent\")}',\n payload={'expand[]': 'payment_method'},\n method='GET'\n )\n _logger.info(\n \"received setup_intents response:\\n%s\", pprint.pformat(setup_intent)\n )\n self._include_setup_intent_in_feedback_data(setup_intent, data)\n # Handle the feedback data crafted with Stripe API objects as a regular feedback\n request.env['payment.transaction'].sudo()._handle_feedback_data('stripe', data)\n except ValidationError: # Acknowledge the notification to avoid getting spammed\n _logger.exception(\"unable to handle the event data; skipping to acknowledge\")\n return ''\n\n @staticmethod\n def _include_payment_intent_in_feedback_data(payment_intent, data):\n data.update({'payment_intent': payment_intent})\n if payment_intent.get('charges', {}).get('total_count', 0) > 0:\n charge = payment_intent['charges']['data'][0] # Use the latest charge object\n data.update({\n 'charge': charge,\n 'payment_method': charge.get('payment_method_details'),\n })\n\n @staticmethod\n def _include_setup_intent_in_feedback_data(setup_intent, data):\n data.update({\n 'setup_intent': setup_intent,\n 'payment_method': setup_intent.get('payment_method')\n })\n\n def _verify_webhook_signature(self, webhook_secret):\n \"\"\" Check that the signature computed from the 
feedback matches the received one.\n\n See https://stripe.com/docs/webhooks/signatures#verify-manually.\n\n :param str webhook_secret: The secret webhook key of the acquirer handling the transaction\n :return: Whether the signatures match\n :rtype: str\n \"\"\"\n if not webhook_secret:\n _logger.warning(\"ignored webhook event due to undefined webhook secret\")\n return False\n\n notification_payload = request.httprequest.data.decode('utf-8')\n signature_entries = request.httprequest.headers.get('Stripe-Signature').split(',')\n signature_data = {k: v for k, v in [entry.split('=') for entry in signature_entries]}\n\n # Check the timestamp of the event\n event_timestamp = int(signature_data['t'])\n if datetime.utcnow().timestamp() - event_timestamp > self.WEBHOOK_AGE_TOLERANCE:\n _logger.warning(\"ignored webhook event due to age tolerance: %s\", event_timestamp)\n return False\n\n # Compare signatures\n received_signature = signature_data['v1']\n signed_payload = f'{event_timestamp}.{notification_payload}'\n expected_signature = hmac.new(\n webhook_secret.encode('utf-8'),\n signed_payload.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()\n if not consteq(received_signature, expected_signature):\n _logger.warning(\"ignored event with invalid signature\")\n return False\n\n return True\n", "repo_name": "anhjean/beanbakery_v15", "sub_path": "addons/payment_stripe/controllers/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.http.Controller", "line_number": 19, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 32, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 32, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 41, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 45, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 45, "usage_type": "name"}, {"api_name": "odoo.http.request.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 25, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 25, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 57, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 57, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 71, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 71, "usage_type": "name"}, {"api_name": "odoo.http.request.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 74, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 50, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 50, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 83, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 83, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 83, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 84, "usage_type": "call"}, {"api_name": 
"odoo.http.request.env", "line_number": 91, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 91, "usage_type": "name"}, {"api_name": "odoo.addons.payment_stripe.utils.get_webhook_secret", "line_number": 95, "usage_type": "call"}, {"api_name": "odoo.addons.payment_stripe.utils", "line_number": 95, "usage_type": "name"}, {"api_name": "pprint.pformat", "line_number": 103, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 114, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 118, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 118, "usage_type": "name"}, {"api_name": "odoo.exceptions.ValidationError", "line_number": 119, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 76, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 76, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.data.decode", "line_number": 153, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 153, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 153, "usage_type": "name"}, {"api_name": "odoo.http.request.httprequest.headers.get", "line_number": 154, "usage_type": "call"}, {"api_name": "odoo.http.request.httprequest", "line_number": 154, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 154, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 159, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 159, "usage_type": "name"}, {"api_name": "hmac.new", "line_number": 166, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 169, "usage_type": "attribute"}, {"api_name": "odoo.tools.consteq", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "38978588182", "text": "import datetime\nfrom sqlalchemy import select, func, and_\nfrom database.database_support_core import AREA, GALAXY\nfrom sqlalchemy.engine import create_engine\nfrom config import BOINC_DB_LOGIN, PROCESSED, COMPUTING\nfrom database.boinc_database_support_core import RESULT\nfrom utils.logging_helper import config_logger\n\nfrom utils.shutdown_detection import shutdown\n\nLOG = config_logger(__name__)\n\n\nclass CacheGalaxy:\n def __init__(self, galaxy_name, galaxy_id, area_min, area_max, ignore):\n self.galaxy_name = galaxy_name\n self.galaxy_id = galaxy_id\n self.area_min = area_min\n self.area_max = area_max\n self.ignore = ignore\n\n def __str__(self):\n return '{0}, {1}, {2}, {3}, {4}'.format(self.galaxy_name, self.galaxy_id, self.area_min, self.area_max, self.ignore)\n\n\ndef build_key(galaxy_name, galaxy_id):\n return '{0}_{1}'.format(galaxy_name, galaxy_id)\n\n\ndef get_cached_galaxy(cache_data, galaxy_name, area_number):\n \"\"\"\n Get any cached data for this name\n :param cache_data:\n :param galaxy_name:\n :param area_number:\n :return:\n \"\"\"\n cache_galaxies = cache_data.get(galaxy_name)\n if cache_galaxies is not None:\n for cached_galaxy in cache_galaxies:\n if cached_galaxy.area_min <= area_number <= cached_galaxy.area_max:\n return cached_galaxy\n\n return None\n\n\ndef sort_data(connection, current_jobs, modulus, remainder):\n \"\"\"\n Sort the list of jobs\n\n :param current_jobs:\n :return:\n \"\"\"\n cache_data = {}\n return_data = {}\n for job_name in current_jobs:\n #LOG.info('Checking {0}'.format(job_name))\n index = job_name.index('_area')\n galaxy_name = job_name[0:index]\n\n index1 = job_name.index('_', index + 5)\n area_number = 
job_name[index + 5: index1]\n\n cached_galaxy = get_cached_galaxy(cache_data, galaxy_name, int(area_number))\n #LOG.info('Cache check = {0}'.format(cached_galaxy))\n\n if cached_galaxy is None:\n # Get the area\n LOG.info('Area Number = {0}'.format(area_number))\n area = connection.execute(select([AREA]).where(AREA.c.area_id == area_number)).first()\n if area is None:\n LOG.info('Area with id={0} does not exist (Job: {1})'.format(area_number, job_name))\n continue\n ignore = True\n galaxy_id = int(area[AREA.c.galaxy_id])\n if modulus is None or galaxy_id % modulus == remainder:\n ignore = False\n key = build_key(galaxy_name, galaxy_id)\n areas = return_data.get(key)\n if areas is None:\n areas = []\n return_data[key] = areas\n\n areas.append(area_number)\n\n # Add this galaxy to the cache\n min_max = connection.execute(select([func.min(AREA.c.area_id), func.max(AREA.c.area_id)]).where(AREA.c.galaxy_id == galaxy_id)).first()\n # LOG.info('Adding to cache = {0} {1} {2}'.format(galaxy_name, min_max, ignore))\n list_galaxies = cache_data.get(galaxy_name)\n if list_galaxies is None:\n list_galaxies = []\n cache_data[galaxy_name] = list_galaxies\n\n list_galaxies.append(CacheGalaxy(galaxy_name, galaxy_id, min_max[0], min_max[1], ignore))\n\n else:\n if not cached_galaxy.ignore:\n key = build_key(galaxy_name, cached_galaxy.galaxy_id)\n areas = return_data.get(key)\n areas.append(area_number)\n\n return return_data\n\n\ndef finish_processing(galaxy_name, galaxy_id, sorted_data):\n \"\"\"\n Have we finished processing yet\n :param galaxy_id:\n :param galaxy_name:\n :param sorted_data:\n :return:\n \"\"\"\n return sorted_data.get(build_key(galaxy_name, galaxy_id)) is None\n\n\ndef processed_data(connection, modulus, remainder):\n \"\"\"\n Work out which galaxies have been processed\n\n :param connection:\n :return:\n \"\"\"\n # Get the work units still being processed\n engine = create_engine(BOINC_DB_LOGIN)\n connection_boinc = engine.connect()\n current_jobs = []\n LOG.info('Getting results from BOINC')\n # The use of appid ensures MySQL uses an index otherwise it does a full table scan\n for result in connection_boinc.execute(select([RESULT]).where(and_(RESULT.c.server_state != 5, RESULT.c.appid == 1))):\n current_jobs.append(result[RESULT.c.name])\n\n connection_boinc.close()\n LOG.info('Got results')\n\n sorted_data = sort_data(connection, current_jobs, modulus, remainder)\n for key in sorted(sorted_data.iterkeys()):\n LOG.info('{0}: {1} results'.format(key, len(sorted_data[key])))\n\n # Get the galaxies we know are still processing\n processed = []\n for galaxy in connection.execute(select([GALAXY]).where(GALAXY.c.status_id == COMPUTING)):\n if modulus is None or int(galaxy[GALAXY.c.galaxy_id]) % modulus == remainder:\n if finish_processing(galaxy[GALAXY.c.name], galaxy[GALAXY.c.galaxy_id], sorted_data):\n processed.append(galaxy[GALAXY.c.galaxy_id])\n LOG.info('%d %s has completed', galaxy[GALAXY.c.galaxy_id], galaxy[GALAXY.c.name])\n\n for galaxy_id in processed:\n connection.execute(GALAXY.update().where(GALAXY.c.galaxy_id == galaxy_id).values(status_id=PROCESSED, status_time=datetime.datetime.now()))\n\n if shutdown() is True:\n raise SystemExit\n\n LOG.info('Marked %d galaxies ready for archiving', len(processed))\n LOG.info('%d galaxies are still being processed', len(sorted_data))\n", "repo_name": "ICRAR/boinc-magphys", "sub_path": "server/src/archive/processed_galaxy_mod.py", "file_name": "processed_galaxy_mod.py", "file_ext": "py", "file_size_in_byte": 5609, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.logging_helper.config_logger", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 70, "usage_type": "call"}, {"api_name": "database.database_support_core.AREA", "line_number": 70, "usage_type": "name"}, {"api_name": "database.database_support_core.AREA.c", "line_number": 70, "usage_type": "attribute"}, {"api_name": "database.database_support_core.AREA.c", "line_number": 75, "usage_type": "attribute"}, {"api_name": "database.database_support_core.AREA", "line_number": 75, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 87, "usage_type": "call"}, {"api_name": "sqlalchemy.func.min", "line_number": 87, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 87, "usage_type": "name"}, {"api_name": "database.database_support_core.AREA.c", "line_number": 87, "usage_type": "attribute"}, {"api_name": "database.database_support_core.AREA", "line_number": 87, "usage_type": "name"}, {"api_name": "sqlalchemy.func.max", "line_number": 87, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.create_engine", "line_number": 124, "usage_type": "call"}, {"api_name": "config.BOINC_DB_LOGIN", "line_number": 124, "usage_type": "argument"}, {"api_name": "sqlalchemy.select", "line_number": 129, "usage_type": "call"}, {"api_name": "database.boinc_database_support_core.RESULT", "line_number": 129, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 129, "usage_type": "call"}, {"api_name": "database.boinc_database_support_core.RESULT.c", "line_number": 129, "usage_type": "attribute"}, {"api_name": "database.boinc_database_support_core.RESULT.c", "line_number": 130, "usage_type": "attribute"}, {"api_name": "database.boinc_database_support_core.RESULT", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.select", "line_number": 141, "usage_type": "call"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 141, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 141, "usage_type": "attribute"}, {"api_name": "config.COMPUTING", "line_number": 141, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 142, "usage_type": "attribute"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 142, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 143, "usage_type": "attribute"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 143, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 144, "usage_type": "attribute"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 144, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 145, "usage_type": "attribute"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 145, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.update", "line_number": 148, "usage_type": "call"}, {"api_name": "database.database_support_core.GALAXY", "line_number": 148, "usage_type": "name"}, {"api_name": "database.database_support_core.GALAXY.c", "line_number": 148, "usage_type": "attribute"}, {"api_name": "config.PROCESSED", "line_number": 148, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 148, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 148, "usage_type": "attribute"}, {"api_name": "utils.shutdown_detection.shutdown", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "22245144481", "text": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport csv\nimport glob\nimport argparse\n\nimport tifffile\nimport h5py\nimport numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nfrom keras.utils import multi_gpu_model\nfrom keras import optimizers\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras import backend as K\nfrom keras.losses import get\nfrom keras.models import load_model, Model\n\nfrom unet.unet import unet_model\nfrom unet.loss import multi_loss\nfrom util.load_data import load_train_data\nfrom util.load_batch import LoadBatchGenGPU, polar_zoom\nfrom util.read_parameter_from_log_file import read_parameter_from_log_file\nfrom util.postprocessing import postprocessing\n\n\ndef main():\n \"\"\"Train or test a U-Net model to analyze OCT images.\n\n Notes:\n **All arguments are bash arguments**.\n\n Args:\n exp_def: experiment definition\n models_path: path for saving models\n lr: learning rate\n lr_decay: learning rate step for decay\n data_pat: data folder path\n nEpoch: number of epochs\n nBatch: batch size\n outCh: size of output channel\n inCh: size of input channel\n nZ: size of input depth\n w: size of input width (number of columns)\n l: size of input Length (number of rows)\n loss_w: loss wights\n isAug: Is data augmentation\n isCarts: whether images should be converted into Cartesian\n isTest: Is test run instead of train\n testEpoch: epoch of the saved model for testing\n saveEpoch: epoch interval to save the model\n epochSize: number of samples per epoch\n nFeature: number of features in the first layer\n nLayer: number of layers in the U-Nnet model\n gpu_id: ID of GPUs to be used\n optimizer: keras optimizer. 
see :meth:`keras.optimizers`\n\n See Also:\n * :meth:`unet.unet.unet_model`\n * :meth:`unet.loss.multi_loss_fun`\n * :meth:`util.load_data.load_train_data`\n * :meth:`util.load_batch.load_batch_parallel`\n * :meth:`keras.utils.multi_gpu_model`\n * :meth:`keras.optimizers`\n\n \"\"\"\n\n # parse arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-exp_def\", type=str, default=\"test\", help=\"experiment definition\")\n parser.add_argument(\"-models_path\", type=str, default=\"model\", help=\"path for saving models\")\n parser.add_argument(\"-lr\", type=float, default=1e-4, help=\"learning rate\")\n parser.add_argument(\"-lr_decay\", type=float, default=0.0, help=\"learning rate decay\")\n parser.add_argument(\"-data_path\", type=str, default=\"D:\\\\MLIntravascularPolarimetry\\\\MLCardioPullbacks\\\\\",\n help=\"data folder path\")\n parser.add_argument(\"-nEpoch\", type=int, default=1000, help=\"number of epochs\")\n parser.add_argument(\"-nBatch\", type=int, default=30, help=\"batch size\")\n parser.add_argument(\"-outCh\", type=int, default=6, help=\"size of output channel\")\n parser.add_argument(\"-inCh\", type=int, default=3, help=\"size of input channel\")\n parser.add_argument(\"-nZ\", type=int, default=1, help=\"size of input depth\")\n parser.add_argument(\"-w\", type=int, default=512, help=\"size of input width (# of columns)\")\n parser.add_argument(\"-l\", type=int, default=512, help=\"size of input Length (# of rows)\")\n parser.add_argument(\"-loss_w\", type=str, default=\"1, 1, 1, 1, 1, 1, 1, 1, 1\", help=\"loss wights\")\n parser.add_argument(\"-isAug\", type=int, default=1, help=\"Is data augmentation\")\n parser.add_argument(\"-isCarts\", type=int, default=0, help=\"whether images should be converted into Cartesian\")\n parser.add_argument(\"-isTest\", type=int, default=0, help=\"Is test run instead of train. 1 when paramters are \"\n \"from arguments. 2 when paramters are from log file.\")\n parser.add_argument(\"-testEpoch\", type=int, default=10, help=\"epoch of the saved model for testing\")\n parser.add_argument(\"-testDir\", type=str, default=\"-\", help=\"test directory. Default is '-' for using the dataset.\")\n parser.add_argument(\"-saveEpoch\", type=int, default=100, help=\"epoch interval to save the model\")\n parser.add_argument(\"-epochSize\", type=int, default=10, help=\"number of samples per epoch as multiple of the \"\n \"training dataset size\")\n parser.add_argument(\"-nFeature\", type=int, default=8, help=\"number of features in the first layer\")\n parser.add_argument(\"-nLayer\", type=int, default=3, help=\"number of layers in the U-Net model\")\n parser.add_argument(\"-pool_scale\", type=int, default=2, help=\"max pooling scale factor.\")\n parser.add_argument(\"-gpu_id\", type=str, default=\"*\", help=\"ID of GPUs to be used. 
Use * for all and '' for none.\")\n parser.add_argument(\"-optimizer\", type=str, default=\"RMSprop\", help=\"optimizer\")\n parser.add_argument(\"-is_critique\", type=int, default=1, help=\"If critique model is used\")\n parser.add_argument(\"-critique_model\", type=str, default=\"critique-outCh6_v10\", help=\"critique definition\")\n parser.add_argument(\"-critiqueEpoch\", type=int, default=20000, help=\"epoch of the critique model\")\n parser.add_argument(\"-is_error_list\", type=int, default=0, help=\"use the error_list.txt file\")\n parser.add_argument(\"-error_case_ratio\", type=float, default=0.1, help=\"error case ratio in the batch\")\n parser.add_argument(\"--mode\", type=str)\n parser.add_argument(\"--port\", type=int)\n\n # assign the first part of args. The second part will ba assigned after reading parameter from log file\n args = parser.parse_args()\n experiment_def = args.exp_def\n isTest = args.isTest\n isTrain = 0 if args.isTest else 1\n models_path = args.models_path\n experiment_path = os.path.join(models_path, experiment_def)\n\n # prepare a folder for the saved models and log file\n if not os.path.exists(experiment_path):\n os.makedirs(experiment_path)\n save_file_name = os.path.join(experiment_path, 'model-epoch%06d.h5')\n log_file = os.path.join(experiment_path, 'log-' + experiment_def + '.csv')\n\n # read parameter from log file\n if args.isTest == 2:\n args = read_parameter_from_log_file(args, log_file)\n\n # assign the second part of args\n folder_path = args.data_path\n nEpoch = args.nEpoch\n nBatch = args.nBatch\n im_shape = (args.nZ, args.l, args.w, args.inCh)\n outCh = args.outCh\n loss_weight = np.array([float(i) for i in args.loss_w.split(',')], dtype='float32')\n loss_weight = loss_weight / np.sum(loss_weight)\n coord_sys = 'carts' if args.isCarts else 'polar'\n\n # initialize the log file or update the parameters\n if isTrain:\n if os.path.exists(log_file):\n with open(log_file, 'r') as f:\n L = f.readlines()\n L[0] = 'epoch, Time (hr), Train_Loss, Valid_Loss, ' + str(args) + '\\n'\n with open(log_file, 'w') as f:\n f.writelines(L)\n else:\n with open(log_file, 'w') as f:\n f.write('epoch, Time (hr), Train_Loss, Valid_Loss, ' + str(args) + '\\n')\n\n # GPU settings\n if '-' in args.gpu_id:\n numGPU = args.gpu_id.split('-')\n numGPU = int(numGPU[1]) - int(numGPU[0]) + 1\n elif '*' in args.gpu_id:\n numGPU = len(os.popen('nvidia-smi').read().split('+\\n')) - 5\n args.gpu_id = ','.join([str(i) for i in range(numGPU)])\n else:\n numGPU = len(args.gpu_id.split(','))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\n config = tf.ConfigProto(gpu_options=gpu_options)\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n set_session(tf.Session(config=config))\n\n # build the model\n model_template = unet_model(im_shape, nFeature=args.nFeature, outCh=outCh, nLayer=args.nLayer,\n pool_scale=args.pool_scale)\n\n # critique loss\n if args.is_critique:\n critique = load_model(os.path.join(models_path, args.critique_model,\n 'model-epoch%06d.h5' % args.critiqueEpoch),\n custom_objects={'loss': get(lambda y_, y: tf.reduce_mean(tf.abs(1 - y_ * y)))})\n critique.name = 'critique'\n for L in critique.layers:\n L.trainable = False\n\n model_template = Model(inputs=model_template.input,\n outputs=[model_template.output, critique([model_template.input, model_template.output])],\n name='with_critique')\n\n if isTrain:\n # load the last saved model if exists\n f = 
glob.glob(os.path.join(experiment_path, 'model-epoch*.h5'))\n f.sort()\n if len(f):\n iEpochStart = int(f[-1][-9:-3])\n model_template.load_weights(save_file_name % iEpochStart)\n with open(log_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=',', skipinitialspace=True)\n for row in reader:\n if int(row['epoch']) == iEpochStart:\n last_time = float(row['Time (hr)']) * 3600\n print('model at epoch %d is loaded.' % iEpochStart)\n iEpochStart += 1\n iEpoch = nEpoch # in case training is done\n else:\n iEpochStart = 1\n last_time = 0\n print('Model is initialized.')\n elif isTest:\n iEpoch = args.testEpoch\n model_template = load_model(save_file_name % iEpoch)\n print('model at epoch %d is loaded.' % args.testEpoch)\n\n if numGPU > 1:\n model = multi_gpu_model(model_template, gpus=numGPU)\n else:\n model = model_template\n optimizer = getattr(optimizers, args.optimizer)\n if args.is_critique:\n model.compile(optimizer=optimizer(lr=args.lr, decay=args.lr_decay),\n loss=[get(multi_loss(loss_weight[:-1], outCh)), get(lambda y_, y: 0.5 - 0.5 * y)],\n loss_weights=[sum(loss_weight[:-1]), loss_weight[-1]])\n else:\n model.compile(optimizer=optimizer(lr=args.lr, decay=args.lr_decay), loss=get(multi_loss(loss_weight, outCh)))\n\n # load data\n if args.testDir == '-':\n data_file = os.path.join(folder_path, 'Dataset ' + coord_sys + ' Z%d-L%d-W%d-C%d.h5' % im_shape)\n if os.path.exists(data_file):\n with h5py.File(data_file, 'r') as f:\n im, label_9class, train_data_id, test_data_id, valid_data_id, sample_caseID, sample_sliceID = \\\n np.array(f.get('/im')), np.array(f.get('/label')), np.array(f.get('/train_data_id')), \\\n np.array(f.get('/test_data_id')), np.array(f.get('/valid_data_id')), \\\n np.array(f.get('/sample_caseID')), np.array(f.get('/sample_sliceID'))\n else:\n im, label_9class, train_data_id, test_data_id, valid_data_id, sample_caseID, sample_sliceID = \\\n load_train_data(folder_path, im_shape, coord_sys, saveOutput=True)\n if args.is_error_list:\n with open('error_list.txt', 'r') as f:\n error_list = f.readlines()\n error_list = [int(i) - 1 for i in error_list]\n error_list = np.union1d(train_data_id, valid_data_id)[error_list]\n error_list = np.intersect1d(train_data_id, error_list)\n else:\n error_list = []\n\n # labels and masks\n label = np.zeros(label_9class.shape[:-1] + (outCh,))\n\n if outCh == 4:\n # 4 channel: Ch1: Lumen , Ch2: visible intima , Ch3: visible media ,\n # Ch0: others , note visible is without GW and nonIEL\n nonIEL_GW_mask = np.logical_not(np.logical_or(label_9class[..., 0], label_9class[..., 1]))\n label[..., 1] = label_9class[..., 2]\n label[..., 2] = np.logical_and(label_9class[..., 3], nonIEL_GW_mask)\n label[..., 3] = np.logical_and(label_9class[..., 4], nonIEL_GW_mask)\n label[..., 0] = np.all(np.logical_not(label[..., 1:]), axis=-1)\n\n elif outCh == 6:\n # 6 channel: Ch1: Lumen , Ch2: visible intima , Ch3: visible media ,\n # Ch4 : GW outside Lumen , Ch5: nonIEL outside Lumen and GW,\n # Ch0: others , note visible is without GW and nonIEL\n nonIEL_GW_mask = np.logical_not(np.logical_or(label_9class[..., 0], label_9class[..., 1]))\n label[..., 1] = label_9class[..., 2]\n label[..., 2] = np.logical_and(label_9class[..., 3], nonIEL_GW_mask)\n label[..., 3] = np.logical_and(label_9class[..., 4], nonIEL_GW_mask)\n IEL_EEL = np.any(label_9class[..., 3:5], axis=-1)\n label[..., 4] = np.logical_and(label_9class[..., 0], IEL_EEL)\n nonIEL_withoutGW = np.logical_and(label_9class[..., 1], np.logical_not(label_9class[..., 0]))\n label[..., 5] = 
np.logical_and(nonIEL_withoutGW, IEL_EEL)\n label[..., 0] = np.all(np.logical_not(label[..., 1:]), axis=-1)\n\n # # an experimental 6 label (called 2x3)\n # elif outCh == 6:\n # # Ch0 : not(intima or medai) , Ch1: intima , Ch2 : media\n # # Ch3 : not(GW or nonIEL) , Ch4: GW , Ch5: nonIEL\n # label[..., 0] = np.logical_not(np.logical_or(label_9class[..., 3], label_9class[..., 4]))\n # label[..., 1] = label_9class[..., 3]\n # label[..., 2] = label_9class[..., 4]\n # label[..., 3] = np.logical_not(np.logical_or(label_9class[..., 0], label_9class[..., 1]))\n # label[..., 4] = label_9class[..., 0]\n # label[..., 5] = label_9class[..., 1]\n\n\n # training\n if isTrain:\n train_data_gen = LoadBatchGenGPU(im, train_data_id, nBatch, label, isAug=args.isAug, coord_sys=coord_sys,\n prob_lim=0.5, isCritique=args.is_critique,\n error_list=error_list, error_case_ratio=args.error_case_ratio)\n if args.epochSize == 0:\n args.epochSize = np.ceil(train_data_id.size / nBatch).astype('int')\n else:\n args.epochSize = np.ceil(args.epochSize * train_data_id.size / nBatch).astype('int')\n print('Data is loaded. Training: %d, validation: %d' % (len(np.unique(sample_caseID[train_data_id])),\n len(np.unique(sample_caseID[valid_data_id]))))\n\n start = time.time() - last_time\n for iEpoch in range(iEpochStart, nEpoch + 1):\n model.fit_generator(train_data_gen, steps_per_epoch=args.epochSize, verbose=1)\n # evaluation\n if args.is_critique:\n train_loss = model.evaluate(im[train_data_id, ...],\n [label[train_data_id, ...], np.zeros((train_data_id.size, 1))],\n batch_size=nBatch, verbose=0)[0]\n valid_loss = model.evaluate(im[valid_data_id, ...],\n [label[valid_data_id, ...], np.zeros((valid_data_id.size, 1))],\n batch_size=nBatch, verbose=0)[0]\n else:\n train_loss = model.evaluate(im[train_data_id, ...], label[train_data_id, ...],\n batch_size=nBatch, verbose=0)\n valid_loss = model.evaluate(im[valid_data_id, ...], label[valid_data_id, ...],\n batch_size=nBatch, verbose=0)\n\n rem_time = (nEpoch - iEpoch) / iEpoch * (time.time() - start) / 3600.0\n print(\"Epoch%d: %.2f hr to finish, Train Loss: %f, Valid Loss: %f\" % (iEpoch, rem_time,\n train_loss, valid_loss))\n with open(log_file, 'a') as f:\n f.write(\"%d, %.2f, %f, %f, \\n\" % (iEpoch, (time.time() - start) / 3600.0, train_loss, valid_loss))\n\n # save model\n if iEpoch % args.saveEpoch == 0:\n model_template.save(save_file_name % iEpoch)\n\n # feed forward\n if args.testDir == '-':\n train_valid_data_id = np.union1d(train_data_id, valid_data_id)\n out = model.predict(im, batch_size=nBatch, verbose=1)\n if args.is_critique:\n out = np.array(out[0])\n\n # see the loss for the first 20 slices\n LOSS = np.zeros((20, ) + label.shape[1:-1], dtype='float32')\n for i in tqdm(range(LOSS.shape[0])):\n if args.is_critique:\n LOSS[[i], ...] = K.eval(model.loss[0](\n tf.constant(label[[train_valid_data_id[i]], ...].astype('float32')),\n tf.constant((out[[train_valid_data_id[i]], ...]).astype('float32'))))\n else:\n LOSS[[i], ...] 
= K.eval(model.loss(\n tf.constant(label[[train_valid_data_id[i]], ...].astype('float32')),\n tf.constant((out[[train_valid_data_id[i]], ...]).astype('float32'))))\n\n out = np.argmax(out, -1)\n label = np.argmax(label, -1)\n if len(out.shape) > 3:\n i = int(out.shape[1] // 2)\n label, out, im = label[:, i, ...].squeeze(), out[:, i, ...].squeeze(), im[:, i, ...].squeeze()\n\n # set the label intensity of the training slices background to the number of classes, which is one more than\n # the last class value\n i = label[train_data_id, ...]\n i[i == 0] = outCh\n label[train_data_id, ...] = i\n\n # write files\n tifffile.imwrite(os.path.join(experiment_path, 'a-label.tif'), label[train_valid_data_id, ...].astype(\n np.uint8))\n tifffile.imwrite(os.path.join(experiment_path, 'a-out-epoch%06d.tif' % iEpoch),\n out[train_valid_data_id, ...].astype(np.uint8))\n tifffile.imwrite(os.path.join(experiment_path, 'a-im.tif'),\n (im[train_valid_data_id, ...] * 255).astype(np.uint8).squeeze())\n tifffile.imwrite(os.path.join(experiment_path, 'a-loss.tif'), LOSS.astype('float32'))\n else:\n files = glob.glob(os.path.join(args.testDir, '*.pstif'))\n for f in tqdm(files):\n im = tifffile.imread(f)\n im = im.astype(np.float32) / 255\n im = np.moveaxis(np.reshape(im, (-1, 3,) + im.shape[1:]), 1, -1)\n im = polar_zoom(im, scale=im_shape[1] / im.shape[1])\n shift = (0, 128, 256, 384)\n out_ = np.zeros(im.shape[:-1] + (len(shift),))\n for j in range(len(shift)):\n im_ = im[:, :, np.r_[shift[j]:im_shape[2], 0:shift[j]]]\n out = model.predict(im_, batch_size=nBatch, verbose=1)\n if args.is_critique:\n out = np.array(out[0])\n out = np.argmax(out, -1)\n out_[..., j] = out[:, :, np.r_[(512 - shift[j]):im_shape[2], 0:(512 - shift[j])]]\n out = np.median(out_, axis=3)\n tifffile.imwrite(f[:-6] + '-fwd.tif', out.astype(np.uint8))\n tifffile.imwrite(f[:-6] + '-fwd-post.tif', postprocessing(out.astype(np.uint8)))\n tifffile.imwrite(f[:-6] + '-im.tif', (im * 255).astype(np.uint8).squeeze())\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "mhaft/OCTseg", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 19359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "util.read_parameter_from_log_file.read_parameter_from_log_file", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.popen", "line_number": 153, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 157, "usage_type": 
"attribute"}, {"api_name": "tensorflow.GPUOptions", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 159, "usage_type": "call"}, {"api_name": "keras.backend.tensorflow_backend.set_session", "line_number": 162, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 162, "usage_type": "call"}, {"api_name": "unet.unet.unet_model", "line_number": 165, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "keras.losses.get", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 172, "usage_type": "call"}, {"api_name": "tensorflow.abs", "line_number": 172, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 177, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 189, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 202, "usage_type": "call"}, {"api_name": "keras.utils.multi_gpu_model", "line_number": 206, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 209, "usage_type": "argument"}, {"api_name": "keras.losses.get", "line_number": 212, "usage_type": "call"}, {"api_name": "unet.loss.multi_loss", "line_number": 212, "usage_type": "call"}, {"api_name": "keras.losses.get", "line_number": 215, "usage_type": "call"}, {"api_name": "unet.loss.multi_loss", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "h5py.File", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 225, "usage_type": "call"}, {"api_name": "util.load_data.load_train_data", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.union1d", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 258, "usage_type": "call"}, {"api_name": 
"numpy.logical_and", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 262, "usage_type": "call"}, {"api_name": "util.load_batch.LoadBatchGenGPU", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 286, "usage_type": "call"}, {"api_name": "time.time", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 297, "usage_type": "call"}, {"api_name": "time.time", "line_number": 305, "usage_type": "call"}, {"api_name": "time.time", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.union1d", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 323, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 324, "usage_type": "call"}, {"api_name": "keras.backend.eval", "line_number": 326, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 326, "usage_type": "name"}, {"api_name": "tensorflow.constant", "line_number": 327, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 328, "usage_type": "call"}, {"api_name": "keras.backend.eval", "line_number": 330, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 330, "usage_type": "name"}, {"api_name": "tensorflow.constant", "line_number": 331, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 335, "usage_type": "call"}, {"api_name": "tifffile.imwrite", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path", "line_number": 347, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 348, "usage_type": "attribute"}, {"api_name": "tifffile.imwrite", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path", "line_number": 349, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 350, "usage_type": "attribute"}, {"api_name": "tifffile.imwrite", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 352, "usage_type": "attribute"}, {"api_name": "tifffile.imwrite", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path", "line_number": 353, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path", "line_number": 355, "usage_type": "attribute"}, {"api_name": 
"tqdm.tqdm", "line_number": 356, "usage_type": "call"}, {"api_name": "tifffile.imread", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 358, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 359, "usage_type": "call"}, {"api_name": "util.load_batch.polar_zoom", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 364, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 369, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 370, "usage_type": "call"}, {"api_name": "tifffile.imwrite", "line_number": 371, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 371, "usage_type": "attribute"}, {"api_name": "tifffile.imwrite", "line_number": 372, "usage_type": "call"}, {"api_name": "util.postprocessing.postprocessing", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 372, "usage_type": "attribute"}, {"api_name": "tifffile.imwrite", "line_number": 373, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 373, "usage_type": "attribute"}]} +{"seq_id": "16250654530", "text": "import os\n\nimport pytest\n\nimport audb\n\n\n@pytest.fixture(scope='package', autouse=True)\ndef cache(tmpdir_factory):\n r\"\"\"Provide a reusable cache for docstring tests.\n\n As we rely on emodb from the public repo,\n it makes sense to cache it\n across all docstring tests.\n\n \"\"\"\n cache = tmpdir_factory.mktemp('cache')\n # We use the environment variable here\n # to ensure audb.config.CACHE_ROOT\n # does still return the default config value\n # in the doctest\n env_cache = os.environ.get('AUDB_CACHE_ROOT', None)\n env_shared_cache = os.environ.get('AUDB_SHARED_CACHE_ROOT', None)\n os.environ['AUDB_CACHE_ROOT'] = str(cache)\n os.environ['AUDB_SHARED_CACHE_ROOT'] = str(cache)\n\n yield\n\n if env_cache is None:\n del os.environ['AUDB_CACHE_ROOT']\n else: # pragma: nocover\n os.environ['AUDB_CACHE_ROOT'] = env_cache\n\n if env_shared_cache is None:\n del os.environ['AUDB_SHARED_CACHE_ROOT']\n else: # pragma: nocover\n os.environ['AUDB_SHARED_CACHE_ROOT'] = env_shared_cache\n\n\n@pytest.fixture(autouse=True)\ndef public_repository(doctest_namespace):\n r\"\"\"Provide access to the public Artifactory repository.\n\n Some tests in the docstrings need access to the emodb database.\n As all the unit tests defined under ``tests/*``\n should not be able to see the public repository\n as the number of available databases would then not be deterministic.\n We provide this access here\n with the help of the ``doctest_namespace`` fixture.\n\n The ``conftest.py`` file has to be in the same folder\n as the code file where the docstring is defined.\n\n \"\"\"\n audb.config.REPOSITORIES = [\n audb.Repository(\n name='data-public',\n host='https://audeering.jfrog.io/artifactory',\n backend='artifactory',\n ),\n ]\n doctest_namespace['audb'] = audb\n\n yield\n\n # Remove public repo\n audb.config.REPOSITORIES.pop()\n", "repo_name": "audeering/audb", "sub_path": "audb/core/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 17, "dataset": "github-code", "pt": 
"52", "api": [{"api_name": "os.environ.get", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 8, "usage_type": "call"}, {"api_name": "audb.config", "line_number": 55, "usage_type": "attribute"}, {"api_name": "audb.Repository", "line_number": 56, "usage_type": "call"}, {"api_name": "audb.config.REPOSITORIES.pop", "line_number": 67, "usage_type": "call"}, {"api_name": "audb.config", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "70485631526", "text": "import geojson\nfrom sqlalchemy import (\n create_engine,\n Column,\n Integer,\n String,\n Float,\n)\nfrom geoalchemy2 import Geometry\nfrom sqlalchemy.dialects.postgresql import JSONB\nfrom sqlalchemy.orm import sessionmaker, declarative_base\n\nconnection_url = 'postgresql://postgres_user:geodata1234@127.0.0.1:5444/postgres' # noqa\nengine = create_engine(\n connection_url,\n pool_size=10,\n max_overflow=2,\n pool_recycle=300,\n pool_pre_ping=True,\n pool_use_lifo=True\n # echo=True,\n)\nSession = sessionmaker(bind=engine)\nsession = Session()\nBase = declarative_base()\n\n\nclass GeoData(Base):\n __tablename__ = 'geotable'\n\n id = Column(Integer, primary_key=True)\n geometry = Column(Geometry(srid=4326))\n area_ha = Column(Float)\n crop = Column(String)\n history = Column(JSONB)\n productivity = Column(String)\n region = Column(String)\n score = Column(String)\n type = Column(String)\n\n\nGeoData.__table__.drop(bind=engine)\nGeoData.__table__.create(bind=engine)\n\n\ndef geometry_to_ewkt(geom_dict):\n geom_type = geom_dict['type'].upper()\n if geom_type == 'POINT':\n coords = geom_dict['coordinates']\n return f'SRID=4326;POINT({coords[0]} {coords[1]})'\n elif geom_type == 'MULTIPOLYGON':\n polys = ','.join([','.join(\n [f\"(({','.join([' '.join(map(str, point)) for point in points])}))\"\n for points in poly]) for poly in geom_dict['coordinates']])\n return f'SRID=4326;MULTIPOLYGON({polys})'\n\ncount = 0\nwith engine.connect() as connection:\n with open('tools/fr-subset.geojsons') as f:\n for line in f:\n data = geojson.loads(line)\n\n geometry = None\n if data.get('geometry') is not None:\n geometry = geometry_to_ewkt(data['geometry'])\n\n geo_data = GeoData(\n geometry=geometry,\n id=data['properties']['id'],\n crop=data['properties']['crop'],\n productivity=data['properties']['productivity'],\n area_ha=float(data['properties']['area_ha']) if data['properties']['area_ha'] else None, # noqa\n history=data['properties']['history'],\n region=data['properties']['region'],\n score=data['properties']['score'],\n type=data['geometry']['type'].upper(),\n )\n session.add(geo_data)\n count += 1\n\n session.commit()\n\nprint(f'total docs: {count}', 'Data imported successfully!')\n", "repo_name": "Oless-N/search_geo_data", "sub_path": "tools/import_geo_data.py", "file_name": 
"import_geo_data.py", "file_ext": "py", "file_size_in_byte": 2496, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.declarative_base", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "geoalchemy2.Geometry", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Float", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 34, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.JSONB", "line_number": 35, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 37, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 38, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 39, "usage_type": "argument"}, {"api_name": "geojson.loads", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "38780289363", "text": "from pathlib import Path\r\nimport comtypes.client\r\nfrom glob import glob\r\n\r\nwd_format_pdf = 17\r\n\r\ninput_files = glob(\".\\doc\\*.docx\")\r\n\r\nword = comtypes.client.CreateObject(\"Word.Application\")\r\n\r\nfor file in input_files:\r\n in_file = str(Path(file).absolute())\r\n out_file = str(Path(file).absolute()).replace(\"docx\",\"pdf\")\r\n doc = word.Documents.Open(in_file)\r\n doc.SaveAs(out_file, FileFormat=wd_format_pdf)\r\n doc.Close()\r\n # print(out_file)\r\n\r\nword.Quit()", "repo_name": "stepheku/powerplan-rtf-gen", "sub_path": "convert_docx_to_pdf.py", "file_name": "convert_docx_to_pdf.py", "file_ext": "py", "file_size_in_byte": 472, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "glob.glob", "line_number": 7, "usage_type": "call"}, {"api_name": "comtypes.client.client.CreateObject", "line_number": 9, "usage_type": "call"}, {"api_name": "comtypes.client.client", "line_number": 9, "usage_type": "attribute"}, {"api_name": "comtypes.client", "line_number": 9, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "3517337578", "text": "from tkinter import *\r\nimport pandas as pd\r\nfrom csv import DictReader\r\nimport numpy as np\r\nimport datetime\r\nfrom datetime import date, timedelta\r\nfrom datetime import datetime\r\nimport decimal\r\nfrom datetime import *\r\nimport shutil\r\nimport csv\r\nimport os\r\nimport 
pdfkit\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport openpyxl\r\nimport openpyxl.worksheet.worksheet\r\nfrom win32com.client import Dispatch\r\n\r\n\r\nop=True\r\nlarge_font = ('Verdana',30)\r\nx = True\r\nList=[]\r\ns=0\r\ny=0.0\r\nz=0\r\ndf=''\r\nmaster = Tk()\r\nf1 = 2.5\r\nf2 = 2.5\r\nf3 = 2.5\r\nf4 = 2.5\r\nf5 = 1.7\r\nf6 = 2\r\nf7 = 2.5\r\nf8 = 1.7\r\nf9 = 1.7\r\nf10 = 2.5\r\nf11 = 5.0\r\nmaster.geometry('5000x5000')\r\ndef paaji():\r\n    file_name = \"C:/Users/DELL/Downloads/S/print.xlsx\"\r\n    df1000 = pd.read_excel(file_name) #Read Excel file as a DataFrame\r\n    df1000.set_index(\"दिनांक\", inplace = True)\r\n    wb_obj = openpyxl.load_workbook('print.xlsx')\r\n    \r\n    sheet_obj = wb_obj.active\r\n    ab = df1000['कुल'].sum()\r\n    print(ab)\r\n    ac = df1000['एडवांस'].sum()\r\n    c3 = sheet_obj['Z2']\r\n    c3.value = ab\r\n    c4 = sheet_obj['Z3']\r\n    c4.value = ac\r\n    c4 = sheet_obj['AA3']\r\n    c4.value = ab-ac\r\n    c5 = sheet_obj['AB3']\r\n    c5.value = sum2+TVP-ad2 # sum2, TVP and ad2 are module globals computed in search() before the Print button appears\r\n    wb_obj.save('print.xlsx')\r\n    df9000 = pd.read_excel('C:/Users/DELL/Downloads/S/print.xlsx')\r\n    df9001 = df9000.T # transpose the sheet before printing\r\n    df9001.to_excel('C:/Users/DELL/Downloads/S/print.xlsx')\r\n    excel = Dispatch('Excel.Application')\r\n    wb = excel.Workbooks.Open(\"C:/Users/DELL/Downloads/S/print.xlsx\")\r\n\r\n    #Activate the first worksheet (COM collections are 1-indexed)\r\n    excel.Worksheets(1).Activate()\r\n\r\n    #Autofit column in active sheet\r\n    excel.ActiveSheet.Columns.AutoFit()\r\n    wb.Save()\r\n\r\n    wb.Close()\r\n    os.startfile(\"C:/Users/DELL/Downloads/S/print.xlsx\",'print')\r\ndef Sum():\r\n    Label(master, text='आरंभ दिनांक').grid(row=1,column=7)\r\n    Label(master, text='समाप्ति दिनांक').grid(row=3,column=7)\r\n    global bd\r\n    global ed\r\n    global bb\r\n    bb = 'jan-27-21'\r\n    bd = Entry(master)\r\n    ed = Entry(master)\r\n    bd.grid(row=1, column=8)\r\n    ed.grid(row=3, column=8)\r\n    pp = 0\r\n    EN = ''\r\n    D = ''\r\n    X = 0.0\r\n    def search():\r\n        data2 = pd.read_csv(\"Records.csv\")\r\n        data = pd.read_csv(\"Records.csv\")\r\n        data.set_index(\"Date\", inplace = True)\r\n        df82 = data.loc[bd.get():ed.get(),['Haudi Katai Paddy(40Kg,30Kg)','Rate','Haudi Katai Rice(60Kg,50Kg)','Rate2','Paddy Stack(40Kg,30Kg)','Rate3','Doubling','Rate4','Rice Loading(60kg,50kg,40kg)','Rate5','Rice Loading(25kg)','Rate6','Polish Loading','Rate7','Rice Dhala','Rate8','Bundle Stack/Loading','Rate9','Rice Stack(25Kg)','Rate10','Rice Stack(50Kg,60Kg)','Rate11','Adv']]\r\n        df2 = df82['Adv']\r\n        ok =df82['Haudi Katai Paddy(40Kg,30Kg)']\r\n        ok2 = df82['Rate']\r\n        pro1 = ok*ok2\r\n        ok3 = df82['Haudi Katai Rice(60Kg,50Kg)']\r\n        ok4 = df82['Rate2']\r\n        pro2 = ok3*ok4\r\n        ok5 = df82['Paddy Stack(40Kg,30Kg)']\r\n        ok6 = df82['Rate3']\r\n        pro3 = ok5*ok6\r\n        ok7 = df82['Rice Loading(60kg,50kg,40kg)']\r\n        ok8 = df82['Rate5']\r\n        pro4 = ok7*ok8\r\n        ok9 = df82['Rice Loading(25kg)']\r\n        ok10 = df82['Rate6']\r\n        pro5 = ok9*ok10\r\n        ok11 = df82['Polish Loading']\r\n        ok12 = df82['Rate7']\r\n        pro6 = ok11*ok12\r\n        ok13 = df82['Rice Dhala']\r\n        ok14 = df82['Rate8']\r\n        pro7 = ok13*ok14\r\n        ok15 = df82['Bundle Stack/Loading']\r\n        ok16 = df82['Rate9']\r\n        pro8 = ok15*ok16\r\n        ok17 = df82['Rice Stack(25Kg)']\r\n        ok18 = df82['Rate10']\r\n        pro9 = ok17*ok18\r\n        ok19 = df82['Rice Stack(50Kg,60Kg)']\r\n        ok20 = df82['Rate11']\r\n        ok21 = df82['Doubling']\r\n        ok22 = df82['Rate4']\r\n        pro10 = ok19*ok20\r\n        pro11 = ok21*ok22\r\n        result = pro1+pro2+pro3+pro4+pro5+pro6+pro7+pro8+pro9+pro10+pro11\r\n        df82.to_excel('print.xlsx')\r\n        def Excel3():\r\n            col_names = ['दिनांक','हौदी कटाई 
धान(40Kg,30Kg)','Rate1','हौदी कटाई चावल(60Kg,50Kg)','Rate2','धान स्टैक(40Kg,30Kg)','Rate3','डब्लिंग','Rate4','चावल लोड(60kg,50kg,40kg)','Rate5','चावल लोड(25kg)','Rate6','पोलिश लोड','Rate7','चावल ढला','Rate8','बंडल स्टैक/लोड','Rate9','चावल स्टैक(25Kg)','Rate10','चावल स्टैक(50Kg,60Kg)','Rate11','एडवांस']\r\n file_name = \"C:/Users/DELL/Downloads/S/print.xlsx\"\r\n df1000 = pd.read_excel(file_name) #Read Excel file as a DataFrame\r\n df1000.columns=col_names\r\n df1000.set_index(\"दिनांक\", inplace = True)\r\n df1000['कुल'] = df1000['हौदी कटाई धान(40Kg,30Kg)']*df1000['Rate1']+df1000['हौदी कटाई चावल(60Kg,50Kg)']*df1000['Rate2']+df1000['धान स्टैक(40Kg,30Kg)']*df1000['Rate3']+df1000['डब्लिंग']*df1000['Rate4']+df1000['चावल लोड(60kg,50kg,40kg)']*df1000['Rate5']+df1000['चावल लोड(25kg)']*df1000['Rate6']+df1000['पोलिश लोड']*df1000['Rate7']+df1000['चावल ढला']*df1000['Rate8']+df1000['बंडल स्टैक/लोड']*df1000['Rate9']+df1000['चावल स्टैक(25Kg)']*df1000['Rate10']+df1000['चावल स्टैक(50Kg,60Kg)']*df1000['Rate11']\r\n df1000.to_excel(\"C:/Users/DELL/Downloads/S/print.xlsx\") \r\n global button5\r\n button5 = Button(master, text='Print', width=25, command=lambda:[Excel3(),paaji()])\r\n button5.grid(row=20,column=12)\r\n data2 = pd.read_csv(\"VRecords.csv\")\r\n data2.set_index(\"Date\", inplace = True)\r\n df6 = data2.loc[bd.get():ed.get(),['Haudi Katai Paddy(40Kg,30Kg)','Rate','Haudi Katai Rice(60Kg,50Kg)','Rate2','Paddy Stack(40Kg,30Kg)','Rate3','Doubling','Rate4','Rice Loading(60kg,50kg,40kg)','Rate5','Rice Loading(25kg)','Rate6','Polish Loading','Rate7','Rice Dhala','Rate8','Bundle Stack/Loading','Rate9','Rice Stack(25Kg)','Rate10','Rice Stack(50Kg,60Kg)','Rate11']]\r\n okkk =df6['Haudi Katai Paddy(40Kg,30Kg)']\r\n okkk2 = df6['Rate']\r\n prooo1 = okkk*okkk2\r\n okkk3 = df6['Haudi Katai Rice(60Kg,50Kg)']\r\n okkk4 = df6['Rate2']\r\n prooo2 = okkk3*okkk4\r\n okkk5 = df6['Paddy Stack(40Kg,30Kg)']\r\n okkk6 = df6['Rate3']\r\n prooo3 = okkk5*okkk6\r\n okkk7 = df6['Rice Loading(60kg,50kg,40kg)']\r\n okkk8 = df6['Rate5']\r\n prooo4 = okkk7*okkk8\r\n okkk9 = df6['Rice Loading(25kg)']\r\n okkk10 = df6['Rate6']\r\n prooo5 = okkk9*okkk10\r\n okkk11 = df6['Polish Loading']\r\n okkk12 = df6['Rate7']\r\n prooo6 = okkk11*okkk12\r\n okkk13 = df6['Rice Dhala']\r\n okkk14 = df6['Rate8']\r\n prooo7 = okkk13*okkk14\r\n okkk15 = df6['Bundle Stack/Loading']\r\n okkk16 = df6['Rate9']\r\n prooo8 = okkk15*okkk16\r\n okkk17 = df6['Rice Stack(25Kg)']\r\n okkk18 = df6['Rate10']\r\n prooo9 = okkk17*okkk18\r\n okkk19 = df6['Rice Stack(50Kg,60Kg)']\r\n okkk20 = df6['Rate11']\r\n okkk21 = df6['Doubling']\r\n okkk22 = df6['Rate4']\r\n prooo10 = okkk19*okkk20\r\n prooo11 = okkk21*okkk22\r\n global result5\r\n result5 = prooo1+prooo2+prooo3+prooo4+prooo5+prooo6+prooo7+prooo8+prooo9+prooo10+prooo11\r\n global L5\r\n global L6\r\n global L7\r\n global canvas\r\n global scroll_y\r\n canvas = Canvas(master, width=400, height=200)\r\n scroll_y = Scrollbar(master,orient=\"vertical\", command=canvas.yview)\r\n\r\n frame = Frame(canvas)\r\n # group of widgets\r\n for i in range(20):\r\n with pd.option_context('display.max_rows', None, 'display.max_columns', None): \r\n global L\r\n global L2\r\n global L3\r\n global L4\r\n global L8\r\n global L9\r\n L=Label(master, text=\"Advance\")\r\n L.grid(row=1,column=13)\r\n L2=Label(master, text=\"Sum\")\r\n L2.grid(row=1,column=11)\r\n L3=Label(frame,text=df2)\r\n L3.grid(row=3,column=13)\r\n L4=Label(frame, text=result)\r\n L4.grid(row=3,column=11)\r\n L8=Label(master, text=\"Vardhman Payment\")\r\n 
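# summary header row: Sum (column 11), Vardhman Payment (column 12), Advance (column 13)\r\n                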
L8.grid(row=1,column=12)\r\n L9=Label(frame, text=result5)\r\n L9.grid(row=3,column=12)\r\n canvas.create_window(1000,1000, anchor='nw', window=frame)\r\n canvas.update_idletasks()\r\n\r\n canvas.configure(scrollregion=canvas.bbox('all'), \r\n yscrollcommand=scroll_y.set)\r\n \r\n canvas.grid(row=2,column=12)\r\n scroll_y.grid(row=2,column=14)\r\n sum = result.sum()\r\n ad = df2.sum()\r\n va = result5.sum()\r\n global L10\r\n L5=Label(master, text=float(sum))\r\n L5.grid(row=5,column=11)\r\n L6=Label(master,text=float(ad))\r\n L6.grid(row=5,column=13)\r\n L10=Label(master, text=float(va))\r\n L10.grid(row=5,column=12)\r\n L7=Label(master, text=float(sum+va-ad))\r\n L7.grid(row=7,column=12)\r\n data6 = pd.read_csv(\"VRecords.csv\")\r\n data6.set_index(\"Date\", inplace = True)\r\n df8 = data6.loc[bb:ed.get(),['Haudi Katai Paddy(40Kg,30Kg)','Rate','Haudi Katai Rice(60Kg,50Kg)','Rate2','Paddy Stack(40Kg,30Kg)','Rate3','Doubling','Rate4','Rice Loading(60kg,50kg,40kg)','Rate5','Rice Loading(25kg)','Rate6','Polish Loading','Rate7','Rice Dhala','Rate8','Bundle Stack/Loading','Rate9','Rice Stack(25Kg)','Rate10','Rice Stack(50Kg,60Kg)','Rate11']]\r\n okkkk =df8['Haudi Katai Paddy(40Kg,30Kg)']\r\n okkkk2 = df8['Rate']\r\n proooo1 = okkkk*okkkk2\r\n okkkk3 = df8['Haudi Katai Rice(60Kg,50Kg)']\r\n okkkk4 = df8['Rate2']\r\n proooo2 = okkkk3*okkkk4\r\n okkkk5 = df8['Paddy Stack(40Kg,30Kg)']\r\n okkkk6 = df8['Rate3']\r\n proooo3 = okkkk5*okkkk6\r\n okkkk7 = df8['Rice Loading(60kg,50kg,40kg)']\r\n okkkk8 = df8['Rate5']\r\n proooo4 = okkkk7*okkkk8\r\n okkkk9 = df8['Rice Loading(25kg)']\r\n okkkk10 = df8['Rate6']\r\n proooo5 = okkkk9*okkkk10\r\n okkkk11 = df8['Polish Loading']\r\n okkkk12 = df8['Rate7']\r\n proooo6 = okkkk11*okkkk12\r\n okkkk13 = df8['Rice Dhala']\r\n okkkk14 = df8['Rate8']\r\n proooo7 = okkkk13*okkkk14\r\n okkkk15 = df8['Bundle Stack/Loading']\r\n okkkk16 = df8['Rate9']\r\n proooo8 = okkkk15*okkkk16\r\n okkkk17 = df8['Rice Stack(25Kg)']\r\n okkkk18 = df8['Rate10']\r\n proooo9 = okkkk17*okkkk18\r\n okkkk19 = df8['Rice Stack(50Kg,60Kg)']\r\n okkkk20 = df8['Rate11']\r\n okkkk21 = df8['Doubling']\r\n okkkk22 = df8['Rate4']\r\n proooo10 = okkkk19*okkkk20\r\n proooo11 = okkkk21*okkkk22\r\n global result8\r\n result8 = proooo1+proooo2+proooo3+proooo4+proooo5+proooo6+proooo7+proooo8+proooo9+proooo10+proooo11\r\n global TVP\r\n TVP = result8.sum()\r\n data3 = pd.read_csv(\"Records.csv\")\r\n data3.set_index(\"Date\", inplace = True)\r\n df5 = data3.loc[bb:ed.get(),['Haudi Katai Paddy(40Kg,30Kg)','Rate','Haudi Katai Rice(60Kg,50Kg)','Rate2','Paddy Stack(40Kg,30Kg)','Rate3','Doubling','Rate4','Rice Loading(60kg,50kg,40kg)','Rate5','Rice Loading(25kg)','Rate6','Polish Loading','Rate7','Rice Dhala','Rate8','Bundle Stack/Loading','Rate9','Rice Stack(25Kg)','Rate10','Rice Stack(50Kg,60Kg)','Rate11','Adv']]\r\n df6 = df5['Adv']\r\n okk =df5['Haudi Katai Paddy(40Kg,30Kg)']\r\n okk2 = df5['Rate']\r\n proo1 = okk*okk2\r\n okk3 = df5['Haudi Katai Rice(60Kg,50Kg)']\r\n okk4 = df5['Rate2']\r\n proo2 = okk3*okk4\r\n okk5 = df5['Paddy Stack(40Kg,30Kg)']\r\n okk6 = df5['Rate3']\r\n proo3 = okk5*okk6\r\n okk7 = df5['Rice Loading(60kg,50kg,40kg)']\r\n okk8 = df5['Rate5']\r\n proo4 = okk7*okk8\r\n okk9 = df5['Rice Loading(25kg)']\r\n okk10 = df5['Rate6']\r\n proo5 = okk9*okk10\r\n okk11 = df5['Polish Loading']\r\n okk12 = df5['Rate7']\r\n proo6 = okk11*okk12\r\n okk13 = df5['Rice Dhala']\r\n okk14 = df5['Rate8']\r\n proo7 = okk13*okk14\r\n okk15 = df5['Bundle Stack/Loading']\r\n okk16 = df5['Rate9']\r\n proo8 = 
okk15*okk16\r\n okk17 = df5['Rice Stack(25Kg)']\r\n okk18 = df5['Rate10']\r\n proo9 = okk17*okk18\r\n okk19 = df5['Rice Stack(50Kg,60Kg)']\r\n okk20 = df5['Rate11']\r\n okk21 = df5['Doubling']\r\n okk22 = df5['Rate4']\r\n proo10 = okk19*okk20\r\n proo11 = okk21*okk22\r\n result2 = proo1+proo2+proo3+proo4+proo5+proo6+proo7+proo8+proo9+proo10+proo11\r\n global sum2\r\n sum2 = result2.sum()\r\n global ad2\r\n ad2 = df6.sum()\r\n global L90\r\n L90=Label(master, text=float(\"{:.2f}\".format(sum2+TVP-ad2)))\r\n L90.grid(row=8,column=12)\r\n def delete_label():\r\n L.destroy()\r\n L2.destroy()\r\n L3.destroy()\r\n L4.destroy()\r\n L5.destroy()\r\n L6.destroy()\r\n L7.destroy()\r\n L8.destroy()\r\n L9.destroy()\r\n L10.destroy()\r\n L90.destroy()\r\n canvas.destroy()\r\n scroll_y.destroy()\r\n bd.delete(0,END)\r\n ed.delete(0,END)\r\n button5.destroy()\r\n button = Button(master, text='Submit', width=25, command=search)\r\n button.grid(row=5,column=8)\r\n button2 = Button(master, text='Clear', width=25, command=delete_label)\r\n button2.grid(row=6,column=8)\r\ndef Vardhman():\r\n def some_callbackk(event):\r\n event.widget.delete(0, \"end\")\r\n return None\r\n a1 = Label(master, text='दिनांक')\r\n a1.grid(row=20)\r\n a2 = Label(master, text='हौदी कटाई धान(40Kg,30Kg)')\r\n a2.grid(row=21)\r\n ee1 = Entry(master)\r\n ee2 = Entry(master)\r\n ee2.insert(0,0)\r\n ee2.bind(\"\", some_callbackk)\r\n ee1.grid(row=20, column=1)\r\n ee2.grid(row=21, column=1)\r\n a3 = Label(master, text='हौदी कटाई चावल(60Kg,50Kg)')\r\n a3.grid(row=22)\r\n a4 = Label(master, text='धान स्टैक(40Kg,30Kg)')\r\n a4.grid(row=23)\r\n ee3 = Entry(master)\r\n ee4 = Entry(master)\r\n ee3.grid(row=22, column=1)\r\n ee4.grid(row=23, column=1)\r\n ee3.bind(\"\", some_callbackk)\r\n ee4.bind(\"\", some_callbackk)\r\n ee3.insert(0,0)\r\n ee4.insert(0,0)\r\n a5 = Label(master, text='डब्लिंग')\r\n a5.grid(row=24)\r\n ee13 = Entry(master)\r\n ee13.bind(\"\", some_callbackk)\r\n ee13.grid(row=24, column=1)\r\n ee13.insert(0,0)\r\n a6 = Label(master, text='चावल लोड(60kg,50kg,40kg)')\r\n a6.grid(row=25)\r\n a7 = Label(master, text='चावल लोड(25kg)')\r\n a7.grid(row=26)\r\n ee5 = Entry(master)\r\n ee6 = Entry(master)\r\n ee5.bind(\"\", some_callbackk)\r\n ee6.bind(\"\", some_callbackk)\r\n ee5.grid(row=25, column=1)\r\n ee6.grid(row=26, column=1)\r\n ee5.insert(0,0)\r\n ee6.insert(0,0)\r\n a8 = Label(master, text='पोलिश लोड')\r\n a8.grid(row=27)\r\n a9 = Label(master, text='चावल ढला')\r\n a9.grid(row=28)\r\n ee7 = Entry(master)\r\n ee8 = Entry(master)\r\n ee7.bind(\"\", some_callbackk)\r\n ee8.bind(\"\", some_callbackk)\r\n ee7.grid(row=27, column=1)\r\n ee8.grid(row=28, column=1)\r\n ee7.insert(0,0)\r\n ee8.insert(0,0)\r\n a10 = Label(master, text='बंडल स्टैक/लोड')\r\n a10.grid(row=29)\r\n a11 = Label(master, text='चावल स्टैक(25Kg)')\r\n a11.grid(row=30)\r\n ee9 = Entry(master)\r\n ee10 = Entry(master)\r\n ee9.bind(\"\", some_callbackk)\r\n ee10.bind(\"\", some_callbackk)\r\n ee9.grid(row=29, column=1)\r\n ee10.grid(row=30, column=1)\r\n ee9.insert(0,0)\r\n ee10.insert(0,0)\r\n a12 = Label(master, text='चावल स्टैक(50Kg,60Kg)')\r\n a12.grid(row=31)\r\n ee11 = Entry(master)\r\n ee11.bind(\"\", some_callbackk)\r\n ee11.grid(row=31, column=1)\r\n ee11.insert(0,0)\r\n def export2():\r\n \r\n List2=[ee1.get(),ee2.get(),f1,ee3.get(),f2,ee4.get(),f3,ee13.get(),f11,ee5.get(),f4,ee6.get(),f5,ee7.get(),f6,ee8.get(),f7,ee9.get(),f8,ee10.get(),f9,ee11.get(),f10]\r\n with open('VRecords.csv', 'a',newline='') as f_object:\r\n df99 = 
pd.read_csv('VRecords.csv')\r\n\r\n new_df = df99.dropna()\r\n\r\n writer_object =csv.writer(f_object)\r\n \r\n writer_object.writerow(List2)\r\n ee1.delete(0,END)\r\n ee2.delete(0, END)\r\n ee3.delete(0,END)\r\n ee4.delete(0,END)\r\n ee5.delete(0,END)\r\n ee6.delete(0,END)\r\n ee7.delete(0,END)\r\n ee8.delete(0,END)\r\n ee9.delete(0,END)\r\n ee10.delete(0,END)\r\n ee11.delete(0,END)\r\n ee13.delete(0,END)\r\n ee2.insert(0,0)\r\n ee3.insert(0,0)\r\n ee4.insert(0,0) \r\n ee5.insert(0,0)\r\n ee6.insert(0,0)\r\n ee7.insert(0,0)\r\n ee8.insert(0,0)\r\n ee9.insert(0,0)\r\n ee10.insert(0,0)\r\n ee11.insert(0,0)\r\n ee13.insert(0,0)\r\n button3 = Button(master, text='Submit', width=25, command=export2)\r\n button3.grid(row=33,column=1)\r\n def Excel2():\r\n os.startfile(\"C:/Users/DELL/Downloads/S/VRecords.csv\")\r\n button4 = Button(master, text='Excel', width=25, command=Excel2)\r\n button4.grid(row=33,column=2)\r\n def clear2():\r\n ee1.destroy()\r\n ee2.destroy()\r\n ee3.destroy()\r\n ee4.destroy()\r\n ee5.destroy()\r\n ee6.destroy()\r\n ee7.destroy()\r\n ee8.destroy()\r\n ee9.destroy()\r\n ee10.destroy()\r\n ee11.destroy()\r\n ee13.destroy()\r\n a1.destroy()\r\n a2.destroy()\r\n a3.destroy()\r\n a4.destroy()\r\n a5.destroy()\r\n a6.destroy()\r\n a7.destroy()\r\n a8.destroy()\r\n a9.destroy()\r\n a10.destroy()\r\n a11.destroy()\r\n a12.destroy()\r\n button3.destroy()\r\n button4.destroy()\r\n button5.destroy()\r\n button5 = Button(master, text='Clear', width=25, command=clear2)\r\n button5.grid(row=33,column=3)\r\n\r\ndef input():\r\n def some_callback(event):\r\n event.widget.delete(0, \"end\")\r\n return None\r\n Label(master, text='दिनांक').grid(row=0)\r\n Label(master, text='हौदी कटाई धान(40Kg,30Kg)').grid(row=1)\r\n e1 = Entry(master)\r\n e2 = Entry(master)\r\n e2.insert(0,0)\r\n e2.bind(\"\", some_callback)\r\n e1.grid(row=0, column=1)\r\n e2.grid(row=1, column=1)\r\n Label(master, text='हौदी कटाई चावल(60Kg,50Kg)').grid(row=2)\r\n Label(master, text='धान स्टैक(40Kg,30Kg)').grid(row=3)\r\n e3 = Entry(master)\r\n e4 = Entry(master)\r\n e3.grid(row=2, column=1)\r\n e4.grid(row=3, column=1)\r\n e3.bind(\"\", some_callback)\r\n e4.bind(\"\", some_callback)\r\n e3.insert(0,0)\r\n e4.insert(0,0)\r\n Label(master, text='डब्लिंग').grid(row=4)\r\n e13 = Entry(master)\r\n e13.bind(\"\", some_callback)\r\n e13.grid(row=4, column=1)\r\n e13.insert(0,0)\r\n Label(master, text='चावल लोड(60kg,50kg,40kg)').grid(row=5)\r\n Label(master, text='चावल लोड(25kg)').grid(row=6)\r\n e5 = Entry(master)\r\n e6 = Entry(master)\r\n e5.bind(\"\", some_callback)\r\n e6.bind(\"\", some_callback)\r\n e5.grid(row=5, column=1)\r\n e6.grid(row=6, column=1)\r\n e5.insert(0,0)\r\n e6.insert(0,0)\r\n Label(master, text='पोलिश लोड').grid(row=7)\r\n Label(master, text='चावल ढला').grid(row=8)\r\n e7 = Entry(master)\r\n e8 = Entry(master)\r\n e7.bind(\"\", some_callback)\r\n e8.bind(\"\", some_callback)\r\n e7.grid(row=7, column=1)\r\n e8.grid(row=8, column=1)\r\n e7.insert(0,0)\r\n e8.insert(0,0)\r\n Label(master, text='बंडल स्टैक/लोड').grid(row=9)\r\n Label(master, text='चावल स्टैक(25Kg)').grid(row=10)\r\n e9 = Entry(master)\r\n e10 = Entry(master)\r\n e9.bind(\"\", some_callback)\r\n e10.bind(\"\", some_callback)\r\n e9.grid(row=9, column=1)\r\n e10.grid(row=10, column=1)\r\n e9.insert(0,0)\r\n e10.insert(0,0)\r\n Label(master, text='चावल स्टैक(50Kg,60Kg)').grid(row=11)\r\n Label(master, text='एडवांस').grid(row=12)\r\n e11 = Entry(master)\r\n e12 = Entry(master)\r\n e11.bind(\"\", some_callback)\r\n e12.bind(\"\", 
some_callback)\r\n    e11.grid(row=11, column=1)\r\n    e12.grid(row=12, column=1)\r\n    e11.insert(0,0)\r\n    e12.insert(0,0)\r\n\r\n    def export():\r\n        \r\n        List=[e1.get(),e2.get(),f1,e3.get(),f2,e4.get(),f3,e13.get(),f11,e5.get(),f4,e6.get(),f5,e7.get(),f6,e8.get(),f7,e9.get(),f8,e10.get(),f9,e11.get(),f10,e12.get()]\r\n        if(e2.get()!=0 and e13.get()!=0 and e3.get()!=0 and e4.get()!=0 and e5.get()!=0 and e6.get()!=0 and e7.get()!=0 and e8.get()!=0 and e9.get()!=0 and e10.get()!=0 and e11.get()!=0 and e12.get()!=0 ): # note: Entry.get() returns strings, so comparing them to the integer 0 is always True\r\n            with open('Records.csv', 'a',newline='') as f_object:\r\n                df = pd.read_csv('Records.csv')\r\n\r\n                new_df = df.dropna()\r\n\r\n                writer_object =csv.writer(f_object)\r\n                \r\n                writer_object.writerow(List)\r\n        e1.delete(0,END)\r\n        e2.delete(0, END)\r\n        e3.delete(0,END)\r\n        e4.delete(0,END)\r\n        e5.delete(0,END)\r\n        e6.delete(0,END)\r\n        e7.delete(0,END)\r\n        e8.delete(0,END)\r\n        e9.delete(0,END)\r\n        e10.delete(0,END)\r\n        e11.delete(0,END)\r\n        e12.delete(0,END)\r\n        e13.delete(0,END)\r\n        e2.insert(0,0)\r\n        e3.insert(0,0)\r\n        e4.insert(0,0) \r\n        e5.insert(0,0)\r\n        e6.insert(0,0)\r\n        e7.insert(0,0)\r\n        e8.insert(0,0)\r\n        e9.insert(0,0)\r\n        e10.insert(0,0)\r\n        e11.insert(0,0)\r\n        e12.insert(0,0)\r\n        e13.insert(0,0)\r\n    button = Button(master, text='Submit', width=25, command=export)\r\n    button.grid(row=14,column=1)\r\n\r\ndef Excel():\r\n    os.startfile(\"C:/Users/DELL/Downloads/S/Records.csv\")\r\n\r\n\r\n    \r\nwhile (x==True):\r\n    input()\r\n    button = Button(master, text='Excel', width=25, command=Excel)\r\n    button.grid(row=18,column=1)\r\n    button2 = Button(master, text='Vardhman', width=25, command=Vardhman)\r\n    button2.grid(row=19,column=1)\r\n    Sum()\r\n\r\n    mainloop()", "repo_name": "KUSHAGRAJAIN03/SanmatSoftware", "sub_path": "sanmat.py", "file_name": "sanmat.py", "file_ext": "py", "file_size_in_byte": 22979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_excel", "line_number": 46, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 63, "usage_type": "call"}, {"api_name": "win32com.client.Dispatch", "line_number": 66, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.option_context", "line_number": 194, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 233, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 273, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 417, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 421, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 450, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 557, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 561, "usage_type": "call"}, {"api_name": "os.startfile", "line_number": 593, "usage_type": "call"}]} +{"seq_id": "5297633404", "text": "\"\"\"Base module for the Python Durable functions.\n\nExposes the different API components intended for public consumption\n\"\"\"\nfrom .orchestrator import Orchestrator\nfrom .entity import Entity\nfrom 
.models.utils.entity_utils import EntityId\nfrom .models.DurableOrchestrationClient import DurableOrchestrationClient\nfrom .models.OrchestrationRuntimeStatus import OrchestrationRuntimeStatus\nfrom .models.DurableOrchestrationContext import DurableOrchestrationContext\nfrom .models.DurableEntityContext import DurableEntityContext\nfrom .models.RetryOptions import RetryOptions\nfrom .models.TokenSource import ManagedIdentityTokenSource\nimport json\nfrom pathlib import Path\nimport sys\nimport warnings\n\n\ndef validate_extension_bundles():\n \"\"\"Raise a warning if host.json contains bundle-range V1.\n\n Effects\n ------\n Warning: Warning prompting the user to update to bundles V2\n \"\"\"\n # No need to validate if we're running tests\n if \"pytest\" in sys.modules:\n return\n\n host_path = \"host.json\"\n bundles_key = \"extensionBundle\"\n version_key = \"version\"\n host_file = Path(host_path)\n\n if not host_file.exists():\n # If it doesn't exist, we ignore it\n return\n\n with open(host_path) as f:\n host_settings = json.loads(f.read())\n try:\n version_range = host_settings[bundles_key][version_key]\n except Exception:\n # If bundle info is not available, we ignore it.\n # For example: it's possible the user is using a manual extension install\n return\n # We do a best-effort attempt to detect bundles V1\n # This is the string hard-coded into the bundles V1 template in VSCode\n if version_range == \"[1.*, 2.0.0)\":\n message = \"Your application is currently configured to use Extension Bundles V1.\"\\\n \" Durable Functions for Python works best with Bundles V2,\"\\\n \" which provides additional features like Durable Entities, better performance,\"\\\n \" and is actively being developed.\"\\\n \" Please update to Bundles V2 in your `host.json`.\"\\\n \" You can set extensionBundles version to be: [2.*, 3.0.0)\"\n warnings.warn(message)\n\n\n# Validate that users are not in extension bundles V1\nvalidate_extension_bundles()\n\n__all__ = [\n 'Orchestrator',\n 'Entity',\n 'EntityId',\n 'DurableOrchestrationClient',\n 'DurableEntityContext',\n 'DurableOrchestrationContext',\n 'ManagedIdentityTokenSource',\n 'OrchestrationRuntimeStatus',\n 'RetryOptions'\n]\n\ntry:\n # disabling linter on this line because it fails to recognize the conditional export\n from .decorators.durable_app import (DFApp, Blueprint) # noqa\n __all__.append('DFApp')\n __all__.append('Blueprint')\nexcept ModuleNotFoundError:\n pass\n", "repo_name": "Azure/azure-functions-durable-python", "sub_path": "azure/durable_functions/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 113, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.modules", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "38142783554", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 16 21:01:00 2023\n\n@author: dreardon\n\"\"\"\n\nimport bilby\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom correlations_utils import *\nfrom KDEpy import FFTKDE\nfrom scipy.signal import savgol_filter\n\n\nfrom matplotlib import rc\nimport matplotlib\nrc('text', usetex=True)\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\nmatplotlib.rcParams.update({'font.size': 
16})\n\npsrnames = ['J0030+0451',\n 'J0125-2327',\n 'J0437-4715',\n 'J0613-0200',\n 'J0614-3329',\n 'J0711-6830',\n 'J0900-3144',\n 'J1017-7156',\n 'J1022+1001',\n 'J1024-0719',\n 'J1045-4509',\n 'J1125-6014',\n 'J1446-4701',\n 'J1545-4550',\n 'J1600-3053',\n 'J1603-7202',\n 'J1643-1224',\n 'J1713+0747',\n 'J1730-2304',\n 'J1744-1134',\n 'J1832-0836',\n 'J1857+0943',\n 'J1902-5105',\n 'J1909-3744',\n 'J1933-6211',\n 'J1939+2134',\n 'J2124-3358',\n 'J2129-5721',\n 'J2145-0750',\n 'J2241-5236']\n\ncorr_hd_all = {}\ndatadir = '/Users/dreardon/Desktop/ppta_correlations/corr_chains'\npairs = get_pairs(psrnames)\nfor i in range(1, 436): # 435 pulsar pairs\n pair = pairs[str(1 + (int(i) - 1) % 435)]\n corr_hd_all['_'.join(pair)] = np.load(\"corr_chains/{}_corr_hd.npy\".format(i))\n\nimport json, random\n\nts_scrambles = []\n\nbad_list = []\n\n\n#new_pos = {}\n#for i, k in enumerate(pos.keys()):\n# new_pos[k] = np.random.uniform(low=-1, high=1, size=3)\n\nfor _ in range(0, 1):\n\n with open('positions.json', 'r') as f:\n pos = json.load(f)\n\n npos = len(np.array(list(pos.values())))\n inds = np.linspace(0, npos-1, npos)\n random.shuffle(inds)\n new_pos = {}\n for i, k in enumerate(pos.keys()):\n #new_pos[k] = np.array(list(pos.values()))[int(inds[i])]\n new_pos[k] = np.random.uniform(low=-1, high=1, size=3)\n pos = new_pos\n\n\n # Look through pairs and make plots\n kde = {}\n pdf = {}\n ptot = {}\n\n orf_bins_total = {}\n numerator = 0\n denominator = 0\n numerator_68 = 0\n denominator_68 = 0\n donepair = {}\n likelihood_hd = {}\n likelihood_curn = {}\n likelihood_null = {}\n likelihood_hd_global = 0\n likelihood_mono_global = 0\n likelihood_dipole_global = 0\n likelihood_curn_global = 0\n likelihood_null_global = 0\n bf_hd = {}\n n_bins = {}\n n_tot = 0\n\n y_flat = np.linspace(-1, 1, 256)\n null_prob = np.ones(np.shape(y_flat))\n null_prob /= np.sum(null_prob)\n null_prob /= np.mean(np.diff(np.linspace(-1, 1, 256)))\n #null_prob = np.ones(np.shape(null_prob))\n\n nseps = 8\n vals = np.linspace(0, 180, nseps+1)\n halfdiff = np.mean(np.diff(vals))/2\n vals = (vals - halfdiff)[1:]\n seps = {}\n\n nseps2 = 7\n vals2 = np.array([1e-3, 30.0, 50.0, 80.0, 100.0, 120.0, 150.0, 180.0])\n halfdiff2 = np.mean(np.diff(vals2))/2\n vals2 = (vals2[1:] - np.diff(vals2)/2)\n seps2 = {}\n\n data_bins = np.load(\"bins_chain_total.npy\")\n\n\n for ibin in range(1, nseps+1):\n seps[\"bin_{}\".format(ibin)] = vals[ibin-1]\n for ibin in range(1, nseps2+1):\n seps2[\"bin_{}\".format(ibin)] = vals2[ibin-1]\n\n for _ in range(0,1):\n\n #for psri in psrnames:\n orf_bins = {}\n for i in range(1, 436): # 435 pulsar pairs\n\n pair = pairs[str(1 + (int(i) - 1) % 435)]\n print(i, '_'.join(pair))\n\n psr1 = pair[0]\n psr2 = pair[1]\n\n psri='dummy2'\n\n #if ('0437' not in psr1) and ('0437' not in psr2):\n # continue\n #if ('1909' not in psr1) and ('1909' not in psr2):\n # continue\n\n #if '1600' in psr1 or '1600' in psr2:\n # continue\n #if '2145' in psr1 or '2145' in psr2:\n # continue\n #if '1545' in psr1 or '1545' in psr2:\n # continue\n #if '0437' in psr1 or '0437' in psr2:\n # continue\n\n #if '0437' in psr1 or '0437' in psr2:\n # continue\n #if '1713' in psr1 or '1713' in psr2:\n # continue\n\n if not '_'.join(pair) in donepair:\n donepair['_'.join(pair)] = False\n\n # corr_hd = np.array(corr_hd_all['_'.join(pair)]) # bw=0.1, refecting correlations but not amplitude. 
Slice taken at -14.69\n # y_flat = np.linspace(-1, 1, 256)\n\n \"\"\"\n ind = -6 # corr coeff\n ind_amp = -7 # amplitude\n\n chain = np.load(\"corr_chains/{}.npy\".format(i))\n\n chain_2d = chain[:, [ind_amp, ind]].squeeze()\n\n\n amp = chain_2d[:,0]\n corr = chain_2d[:,1]\n\n reweight_factor = 10**(amp) / (10**-14 - 10**-18) # to uniform\n\n a2 = (10**(amp * reweight_factor) * 10**-14)**2 # uniform in linear\n\n square_reweight = abs(10**56 * a2 * corr)\n\n cov = a2 * square_reweight * corr\n print(len(cov))\n\n medcov = np.median(cov)\n q16 = np.percentile(cov, q=16)\n q84 = np.percentile(cov, q=84)\n stdcov = np.std(cov)\n cov68 = (q84-q16)/2\n\n print(medcov, q16, q84, stdcov, cov68)\n\n plt.hist(cov, bins=1000, density=True)\n plt.title(' '.join(pair))\n\n kde = gaussian_kde(cov, bw_method=0.1)\n\n grid_coords = np.linspace(-1, 1, 1000)\n cov_hd = kde(grid_coords)\n\n cov_hd /= np.sum(cov_hd)\n cov_hd /= np.mean(np.diff(grid_coords))\n\n plt.plot(grid_coords, cov_hd)\n plt.xlim([-1, 1])\n plt.show()\n\n np.save(\"corr_chains/{}_cov_hd.npy\".format(i), cov_hd)\n continue\n \"\"\"\n\n\n\n # Find index corresponding to the correlation corefficient\n ind = -6 # corr coeff\n ind_amp = -7 # amplitude\n\n chain_name = \"corr_chains/{}_more1713.npy\".format(i)\n chain = np.load(chain_name)\n\n #if len(chain[:, ind]) <= 3000:\n # print(i, pair, len(chain[:, ind]))\n # continue\n\n if not \"fixA\" in chain_name:\n # FOR TAKING ONLY SOME SAMPLES\n # chain_2d = chain[1::2, [ind_amp, ind]].squeeze()\n chain_2d = chain[:, [ind_amp, ind]].squeeze()\n\n \"\"\"\n COMPUTE OPTIMAL STATISTIC\n \"\"\"\n\n log10_amps = chain_2d[:,0]\n corrs = chain_2d[:,1]\n cov = (10**log10_amps) ** 2 * corrs\n\n # uniform in A**2 * Gam\n weights = abs(corrs * 2**(2*log10_amps + 1) * 5**(2*log10_amps) * np.log(10))\n\n # Uniform in A * Gam\n weights = abs(corrs * 10**log10_amps * np.log(10))\n\n\n vals = plt.hist(cov, bins=200, range=[-10e-30, 10e-30], weights=weights, density=True)\n plt.xlim([-10*10**-30, 10*10**-30])\n plt.title(' '.join(pair))\n\n densities = vals[0]\n edges = vals[1]\n centres = edges[:-1] + np.diff(edges)/2\n\n smooth_dense = savgol_filter(densities, 10, 1)\n plt.plot(centres, smooth_dense)\n plt.close()\n\n np.savez(\"corr_chains/{}_os.npz\".format(i), smooth_dense, centres)\n print(\"saved\", \"corr_chains/{}_os.npz\".format(i))\n\n continue\n\n print(\"Samples above -14.69: {}\".format(len(np.argwhere(chain_2d[:, 0] >= -14.69))))\n if len(np.argwhere(chain_2d[:, 0] >= -14.69)) < 1000:\n bad_list.append('_'.join(pair))\n print(\"*** WARNING! TOO FEW SAMPLES! 
***\")\n\n #plt.subplots(1, 2, figsize=(12,6))\n #plt.subplot(1, 2, 1)\n #h, xedges, yedges, _ = plt.hist2d(chain_2d[:, 0], chain_2d[:, 1], bins=100, density=True, range=[[-18, -14],[-1, 1]])\n #plt.ylabel('Corr coeff')\n #plt.xlabel('log10 A')\n #plt.colorbar()\n #plt.title(' '.join(pair))\n #plt.tight_layout()\n\n\n chain_2d_reflect_corrs_pos = chain_2d.copy()\n chain_2d_reflect_corrs_pos[:, 1] = 2*(1) - chain_2d_reflect_corrs_pos[:, 1]\n chain_2d_reflect_corrs_neg = chain_2d.copy()\n chain_2d_reflect_corrs_neg[:, 1] = 2*(-1) - chain_2d_reflect_corrs_neg[:, 1]\n\n chain_mirror_corr = np.concatenate((chain_2d_reflect_corrs_neg, chain_2d, chain_2d_reflect_corrs_pos))\n\n chain_2d_reflect_amp_pos = chain_mirror_corr.copy()\n chain_2d_reflect_amp_pos[:, 0] = 2*(-14) - chain_2d_reflect_amp_pos[:, 0]\n chain_2d_reflect_amp_neg = chain_mirror_corr.copy()\n chain_2d_reflect_amp_neg[:, 0] = 2*(-18) - chain_2d_reflect_amp_neg[:, 0]\n\n chain_mirror = np.concatenate((chain_2d_reflect_amp_neg, chain_mirror_corr, chain_2d_reflect_amp_pos))\n #chain_mirror = np.concatenate((chain_2d_reflect_amp_neg, chain_mirror_corr))\n #chain_mirror = chain_mirror_corr\n\n #gkde_2d = gaussian_kde(np.transpose(chain_mirror))\n\n #p, bins, _ = plt.hist(chain[:, ind], range=(-1, 1), bins=100, density=True)\n #plt.xlim([-1, 1])\n #plt.close()\n #centres = bins[0:-1] + np.diff(bins)\n #gkde = gaussian_kde(chain[:, ind].squeeze(), bw_method=0.2)\n\n bw = 0.05\n\n data = chain_mirror.squeeze()\n grid_points = 1024 # Grid points in each dimension\n kde = FFTKDE(kernel='gaussian', bw=bw)\n grid, points = kde.fit(data).evaluate((grid_points, grid_points))\n # The grid is of shape (obs, dims), points are of shape (obs, 1)\n x, y = np.unique(grid[:, 0]), np.unique(grid[:, 1])\n # x is amplitudes\n # y is corr coeff\n z = points.reshape(grid_points, grid_points).T\n\n z[y<=-1, :] = 0 # Set the KDE to zero outside of the domain\n z[y>=1, :] = 0 # Set the KDE to zero outside of the domain\n z[:, x<=-18] = 0 # Set the KDE to zero outside of the domain\n z[:, x>=-14] = 0 # Set the KDE to zero outside of the domain\n z = z.squeeze() * 9 # multiply the kde to get integral of ~1\n\n indx = np.argwhere((x>=-18)*(x<=-14)).squeeze()\n indy = np.argwhere((y>=-1)*(y<=1)).squeeze()\n\n y = y[indy]\n x = x[indx]\n z = z[indy, :][:, indx]\n\n #plt.subplot(1, 2, 2)\n #plt.pcolormesh(x,y,z)\n #plt.colorbar()\n #plt.xlim([-18,-14])\n #plt.ylim([-1,1])\n\n #plt.ylabel('Corr coeff')\n #plt.xlabel('log10 A')\n\n np.savez(\"corr_chains/{}_pdf_importance_more1713.npz\".format(i), x, y, z)\n\n # sys.exit()\n\n # x_flat = np.linspace(-18, -14, 20)\n # # x_flat = np.array([-14.69])\n # y_flat = np.linspace(-1, 1, 20)\n # xcentre = xedges[1:] - np.mean(np.diff(xedges))/2\n # ycentre = yedges[1:] - np.mean(np.diff(yedges))/2\n # x, y = np.meshgrid(xcentre, ycentre)\n # grid_coords = np.append(x.reshape(-1,1),y.reshape(-1,1),axis=1)\n\n # z = kde(grid_coords.T)\n\n # z = z.reshape(20, 20)\n # z /= np.sum(z)\n # z /= np.mean(np.diff(xcentre))\n # z /= np.mean(np.diff(ycentre))\n\n # plt.subplot(3, 1, 2)\n # plt.title(\"KDE, bw={}\".format(bw))\n # plt.pcolormesh(xedges, yedges, z)\n # plt.colorbar()\n # plt.ylabel('Corr coeff')\n # plt.xlabel('log10 A')\n\n\n # plt.subplot(3, 1, 3)\n # plt.pcolormesh(xedges, yedges, z - h.T)\n # plt.colorbar()\n # plt.title(\"Residual, standard deviation = {}\".format(round(np.std(z - h.T), 2)))\n # plt.ylabel('Corr coeff')\n # plt.xlabel('log10 A')\n\n #plt.tight_layout()\n\n 
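# NOTE: the chain was mirrored across the prior edges (corr in [-1, 1], log10 A in [-18, -14]) before the FFTKDE fit to reduce boundary bias; the reflected copies are zeroed and cropped out above\r\n            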
#plt.savefig('corr_plots/{}_2dhist.png'.format('_'.join(pair)))\n #plt.show()\n\n\n nsamp = 1000\n\n samples = np.array([])\n\n if nsamp > 1:\n for _ in range(0, nsamp):\n # draw an amplitude\n amp = np.random.normal(loc=-14.69, scale=0.05)\n indx = np.argmin(np.abs(x - amp)).squeeze()\n\n pdf = z[:, indx]\n pdf /= np.sum(pdf)\n\n samps = np.random.choice(y, size=1000, p=pdf)\n samples = np.concatenate((samples, samps))\n\n samples_pos = samples.copy()\n samples_pos = 2*(1) - samples_pos\n samples_neg = samples.copy()\n samples_neg = 2*(-1) - samples_neg\n\n samples = np.concatenate((samples_neg, samples, samples_pos))\n\n bw = 0.1\n\n data = samples.squeeze()\n grid_points = 4096 # Grid points in each dimension\n kde = FFTKDE(kernel='gaussian', bw=bw)\n grid, points = kde.fit(data).evaluate(grid_points)\n # The grid is of shape (obs, dims), points are of shape (obs, 1)\n y2 = np.unique(grid)\n\n z2 = points.reshape(grid_points).T\n\n z2[y2<=-1] = 0 # Set the KDE to zero outside of the domain\n z2[y2>=1] = 0 # Set the KDE to zero outside of the domain\n z2 = z2.squeeze() * 3 # multiply the kde to get integral of ~1\n\n indy = np.argwhere((y2>=-1)*(y2<=1)).squeeze()\n y2 = y2[indy]\n\n corr_hd2 = z2[indy] / np.sum(z2[indy])\n\n indx = np.argmin(np.abs(x + 14.69)).squeeze()\n corr_hd = z[:, indx]\n corr_hd /= np.sum(corr_hd)\n\n # if ('1713' in psr1 or '1713' in psr2) and not '1909' in psr2:\n # corr_hd = np.ones(np.shape(corr_hd))\n #corr_hd /= np.sum(corr_hd)\n\n corr_hd /= np.mean(np.diff(y))\n corr_hd2 /= np.mean(np.diff(y2))\n\n #plt.plot(y, corr_hd)\n #plt.plot(y2, corr_hd2)\n #yl = plt.ylim()\n #plt.ylim([0, yl[1]*1.1])\n #plt.xlim([-1, 1])\n #plt.savefig('corr_plots/{}_1dpdf.png'.format('_'.join(pair)))\n #plt.xlabel('Corr coeff')\n #plt.show()\n np.savez(\"corr_chains/{}_corr_hd_importance_more1713.npz\".format(i), corr_hd2, y2)\n print(\"Saved:\", \"corr_chains/{}_corr_hd_importance_more1713.npz\".format(i))\n print(\"\")\n continue\n\n else: # fixA, so only a 1d chain\n chain_1d = chain[:, ind].squeeze()\n\n chain_1d_reflect_corrs_pos = chain_1d.copy()\n chain_1d_reflect_corrs_pos = 2 - chain_1d_reflect_corrs_pos\n chain_1d_reflect_corrs_neg = chain_1d.copy()\n chain_1d_reflect_corrs_neg = -2 - chain_1d_reflect_corrs_neg\n\n chain_mirror = np.concatenate((chain_1d_reflect_corrs_neg, chain_1d, chain_1d_reflect_corrs_pos))\n\n bws = [0.075, 0.1, 0.125, 0.15]\n\n for bw in bws:\n\n data = chain_mirror.squeeze()\n grid_points = 1024 # Grid points in each dimension\n\n kde = FFTKDE(kernel='gaussian', bw=bw)\n grid, points = kde.fit(data).evaluate(grid_points)\n # The grid is of shape (obs, dims), points are of shape (obs, 1)\n y = np.unique(grid)\n # y is corr coeff\n z = points.reshape(grid_points)\n\n z[y<=-1] = 0 # Set the KDE to zero outside of the domain\n z[y>=1] = 0 # Set the KDE to zero outside of the domain\n z = z.squeeze() * 3 # multiply the kde to get integral of ~1\n\n indy = np.argwhere((y>=-1)*(y<=1)).squeeze()\n\n y = y[indy]\n z = z[indy]\n\n np.savez(\"corr_chains/{}_corr_hd_importance_fixA_{}.npz\".format(i, bw), z, y)\n print(\"Saved:\", \"corr_chains/{}_corr_hd_importance_fixA_{}.npz\".format(i, bw))\n print(\"\")\n continue\n\n\n\n sigma_k = np.std(np.random.choice(np.linspace(-1, 1, 256), p=corr_hd/np.sum(corr_hd), size=10000))\n q_16 = np.percentile(np.random.choice(np.linspace(-1, 1, 256), p=corr_hd/np.sum(corr_hd), size=10000), q=16)\n q_84 = np.percentile(np.random.choice(np.linspace(-1, 1, 256), p=corr_hd/np.sum(corr_hd), size=10000), q=84)\n sigma_68 = 
np.abs(q_84 - q_16)/2\n\n pos1 = pos[psr1]\n pos2 = pos[psr2]\n\n angsep = np.arccos(np.dot(pos1, pos2)) * 180/np.pi\n orf_val = hd_orf(np.array([angsep]))[0]\n dipole_val = dipole(np.array([angsep]))[0]\n\n try:\n likelihood_hd[psri] += np.log(corr_hd[np.argmin(np.abs(y_flat - orf_val))])\n likelihood_curn[psri] += np.log(corr_hd[np.argmin(np.abs(y_flat))])\n likelihood_null[psri] += np.log(null_prob[0])\n except KeyError:\n likelihood_hd[psri] = np.copy(np.log(corr_hd[np.argmin(np.abs(y_flat - orf_val))]))\n likelihood_curn[psri] = np.copy(np.log(corr_hd[np.argmin(np.abs(y_flat))]))\n likelihood_null[psri] = np.log(null_prob[0])\n\n ibin = np.argmin(np.abs(vals - angsep)) + 1\n try: orf_bins[\"bin_{}\".format(ibin)] *= corr_hd\n except KeyError as e: orf_bins[\"bin_{}\".format(ibin)] = corr_hd\n\n if not donepair['_'.join(pair)]:\n numerator += (sigma_k)**-2\n denominator += (sigma_k)**-4\n numerator_68 += (sigma_68)**-2\n denominator_68 += (sigma_68)**-4\n try:\n orf_bins_total[\"bin_{}\".format(ibin)] *= corr_hd\n n_bins[\"bin_{}\".format(ibin)] +=1\n except KeyError as e:\n orf_bins_total[\"bin_{}\".format(ibin)] = np.copy(corr_hd * null_prob)\n n_bins[\"bin_{}\".format(ibin)] = 1\n\n try:\n likelihood_hd_global += np.log(corr_hd[np.argmin(np.abs(y_flat - orf_val))])\n likelihood_curn_global += np.log(corr_hd[np.argmin(np.abs(y_flat))])\n likelihood_mono_global += np.log(corr_hd[np.argmin(np.abs(y_flat - 1))])\n likelihood_dipole_global += np.log(corr_hd[np.argmin(np.abs(y_flat - dipole_val))])\n likelihood_null_global += np.log(null_prob[0])\n except KeyError:\n likelihood_hd_global = corr_hd[np.argmin(np.abs(y_flat - orf_val))]\n likelihood_curn_global = corr_hd[np.argmin(np.abs(y_flat))]\n likelihood_mono_global = corr_hd[np.argmin(np.abs(y_flat - 1))]\n likelihood_dipole_global = corr_hd[np.argmin(np.abs(y_flat - dipole_val))]\n likelihood_null_global = np.log(null_prob[0])\n\n n_tot += 1\n donepair['_'.join(pair)] = True\n\n\n import sys\n sys.exit()\n\n # fig, ax = plt.subplots(1,1, figsize=(9,6))\n # for k in orf_bins.keys():\n # orf_bins[k] /= np.sum(orf_bins[k])\n # orf_bins[k] /= np.mean(np.diff(np.linspace(-1, 1, 256)))\n # draws = np.random.choice(np.linspace(-1, 1, 256), p=orf_bins[k]/np.sum(orf_bins[k]), size=100000)\n # ax = plot_violin(ax, seps[k], draws)\n\n\n # theta = np.linspace(0, 180, 1000)\n # orf = hd_orf(theta)\n # plt.plot(theta, orf, color='k', linewidth=3)\n # plt.ylim([-1, 1])\n # plt.savefig('corr_plots/{}.png'.format(psri))\n # plt.savefig('corr_plots/{}.pdf'.format(psri))\n # plt.close()\n\n #bf_hd[psri] = (likelihood_hd[psri] - likelihood_curn[psri])\n\n\n fig, ax1 = plt.subplots(1, 1, figsize=(9,6))\n ax2 = ax1.twinx()\n\n values = []\n for bini in range(1, nseps+1):\n #values.append(n_bins['bin_{}'.format(bini)])\n try:\n values.append(n_bins['bin_{}'.format(bini)])\n except KeyError:\n n_bins['bin_{}'.format(bini)] = 0\n values.append(n_bins['bin_{}'.format(bini)])\n edges = np.append(np.array(list(seps.values()) - halfdiff), 180)\n\n ax2.fill_between(np.insert(np.array(list(seps.values()) + halfdiff),0, 0),\n np.insert(np.array(values),0,values[0]),\n y2=-np.ones(len(values)+1),\n step=\"pre\", alpha=0.15, color='k')\n #plt.stairs(values, edges=edges, linewidth=1, color='k')\n\n for k in orf_bins_total.keys():\n orf_bins_total[k] /= np.sum(orf_bins_total[k])\n orf_bins_total[k] /= np.mean(np.diff(np.linspace(-1, 1, 256)))\n np.save('/Users/dreardon/Desktop/ppta_correlations/{}.npy'.format(k), orf_bins_total[k]/np.sum(orf_bins_total[k]))\n draws = 
np.random.choice(np.linspace(-1, 1, 256), p=orf_bins_total[k]/np.sum(orf_bins_total[k]), size=10000)\n ax = plot_violin(ax1, seps[k], draws, width=np.mean(np.diff(vals)), alpha=0.9)\n\n ibin = int(k.split('_')[-1])\n\n # for k in seps2.keys():\n # ibin = int(k.split('_')[-1])\n # ax = plot_violin(ax1, seps2[k], data_bins[:, -(14-ibin-1)], width=np.mean(np.diff(vals2)), colour='crimson', alpha=0.5)\n\n\n theta = np.linspace(0, 180, 1000)\n orf = hd_orf(theta)\n ax1.plot(theta, orf, color='k', linewidth=3)\n ax1.set_ylim([-1, 1])\n ax2.set_ylim([0, round(max(n_bins.values()), -1)])\n ax2.set_ylabel('Number of pulsar pairs')\n plt.xlim([0, 180])\n plt.tight_layout()\n ax1.set_zorder(ax2.get_zorder()+1)\n ax1.set_frame_on(False)\n plt.savefig('corr_plots/corr_total.png')\n plt.savefig('corr_plots/corr_total.pdf')\n plt.show()\n plt.close()\n\n neff = numerator**2 / denominator\n neff_68 = numerator_68**2 / denominator_68\n print(\"Number of effective pulsar pairs, using standard deviation = {}\".format(neff))\n print(\"Number of effective pulsar pairs, using 68% confidence = {}\".format(neff_68))\n print(len(donepair.keys()))\n\n # inds = np.argsort(list(bf_hd.values()))\n # for i in range(0, len(np.flip(np.array(list(bf_hd.keys()))[inds]))):\n # print(np.flip(np.array(list(bf_hd.keys()))[inds])[i],\n # np.flip(np.array(list(bf_hd.values()))[inds])[i])\n\n # print('')\n # bf_hd_psr = np.array(list(likelihood_hd.values())) - np.array(list(likelihood_null.values()))\n # inds = np.argsort(bf_hd_psr)\n # for i in range(0, len(np.flip(np.array(list(bf_hd.keys()))[inds]))):\n # print(np.flip(np.array(list(bf_hd.keys()))[inds])[i],\n # np.flip(bf_hd_psr[inds])[i])\n\n # print('')\n # bf_curn_psr = np.array(list(likelihood_curn.values())) - np.array(list(likelihood_null.values()))\n # inds = np.argsort(bf_curn_psr)\n # for i in range(0, len(np.flip(np.array(list(bf_hd.keys()))[inds]))):\n # print(np.flip(np.array(list(bf_hd.keys()))[inds])[i],\n # np.flip(bf_curn_psr[inds])[i])\n\n print(\" \")\n print('HD log likelihood difference')\n print(likelihood_hd_global - likelihood_null_global)\n print(\" \")\n print(\"HD S/N estimate\")\n print(np.sqrt(2*(likelihood_hd_global - likelihood_null_global)))\n\n\n\n ts = 2*(likelihood_hd_global - likelihood_null_global)\n\n ts_scrambles.append(ts)\n\n new_pos.clear()\n kde.clear()\n pdf.clear()\n ptot.clear()\n orf_bins_total.clear()\n donepair.clear()\n likelihood_hd.clear()\n likelihood_curn.clear()\n likelihood_null.clear()\n bf_hd.clear()\n n_bins.clear()\n seps.clear()\n seps2.clear()\n orf_bins.clear()\n\nts_scrambles = np.load(\"likelihood_ratios.npy\")\nts_scrambles = np.array(ts_scrambles)\n\nplt.hist(ts_scrambles, bins=20)\nyl=plt.ylim()\nplt.plot([1, 1], yl)\nplt.ylim(yl)\nplt.ylabel('N')\nplt.xlabel('Test statistic')\n\nlen(np.array(ts_scrambles)[(ts_scrambles > 1)]) / len(np.array(ts_scrambles)[(ts_scrambles < 1)]) * 100\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "MattTMiles/MPTAGW", "sub_path": "process_correlations.py", "file_name": "process_correlations.py", "file_ext": "py", "file_size_in_byte": 24765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.rc", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.rcParams.update", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.load", 
"line_number": 59, "usage_type": "call"}, {"api_name": "json.load", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "numpy.diff", "line_number": 262, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 264, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.savez", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 300, "usage_type": "call"}, {"api_name": "KDEpy.FFTKDE", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.random.normal", 
"line_number": 393, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 393, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 399, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 407, "usage_type": "call"}, {"api_name": "KDEpy.FFTKDE", "line_number": 413, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 424, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 427, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 429, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 437, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 461, "usage_type": "call"}, {"api_name": "KDEpy.FFTKDE", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.argwhere", "line_number": 481, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 493, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 494, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 495, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 495, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.arccos", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 501, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 502, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 503, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 506, "usage_type": "call"}, {"api_name": 
"numpy.abs", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 507, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 508, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 511, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 514, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 527, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 531, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 533, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 534, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 537, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 539, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 539, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 540, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 541, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 548, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 569, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "numpy.append", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 582, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 583, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 584, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.mean", 
"line_number": 590, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 592, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 592, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 593, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 602, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 608, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 608, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 609, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 609, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 612, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 612, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 613, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 613, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 614, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 614, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 615, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 615, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 647, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 671, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 673, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 673, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 674, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 674, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 675, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 675, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 676, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 676, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 677, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 677, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 678, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 678, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 680, "usage_type": "call"}]} +{"seq_id": "71806144484", "text": "# View all recorded frames and classify them. This will create the dataset for the neural network\n# To classify a frame, user will input a key, corresponding to the finger that is currently being pressed\n# the key-finger relationship is the same as putting fingers on a keyboard in the standard position, \n# in the middle row. 
The only exception, for practical reasons, is the thumbs, which correspond to \n# 'c' or 'v' instead of spacebar, which is reserved for 'raised' status.\n# Additionally it's also possible to undo the last classification by pressing 2, and change the classification\n# by pressing 'q'\n#\n# Results are stored in the file pushing.txt\n\nimport os\nimport cv2\n\nl_mig = []\nl_anu = []\nl_mid = []\nl_ind = []\nthumb = []\nr_mig = []\nr_anu = []\nr_mid = []\nr_ind = []\nraised = []\n\nnames = os.listdir('./ds_building/inputs/pictures')\nnames.sort()\n\ncv2.namedWindow('Finger Picker')\n\ni = 0\nmaxi = i + 1000\nwhile (i < maxi ):\n try:\n fname = names[names.index(str(i) + '.png')]\n img = cv2.imread('./ds_building/inputs/pictures/' + fname)\n except:\n i+=1\n continue\n if(img is None):\n continue\n\n cv2.imshow(str(i), cv2.resize(img,(1280,720)))\n \n k = cv2.waitKey(0)\n if (k == ord('q')):\n i = i - 2\n elif (k == ord('a')):\n l_mig.append(fname)\n elif (k == ord('s')):\n l_anu.append(fname)\n elif (k == ord('d')):\n l_mid.append(fname)\n elif (k == ord('f')):\n l_ind.append(fname)\n elif (k == ord('c') or k == ord('v')):\n thumb.append(fname)\n elif (k == ord('j')):\n r_ind.append(fname)\n elif (k == ord('k')):\n r_mid.append(fname)\n elif (k == ord('l')):\n r_anu.append(fname)\n elif (k == ord(';')):\n r_mig.append(fname)\n elif (k == ord('2')):\n os.remove('./ds_building/inputs/pictures/' + fname)\n os.remove('./ds_building/inputs/results/' + fname[:-4])\n \n i += 1\n\n cv2.destroyAllWindows()\n\n\nlists = [l_mig,l_anu,l_mid,l_ind,thumb,r_ind,r_mid,r_anu,r_mig, raised]\n\nwith open('./ds_building/pushing.txt','w') as f:\n for i in range(len(lists)):\n for img in lists[i]: \n f.write(img +' '+str(i) +'\\n') ", "repo_name": "ianirolab/invisible_keyboard", "sub_path": "ds_building/finger_picker.py", "file_name": "finger_picker.py", "file_ext": "py", "file_size_in_byte": 2182, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 44, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 66, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "23668001260", "text": "from functools import wraps\nfrom flask_jwt_extended import verify_jwt_in_request, get_jwt_identity\nfrom ..services import conta_service, operacao_service\nfrom flask import make_response, jsonify\n\ndef user_conta(view_function):\n @wraps(view_function)\n def decorator_funtion(*args, **kwargs):\n verify_jwt_in_request() #check that a token key exists\n usuario_logado = get_jwt_identity()\n conta = conta_service.listar_conta_id(kwargs['id'])\n if conta is None:\n return make_response(jsonify(\"Conta não existe\"), 404)\n elif conta.usuario_id == usuario_logado:\n return view_function(*args, **kwargs)\n else:\n return make_response(jsonify(\"Esta conta não pertence ao usuário logado\"), 403)\n return decorator_funtion\n\ndef user_operacao(view_function):\n @wraps(view_function)\n def decorator_funtion(*args, **kwargs):\n 
verify_jwt_in_request()\n usuario_logado = get_jwt_identity()\n operacao = operacao_service.listar_operacao_id(kwargs['id'])\n if operacao is None:\n return make_response(jsonify(\"Operação não encontrada\"), 404)\n else:\n conta = conta_service.listar_conta_id(operacao.conta_id)\n if conta.usuario_id == usuario_logado:\n return view_function(*args, **kwargs)\n else:\n return make_response ( jsonify ( \"Esta operação não pertence ao usuário logado\" ), 403 )\n return decorator_funtion", "repo_name": "Marilainny/python-app-flutter", "sub_path": "api/decorators/autorizacao.py", "file_name": "autorizacao.py", "file_ext": "py", "file_size_in_byte": 1499, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask_jwt_extended.verify_jwt_in_request", "line_number": 9, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 10, "usage_type": "call"}, {"api_name": "services.conta_service.listar_conta_id", "line_number": 11, "usage_type": "call"}, {"api_name": "services.conta_service", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 17, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_jwt_extended.verify_jwt_in_request", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 24, "usage_type": "call"}, {"api_name": "services.operacao_service.listar_operacao_id", "line_number": 25, "usage_type": "call"}, {"api_name": "services.operacao_service", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "services.conta_service.listar_conta_id", "line_number": 29, "usage_type": "call"}, {"api_name": "services.conta_service", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.make_response", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "44775981936", "text": "import os\nimport cv2\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom IPython import display as ipythondisplay\nimport base64\nimport numpy as np\ndef evaluate_real(model, env, n_eval_episodes=3, episode_length=100, video_rate=None, video_prefix=\"\"):\n\n episodic_rewards = []\n for i in range(n_eval_episodes):\n print(f\"Start evaluation episode {i} of {n_eval_episodes}\")\n\n episodic_reward = 0\n obs = env.reset()\n for _step in tqdm(range(episode_length)):\n print(\"episode: \", i,)\n action, _ = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n episodic_reward += reward\n\n episodic_rewards.append(episodic_reward)\n mean_episodic_reward = np.mean(episodic_rewards)\n print(f\"Finished evaluation with mean episodic reward: {mean_episodic_reward}\")\ndef evaluate(model, env, n_eval_episodes=3, episode_length=100, video_rate=None, video_prefix=\"\"):\n video_folder = \"videos/\"\n # Create output folder if needed\n os.makedirs(video_folder, exist_ok=True)\n\n episodic_rewards = []\n for i in 
range(n_eval_episodes):\n print(f\"Start evaluation episode {i} of {n_eval_episodes}\")\n img_array = []\n episodic_reward = 0\n obs = env.reset()\n for _step in tqdm(range(episode_length)):\n action, _ = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n episodic_reward += reward\n if video_rate is not None:\n img = env.render(\"rgb_array\")\n if 0 not in img.shape:\n img_array.append(img)\n\n if video_rate is not None:\n video_file = f\"{video_prefix}_{i}\"\n path = Path(video_folder) / video_file\n print(\"Start video writer\")\n height, width, _ = img_array[-1].shape\n size = (width, height)\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n out = cv2.VideoWriter(f\"{video_folder}/temp.mp4\", fourcc, video_rate, size)\n\n for img in img_array:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n out.write(img)\n out.release()\n\n os.system(\n f\"ffmpeg -y -i {video_folder}/temp.mp4 -vcodec libx264 -f mp4 {path}.mp4 >> /tmp/ffmpeg_{video_prefix}_{i}.txt 2>&1\"\n )\n\n print(f\"Showing episode {i} with episodic reward: {episodic_reward}\")\n show_video(video_file=video_file, video_folder=video_folder)\n\n os.remove(f\"{video_folder}/temp.mp4\")\n\n episodic_rewards.append(episodic_reward)\n mean_episodic_reward = np.mean(episodic_rewards)\n print(f\"Finished evaluation with mean episodic reward: {mean_episodic_reward}\")\n\ndef show_video(video_file, video_folder=\"videos/\"):\n \"\"\"\n Adapted from https://colab.research.google.com/github/Stable-Baselines-Team/rl-colab-notebooks/blob/sb3/stable_baselines_getting_started.ipynb\n\n which was taken from https://github.com/eleurent/highway-env\n\n :param video_path: (str) Path to the folder containing videos\n :param prefix: (str) Filter the video, showing only the only starting with this prefix\n \"\"\"\n html = []\n mp4 = Path(video_folder) / f\"{video_file}.mp4\"\n video_b64 = base64.b64encode(mp4.read_bytes())\n html.append(\n \"\"\"\"\"\".format(\n mp4, video_b64.decode(\"ascii\")\n )\n )\n ipythondisplay.display(ipythondisplay.HTML(data=\"
\".join(html)))", "repo_name": "runyuma/eagerx_sideproject", "sub_path": "helper.py", "file_name": "helper.py", "file_ext": "py", "file_size_in_byte": 3675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tqdm.tqdm", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 23, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 59, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 69, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 82, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 83, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 92, "usage_type": "call"}, {"api_name": "IPython.display", "line_number": 92, "usage_type": "name"}, {"api_name": "IPython.display.HTML", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "13434894159", "text": "from collections import OrderedDict\nfrom urllib.parse import urlparse\n\nimport binascii\n'''\ntitle : blockchain.py\ndescription : A blockchain implemenation\nauthor : Adil Moujahid\ndate_created : 20180212\ndate_modified : 20180309\nversion : 0.5\nusage : python blockchain.py\n python blockchain.py -p 5000\n python blockchain.py --port 5000\npython_version : 3.6.1\nComments : The blockchain implementation is mostly based on [1]. 
\n I made a few modifications to the original code in order to add RSA encryption to the transactions \n based on [2], changed the proof of work algorithm, and added some Flask routes to interact with the \n blockchain from the dashboards\nReferences : [1] https://github.com/dvf/blockchain/blob/master/blockchain.py\n [2] https://github.com/julienr/ipynb_playground/blob/master/bitcoin/dumbcoin/dumbcoin.ipynb\n'''\n\nimport Crypto\nimport Crypto.Random\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\n\nimport hashlib\nimport json\n\nfrom time import time\nfrom uuid import uuid4\n\nMINING_SENDER = \"THE BLOCKCHAIN\"\nMINING_REWARD = 100\nMINING_DIFFICULTY = 2\n\nclass Blockchain:\n\n def __init__(self):\n \n self.votes = []\n self.chain = []\n self.nodes = set()\n #Generate random number to be used as node_id\n self.node_id = str(uuid4()).replace('-', '')\n #Create genesis block\n self.create_block(0, '00')\n\n def register_node(self, node_url):\n \"\"\"\n Add a new node to the list of nodes\n \"\"\"\n #Checking node_url has valid format\n parsed_url = urlparse(node_url)\n if parsed_url.netloc:\n self.nodes.add(parsed_url.netloc)\n elif parsed_url.path:\n # Accepts a URL without scheme like '192.168.0.5:5000'.\n self.nodes.add(parsed_url.path)\n else:\n raise ValueError('Invalid URL')\n\n def verify_vote_signature(self, voter_id, signature, vote):\n \"\"\"\n Check that the provided signature corresponds to a vote\n signed by the public key (voter_id)\n \"\"\"\n public_key = RSA.importKey(binascii.unhexlify(voter_id))\n verifier = PKCS1_v1_5.new(public_key)\n h = SHA.new(str(vote).encode('utf8'))\n return verifier.verify(h, binascii.unhexlify(signature))\n\n def submit_vote(self, voter_id, poll_id, value, signature):\n \"\"\"\n Add a vote to the votes array if the signature is verified\n \"\"\"\n vote = OrderedDict({'voter_id': voter_id, \n 'poll_id': poll_id,\n 'value': value})\n\n # Reward for mining a block\n if voter_id == MINING_SENDER:\n self.votes.append(vote)\n return len(self.chain) + 1\n # Manages votes sent from one wallet to another\n else:\n vote_verification = self.verify_vote_signature(voter_id, signature, vote)\n if vote_verification:\n self.votes.append(vote)\n return len(self.chain) + 1\n else:\n return False\n\n def create_block(self, nonce, previous_hash):\n \"\"\"\n Add a block of votes to the blockchain\n \"\"\"\n block = {'block_number': len(self.chain) + 1,\n 'timestamp': time(),\n 'votes': self.votes,\n 'nonce': nonce,\n 'previous_hash': previous_hash}\n\n # Reset the current list of votes\n self.votes = []\n\n self.chain.append(block)\n return block\n\n def hash(self, block):\n \"\"\"\n Create a SHA-256 hash of a block\n \"\"\"\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n \n return hashlib.sha256(block_string).hexdigest()\n\n def proof_of_work(self):\n \"\"\"\n Proof of work algorithm\n \"\"\"\n last_block = self.chain[-1]\n last_hash = self.hash(last_block)\n\n nonce = 0\n while self.valid_proof(self.votes, last_hash, nonce) is False:\n nonce += 1\n\n return nonce\n\n def valid_proof(self, votes, last_hash, nonce, difficulty=MINING_DIFFICULTY):\n \"\"\"\n Check if a hash value satisfies the mining conditions. 
This function is used within the proof_of_work function.\n \"\"\"\n guess = (str(votes)+str(last_hash)+str(nonce)).encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:difficulty] == '0'*difficulty\n\n def valid_chain(self, chain):\n \"\"\"\n Check if a blockchain is valid\n \"\"\"\n last_block = chain[0]\n current_index = 1\n\n while current_index < len(chain):\n block = chain[current_index]\n #print(last_block)\n #print(block)\n #print(\"\\n-----------\\n\")\n # Check that the hash of the block is correct\n if block['previous_hash'] != self.hash(last_block):\n return False\n\n # Check that the Proof of Work is correct\n #Delete the reward vote\n votes = block['votes'][:-1]\n # Need to make sure that the dictionary is ordered. Otherwise we'll get a different hash\n vote_elements = ['voter_id', 'poll_id', 'value']\n votes = [OrderedDict((k, vote[k]) for k in vote_elements) for vote in votes]\n\n if not self.valid_proof(votes, block['previous_hash'], block['nonce'], MINING_DIFFICULTY):\n return False\n\n last_block = block\n current_index += 1\n\n return True\n\n def resolve_conflicts(self):\n \"\"\"\n Resolve conflicts between blockchain nodes\n by replacing our chain with the longest one in the network.\n \"\"\"\n neighbours = self.nodes\n new_chain = None\n\n # We're only looking for chains longer than ours\n max_length = len(self.chain)\n\n # Grab and verify the chains from all the nodes in our network\n for node in neighbours:\n print('http://' + node + '/chain')\n response = requests.get('http://' + node + '/chain')\n\n if response.status_code == 200:\n length = response.json()['length']\n chain = response.json()['chain']\n\n # Check if the length is longer and the chain is valid\n if length > max_length and self.valid_chain(chain):\n max_length = length\n new_chain = chain\n\n # Replace our chain if we discovered a new, valid chain longer than ours\n if new_chain:\n self.chain = new_chain\n return True\n\n return False", "repo_name": "wpine215/p2poll", "sub_path": "example/blockchain/blockchain.py", "file_name": "blockchain.py", "file_ext": "py", "file_size_in_byte": 6905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "uuid.uuid4", "line_number": 48, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 57, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA.importKey", "line_number": 71, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 71, "usage_type": "name"}, {"api_name": "binascii.unhexlify", "line_number": 71, "usage_type": "call"}, {"api_name": "Crypto.Signature.PKCS1_v1_5.new", "line_number": 72, "usage_type": "call"}, {"api_name": "Crypto.Signature.PKCS1_v1_5", "line_number": 72, "usage_type": "name"}, {"api_name": "Crypto.Hash.SHA.new", "line_number": 73, "usage_type": "call"}, {"api_name": "Crypto.Hash.SHA", "line_number": 73, "usage_type": "name"}, {"api_name": "binascii.unhexlify", "line_number": 74, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 102, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 118, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 120, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 140, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "11932631444", "text": 
"import tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_datasets as tfds\nimport numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport io\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport json\n\n\"\"\"\n!wget https://raw.githubusercontent.com/aliakbarbadri/nlp-tf/master/Sarcasm_Headlines_Dataset_v2.json \\\n -O /tmp/sarcasm.json\n\"\"\"\n\nsarcasm_data = [json.loads(line) for line in open('/tmp/sarcasm.json', 'r')]\n# len(sarcasm_data), sarcasm_data[0]\nsentences = []\nlabels = []\n\nfor item in sarcasm_data:\n sentences.append(item['headline'])\n labels.append(item['is_sarcastic'])\n\n# ------------------------------------- Prepare data\n\nvocab_size = 1000\nembedding_dim = 16\nmax_length = 120\ntrunc_type = 'post'\npadding_type = 'post'\noov_tok = \"\"\ntraining_size = 20000\n\ntraining_sentences = sentences[0:training_size]\ntesting_sentences = sentences[training_size:]\ntraining_labels = labels[0:training_size]\ntesting_labels = labels[training_size:]\n\ntokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)\ntokenizer.fit_on_texts(training_sentences)\n\nword_index = tokenizer.word_index\n\ntraining_sequences = tokenizer.texts_to_sequences(training_sentences)\ntraining_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntesting_sequences = tokenizer.texts_to_sequences(testing_sentences)\ntesting_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)\n\ntraining_padded = np.array(training_padded)\ntraining_labels = np.array(training_labels)\ntesting_padded = np.array(testing_padded)\ntesting_labels = np.array(testing_labels)\n\n# --------------------------------------------------\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),\n tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),\n tf.keras.layers.Dense(24, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\nhistory = model.fit(training_padded, training_labels, epochs=50, validation_data=(testing_padded, testing_labels),\n verbose=1)\n", "repo_name": "salman-/small-codes-for-tensorflow-certificate-exam", "sub_path": "tf-datasets/sarcasem.py", "file_name": "sarcasem.py", "file_ext": "py", "file_size_in_byte": 2322, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.text.Tokenizer", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.sequence.pad_sequences", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Embedding", 
"line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Bidirectional", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "11231641586", "text": "from src.db_helper import session, User, MessageQueue, UserMessage\nimport src.logging_helper as logging\n\nfrom telethon.errors import FloodWaitError\nimport asyncio\nimport os\nimport configparser\nimport datetime\nfrom sqlalchemy.sql.expression import func\n\n\nconfig = configparser.ConfigParser(os.environ)\nconfig_path = os.path.dirname(__file__) + '/../config/' #we need this trick to get path to config folder\nconfig.read(config_path + 'settings.ini')\n\nlogger = logging.get_logger()\n\nasync def add_message_to_queue(message, is_test=False, session=None):\n new_message = MessageQueue(message=message, is_test=is_test)\n session.add(new_message)\n session.commit()\n\n if is_test == True: #if it's test message, send it only to admin\n users = session.query(User).filter(User.id == int(config['TELEGRAM']['ADMIN_ID'])).all()\n else:\n users = session.query(User).all()\n for user in users:\n user_message = UserMessage(user_id=user.id, message_queue_id=new_message.id, status='queued')\n session.add(user_message)\n session.commit()\n\n\nasync def process_message_queue(client, messages_to_send=10, delay_between_messages=10, session=None):\n unsent_user_messages = (\n session.query(UserMessage)\n .filter(UserMessage.status == 'queued')\n .join(MessageQueue)\n .order_by(func.random()) # Add this line to randomize the order of the rows\n .limit(messages_to_send)\n .all()\n )\n\n dialogs = await client.get_dialogs()\n\n for user_message in unsent_user_messages:\n message = user_message.message_queue\n user = session.query(User).filter(User.id == user_message.user_id).first()\n\n message_processed = False\n\n for dialog in dialogs:\n if dialog.id == user.id:\n try:\n await client.send_message(user.id, message.message, link_preview=False)\n user_message.sent_at = datetime.datetime.utcnow()\n user_message.status = 'sent'\n message_processed = True\n except Exception as e:\n if \"Too many requests (caused by SendMessageRequest)\" in str(e):\n if user_message.error_count < int(config['ANNOUNCE']['MAX_ERROR_COUNT']):\n user_message.error_count += 1\n session.commit()\n logger.warning(f\"Too many requests, stopping the script. Error details: {e}\")\n message_processed = True\n return\n else:\n logger.warning(f\"Too many requests more then MAX_ERROR_COUNT={int(config['ANNOUNCE']['MAX_ERROR_COUNT'])}. 
Error details: {e}\")\n user_message.status = 'error'\n message_processed = True\n return\n else:\n logger.error(f\"Error sending message to user {user.id}: {e}\")\n user_message.status = 'error'\n finally:\n break\n else:\n continue\n\n if message_processed == False: #if we haven't find user in the list of dialogs, try to send anyway\n try:\n await client.send_message(user.id, message.message, link_preview=False)\n user_message.sent_at = datetime.datetime.utcnow()\n user_message.status = 'sent'\n except Exception as e:\n if \"Too many requests (caused by SendMessageRequest)\" in str(e):\n if user_message.error_count < int(config['ANNOUNCE']['MAX_ERROR_COUNT']):\n user_message.error_count += 1\n session.commit()\n logger.warning(f\"Too many requests, stopping the script. Error details: {e}\")\n return\n else:\n logger.warning(f\"Too many requests more then MAX_ERROR_COUNT={int(config['ANNOUNCE']['MAX_ERROR_COUNT'])}. Error details: {e}\")\n user_message.status = 'error'\n return\n else:\n logger.error(f\"Error sending message to user {user.id}: {e}\")\n user_message.status = 'error'\n\n session.commit()\n\n # You can add a delay here if needed to avoid being flagged as spam\n await asyncio.sleep(delay_between_messages)", "repo_name": "rvnikita/RvChat_bot", "sub_path": "src/announce_helper.py", "file_name": "announce_helper.py", "file_ext": "py", "file_size_in_byte": 4490, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "configparser.ConfigParser", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "src.logging_helper.get_logger", "line_number": 16, "usage_type": "call"}, {"api_name": "src.logging_helper", "line_number": 16, "usage_type": "name"}, {"api_name": "src.db_helper.MessageQueue", "line_number": 19, "usage_type": "call"}, {"api_name": "src.db_helper.session.add", "line_number": 20, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 20, "usage_type": "name"}, {"api_name": "src.db_helper.session.commit", "line_number": 21, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 21, "usage_type": "name"}, {"api_name": "src.db_helper.session.query", "line_number": 24, "usage_type": "call"}, {"api_name": "src.db_helper.User", "line_number": 24, "usage_type": "argument"}, {"api_name": "src.db_helper.session", "line_number": 24, "usage_type": "name"}, {"api_name": "src.db_helper.User.id", "line_number": 24, "usage_type": "attribute"}, {"api_name": "src.db_helper.session.query", "line_number": 26, "usage_type": "call"}, {"api_name": "src.db_helper.User", "line_number": 26, "usage_type": "argument"}, {"api_name": "src.db_helper.session", "line_number": 26, "usage_type": "name"}, {"api_name": "src.db_helper.UserMessage", "line_number": 28, "usage_type": "call"}, {"api_name": "src.db_helper.session.add", "line_number": 29, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 29, "usage_type": "name"}, {"api_name": "src.db_helper.session.commit", "line_number": 30, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 30, "usage_type": "name"}, {"api_name": "src.db_helper.MessageQueue", "line_number": 37, "usage_type": "argument"}, {"api_name": "src.db_helper.session.query", "line_number": 35, 
"usage_type": "call"}, {"api_name": "src.db_helper.UserMessage", "line_number": 35, "usage_type": "argument"}, {"api_name": "src.db_helper.session", "line_number": 35, "usage_type": "name"}, {"api_name": "src.db_helper.UserMessage.status", "line_number": 36, "usage_type": "attribute"}, {"api_name": "src.db_helper.UserMessage", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.expression.func.random", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.expression.func", "line_number": 38, "usage_type": "name"}, {"api_name": "src.db_helper.session.query", "line_number": 47, "usage_type": "call"}, {"api_name": "src.db_helper.User", "line_number": 47, "usage_type": "argument"}, {"api_name": "src.db_helper.session", "line_number": 47, "usage_type": "name"}, {"api_name": "src.db_helper.User.id", "line_number": 47, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 55, "usage_type": "attribute"}, {"api_name": "src.db_helper.session.commit", "line_number": 62, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 62, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "attribute"}, {"api_name": "src.db_helper.session.commit", "line_number": 88, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 88, "usage_type": "name"}, {"api_name": "src.db_helper.session.commit", "line_number": 99, "usage_type": "call"}, {"api_name": "src.db_helper.session", "line_number": 99, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "29223214494", "text": "from flask import Flask, request, send_from_directory, jsonify\nimport yaml\nfrom dotenv import load_dotenv\nfrom oauth import request_oauth_token\nfrom datetime import datetime\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\napp = Flask(__name__, static_folder='public')\nload_dotenv()\n\n# Configure logging\nlogging.basicConfig(filename='app.log', level=logging.INFO, format='%(asctime)s [Process %(process)d] [%(levelname)s] - %(message)s')\n\nhandler = RotatingFileHandler('app.log', maxBytes=5*1024*1024, backupCount=5)\nlogging.getLogger().addHandler(handler)\n\nlogger = logging.getLogger(__name__)\n\nclass Person:\n def __init__(self, username, real_name, first_chatted, last_chatted, location='', chatter_type='unknown', interaction_count=0, aspirations='', journal=[], message_count=0):\n self.username = username\n self.realName = real_name\n self.location = location\n self.chatter_type = chatter_type\n self.interaction_count = interaction_count\n self.aspirations = aspirations\n self.message_count = message_count\n self.journal = journal\n self.first_chatted = first_chatted\n self.last_chatted = last_chatted\n\n\ndef load_people_data_from_yaml(yaml_str):\n data = yaml.load(yaml_str, Loader=yaml.FullLoader)\n\n people = []\n for person_data in data.get('data', []):\n person = Person(\n person_data.get('username'),\n person_data.get('realName'),\n person_data.get('first_chatted'),\n person_data.get('last_chatted'),\n person_data.get('location'),\n person_data.get('chatter_type'),\n person_data.get('interaction_count', 0),\n person_data.get('aspirations'),\n person_data.get('journal'),\n person_data.get('message_count', 0)\n )\n people.append(person)\n return people\n\ndef 
reset_messages_count():\n filename = 'people.yaml'\n try:\n with open(filename, 'r') as file:\n data = yaml.load(file, Loader=yaml.FullLoader)\n print('Resetting message counts ...')\n\n for person_data in data.get('data', []):\n # Reset the message_count to 0\n person_data['message_count'] = 0\n\n # Write the updated data back to the same file\n with open(filename, 'w') as file:\n yaml.dump(data, file, default_flow_style=False)\n\n print(f\"Message counts reset and saved to {filename}\")\n except Exception as e:\n print(f\"An error occurred: {str(e)}\")\n\n@app.route('/')\ndef index():\n return send_from_directory('public', 'index.html')\n\n@app.route('/')\ndef serve_static_file(filename):\n return send_from_directory(app.static_folder, filename)\n\n@app.route('/log', methods=['GET'])\ndef log():\n data = request.json\n print('Data logged from client:', data)\n\n yaml_str = yaml.dump(data)\n with open('update.yaml', 'w') as file:\n file.write(yaml_str)\n return '', 200\n\n@app.route('/message_count', methods=['POST'])\ndef message_count():\n data = request.json\n username = data.get('username')\n\n if not username:\n return jsonify({\"error\": \"Username not provided\"}), 400\n\n with open('people.yaml', 'r') as file:\n people_list = yaml.safe_load(file)['data']\n\n user_found = False\n for person in people_list:\n if person['username'].lower() == username.lower():\n user_found = True\n if person['chatter_type'] != 'bot':\n person['message_count'] += 1\n person['last_chatted'] = datetime.now().strftime(\"%Y-%m-%d\")\n action = \"incremented\"\n logger.info(f\"Message count incremented for user {username}, and last_chatted updated\")\n else:\n logger.info(f\"{person['username'].lower()} was a bot, no changes were made\")\n return jsonify({\"message\": f\"User {username} was a bot, no changes were made\"}), 200\n\n if not user_found and username.lower() != 'bot':\n current_date = datetime.now().strftime(\"%Y-%m-%d\")\n new_person = {\n 'username': username,\n 'chatter_type': 'known',\n 'message_count': 1,\n 'first_chatted': current_date,\n 'last_chatted': current_date,\n 'journal': []\n }\n people_list.append(new_person)\n action = \"added\"\n\n updated_yaml_str = yaml.dump({\"data\": people_list})\n with open('people.yaml', 'w') as file:\n file.write(updated_yaml_str)\n\n if action == \"incremented\":\n return jsonify({\"message\": f\"Message count incremented for user {username}, and last_chatted updated\"}), 200\n elif action == \"added\":\n return jsonify({\"message\": f\"User {username} added and message count incremented\"}), 200\n\n@app.route('/journal', methods=['GET', 'POST'])\ndef journal():\n\n if request.method == 'GET':\n return \"I am up\", 200\n\n data = request.json\n username = data.get('username')\n entry = data.get('entry')\n\n if not username or not entry:\n return jsonify({\"error\": \"Username or entry not provided\"}), 400\n\n with open('people.yaml', 'r') as file:\n people_list = yaml.safe_load(file)['data']\n\n user_found = False\n for person in people_list:\n if person['username'].lower() == username.lower():\n user_found = True\n journal_entry = {\n 'date': datetime.now().strftime(\"%Y-%m-%d\"),\n 'entry': entry\n }\n person['journal'].append(journal_entry)\n person['chatter_type'] = 'known'\n break\n\n if not user_found:\n return jsonify({\"error\": f\"No user found with username {username}\"}), 404\n\n updated_data = {\"data\": people_list}\n updated_yaml_str = yaml.dump(updated_data)\n\n with open('people.yaml', 'w') as file:\n file.write(updated_yaml_str)\n\n 
return jsonify({\"message\": f\"Journal updated for user {username}\"}), 200\n\n\n@app.route('/people', methods=['GET'])\ndef people():\n with open('people.yaml', 'r') as file:\n yaml_str = file.read()\n parsed_yaml = yaml.load(yaml_str, Loader=yaml.FullLoader)\n return jsonify(parsed_yaml.get('data', []))\n\nif __name__ == '__main__':\n reset_messages_count()\n # Start the Flask app in a separate thread\n from threading import Thread\n def start_app():\n app.run(port=3000)\n\n app_thread = Thread(target=start_app)\n app_thread.start()\n", "repo_name": "justinhennessy/twitch-contacts", "sub_path": "backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 6465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.handlers.RotatingFileHandler", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 35, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 35, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 58, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 58, "usage_type": "attribute"}, {"api_name": "yaml.dump", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 97, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 116, "usage_type": "name"}, {"api_name": "yaml.dump", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 148, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 151, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 158, "usage_type": "call"}, 
{"api_name": "datetime.datetime", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 166, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 174, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 181, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 182, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 191, "usage_type": "call"}]} +{"seq_id": "36942134523", "text": "from typing import Dict, Union\nfrom pydantic import BaseModel, Extra\nimport xarray as xr\nfrom tsdat import DataReader\nimport os\nimport getpass\nfrom datetime import datetime\nimport pandas as pd\nimport numpy as np\nfrom mhkit.wave.resource import energy_flux, energy_period\nfrom mhkit.wave.performance import capture_length_matrix\n\n\nclass MbariWecDataReader(DataReader):\n \"\"\"---------------------------------------------------------------------------------\n Custom Data Reader for MBARI WEC 2021 dataset.\n\n Implementation based on https://github.com/SNL-WaterPower/fbWecCntrl/blob/master/mbari_wec/mbari_wec_2021_example.py\n\n ---------------------------------------------------------------------------------\"\"\"\n\n class Parameters(BaseModel, extra=Extra.forbid):\n \"\"\"\n density [km/m^3]: water density\n depth [m]: depth\n gravity [m/s^2]: earth's gravity acceleration\n \"\"\"\n\n density: float = 1025.0\n depth: float = 100.0\n gravity: float = 9.81\n\n parameters: Parameters = Parameters()\n \"\"\"Extra parameters that can be set via the retrieval configuration file. If you opt\n to not use any configuration parameters then please remove the code above.\"\"\"\n\n def read(self, input_key: str) -> Union[xr.Dataset, Dict[str, xr.Dataset]]:\n file_name = input_key\n rho = self.parameters.density\n h = self.parameters.depth\n g = self.parameters.gravity\n\n #%% Read bulk parameters\n date_parser = lambda epoch: pd.to_datetime(epoch, unit=\"s\")\n dat = pd.read_csv(\n file_name,\n index_col=3,\n usecols=np.insert(np.arange(13), -1, [364, 365, 366]),\n parse_dates=[\"Epoch Time\"],\n date_parser=date_parser,\n )\n b = dat.to_xarray()\n\n #%% Frequency array\n dat1 = pd.read_csv(\n file_name,\n index_col=[],\n usecols=np.arange(13, 13 + 38 + 1),\n )\n freq_array = dat1.iloc[0].to_xarray()\n freq_array.name = \"Frequency\"\n\n dat2 = pd.read_csv(\n file_name,\n index_col=[],\n usecols=np.arange(13 + 38 + 1, 13 + 2 * (38 + 1)),\n )\n df_array = (\n dat2.iloc[0]\n .to_xarray()\n .assign_coords(dict(index=freq_array.values))\n .rename(dict(index=\"Frequency\"))\n )\n df_array.name = \"df\"\n df_array.attrs[\"long_name\"] = \"Frequency spacing\"\n df_array.attrs[\"units\"] = \"Hz\"\n\n #%% a and b parameters\n names = [\"a1\", \"b1\", \"a2\", \"b2\"]\n tmp_list = []\n for idx, name in enumerate(names):\n dat_tmp = pd.read_csv(\n file_name,\n index_col=[0],\n usecols=np.insert(\n np.arange(13 + (2 + idx) * (38 + 1), 13 + (3 + idx) * (38 + 1)),\n 0,\n 3,\n ),\n date_parser=date_parser,\n )\n tmp_da = dat_tmp.to_xarray().to_array(dim=\"Frequency\", name=name)\n tmp_da = tmp_da.assign_coords({\"Frequency\": freq_array.values})\n tmp_list.append(tmp_da)\n\n ab_ds = xr.merge(tmp_list)\n\n #%% Spectral density, spreading, etc.\n dat_S = pd.read_csv(\n file_name,\n index_col=[0],\n usecols=np.insert(np.arange(13 + 6 * (38 + 1), 13 + 7 * (38 + 1)), 0, 3),\n 
date_parser=date_parser,\n )\n S = dat_S.to_xarray().to_array(dim=\"Frequency\", name=\"Variance density\")\n S = S.assign_coords({\"Frequency\": freq_array.values})\n\n dat_dir = pd.read_csv(\n file_name,\n index_col=[0],\n usecols=np.insert(np.arange(13 + 7 * (38 + 1), 13 + 8 * (38 + 1)), 0, 3),\n date_parser=date_parser,\n )\n Dir = dat_dir.to_xarray().to_array(dim=\"Frequency\", name=\"Direction\")\n Dir = Dir.assign_coords({\"Frequency\": freq_array.values})\n\n dat_spread = pd.read_csv(\n file_name,\n index_col=[0],\n usecols=np.insert(np.arange(13 + 8 * (38 + 1), 13 + 9 * (38 + 1)), 0, 3),\n date_parser=date_parser,\n )\n spread = dat_spread.to_xarray().to_array(\n dim=\"Frequency\", name=\"Directional spread\"\n )\n spread = spread.assign_coords({\"Frequency\": freq_array.values})\n\n #%% Combine, clean up\n\n da = xr.merge([b, ab_ds, S, Dir, spread, df_array])\n da[\"Battery Voltage (V)\"].attrs[\"units\"] = \"V\"\n da[\"Battery Voltage (V)\"].attrs[\"long_name\"] = \"Battery voltage\"\n\n da[\"Power (W)\"].attrs[\"units\"] = \"W\"\n da[\"Power (W)\"].attrs[\"long_name\"] = \"Battery power\"\n\n da[\"Humidity (%rel)\"].attrs[\"units\"] = \"1\"\n da[\"Humidity (%rel)\"].attrs[\"standard_name\"] = \"relative_humidity\"\n da[\"Humidity (%rel)\"].attrs[\"long_name\"] = \"Relative humidity\"\n\n da[\"Significant Wave Height (m)\"].attrs[\"units\"] = \"m\"\n da[\"Significant Wave Height (m)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_significant_height\"\n da[\"Significant Wave Height (m)\"].attrs[\"long_name\"] = \"Significant wave height\"\n\n da[\"Direction\"].attrs[\"units\"] = \"degree\"\n da[\"Direction\"].attrs[\"long_name\"] = \"\"\n\n da[\"Peak Period (s)\"].attrs[\"units\"] = \"s\"\n da[\"Peak Period (s)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_period_at_variance_spectral_density_maximum\"\n da[\"Peak Period (s)\"].attrs[\"long_name\"] = \"Peak period\"\n\n da[\"Mean Period (s)\"].attrs[\"units\"] = \"s\"\n da[\"Mean Period (s)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_zero_upcrossing_period\"\n da[\"Mean Period (s)\"].attrs[\"long_name\"] = \"Mean period\"\n\n da[\"Peak Direction (deg)\"].attrs[\"units\"] = \"degree\"\n da[\"Peak Direction (deg)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_from_direction_at_variance_spectral_density_maximum\"\n da[\"Peak Direction (deg)\"].attrs[\"long_name\"] = \"Peak direction\"\n\n da[\"Peak Directional Spread (deg)\"].attrs[\"units\"] = \"degree\"\n da[\"Peak Directional Spread (deg)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_directional_spread_at_variance_spectral_density_maximum\"\n da[\"Peak Directional Spread (deg)\"].attrs[\n \"long_name\"\n ] = \"Peak directional spread\"\n\n da[\"Mean Direction (deg)\"].attrs[\"units\"] = \"degree\"\n da[\"Mean Direction (deg)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_from_direction\"\n da[\"Mean Direction (deg)\"].attrs[\"long_name\"] = \"Mean direction\"\n\n da[\"Mean Directional Spread (deg)\"].attrs[\"units\"] = \"degree\"\n da[\"Mean Directional Spread (deg)\"].attrs[\n \"long_name\"\n ] = \"Mean directional spread\"\n\n da[\"Latitude (deg)\"].attrs[\"units\"] = \"degree_north\"\n da[\"Latitude (deg)\"].attrs[\"standard_name\"] = \"latitude\"\n da[\"Latitude (deg)\"].attrs[\"long_name\"] = \"Latitude\"\n\n da[\"Longitude (deg)\"].attrs[\"units\"] = \"degree_east\"\n da[\"Longitude (deg)\"].attrs[\"standard_name\"] = \"longitude\"\n da[\"Longitude (deg)\"].attrs[\"long_name\"] = \"Longitude\"\n\n da[\"Wind Speed 
(m/s)\"].attrs[\"units\"] = \"m/s\"\n da[\"Wind Speed (m/s)\"].attrs[\"standard_name\"] = \"wind_speed\"\n da[\"Wind Speed (m/s)\"].attrs[\"long_name\"] = \"Wind speed\"\n\n da[\"Wind Direction (deg)\"].attrs[\"units\"] = \"degree\"\n da[\"Wind Direction (deg)\"].attrs[\"standard_name\"] = \"wind_from_direction\"\n da[\"Wind Direction (deg)\"].attrs[\"long_name\"] = \"Wind direction\"\n\n da[\"Surface Temperature (°C)\"] = 274.15 * da[\"Surface Temperature (°C)\"]\n da[\"Surface Temperature (°C)\"].attrs[\"units\"] = \"K\"\n da[\"Surface Temperature (°C)\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_temperature\"\n da[\"Surface Temperature (°C)\"].attrs[\"long_name\"] = \"Surface temperature\"\n\n da[\"Frequency\"].attrs[\"units\"] = \"Hz\"\n da[\"Frequency\"].attrs[\"standard_name\"] = \"wave_frequency\"\n da[\"Frequency\"].attrs[\"long_name\"] = \"Frequency\"\n\n da[\"Variance density\"].attrs[\"units\"] = \"m^2/Hz\"\n da[\"Variance density\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_variance_spectral_density\"\n da[\"Variance density\"].attrs[\"long_name\"] = \"Spectral density\"\n\n da[\"Directional spread\"].attrs[\"units\"] = \"degree\"\n da[\"Directional spread\"].attrs[\n \"standard_name\"\n ] = \"sea_surface_wave_directional_spread\"\n da[\"Directional spread\"].attrs[\"long_name\"] = \"Directional spreading\"\n\n da = da.rename(\n {\n \"Epoch Time\": \"time\",\n \"Frequency\": \"freq\",\n \"Battery Voltage (V)\": \"batter_voltage\",\n \"Variance density\": \"S\",\n \"Direction\": \"wave_dir\",\n \"Power (W)\": \"battery_power\",\n \"Humidity (%rel)\": \"humidity\",\n \"Significant Wave Height (m)\": \"Hm0\",\n \"Peak Period (s)\": \"Tp\",\n \"Mean Period (s)\": \"Tm\",\n \"Peak Direction (deg)\": \"peak_dir\",\n \"Peak Directional Spread (deg)\": \"peak_spread\",\n \"Mean Direction (deg)\": \"mean_dir\",\n \"Mean Directional Spread (deg)\": \"mean_spread\",\n \"Directional spread\": \"spread\",\n \"Latitude (deg)\": \"spot_lat\",\n \"Longitude (deg)\": \"spot_lon\",\n \"Wind Speed (m/s)\": \"wind_speed\",\n \"Wind Direction (deg)\": \"wind_dir\",\n \"Surface Temperature (°C)\": \"temperature\",\n }\n )\n\n J = xr.DataArray(\n np.array(\n [\n energy_flux(\n da.isel(time=idx)[\"S\"].to_pandas(), h=h, rho=rho\n ).values[0][0]\n for idx in range(len(da.time))\n ]\n ),\n dims=\"time\",\n name=\"J\",\n ).assign_coords(dict(time=da.time.values))\n J.attrs[\"units\"] = \"W/m\"\n J.attrs[\"long_name\"] = \"Energy flux\"\n\n Te = xr.DataArray(\n np.array(\n [\n energy_period(\n da.isel(time=idx)[\"S\"].to_pandas(),\n )\n for idx in range(len(da.time))\n ]\n ).squeeze(),\n dims=\"time\",\n name=\"Te\",\n ).assign_coords(dict(time=da.time.values))\n Te.attrs[\"units\"] = \"s\"\n Te.attrs[\"long_name\"] = \"Energy period\"\n\n da = xr.merge([da, J, Te])\n\n da.time.attrs[\"long_name\"] = \"Epoch time\"\n\n da.attrs[\n \"institution\"\n ] = \"Sandia National Laboratories and Monterey Bay Aquarium Research Institute\"\n da.attrs[\"Conventions\"] = \"CF-1.8\"\n da.attrs[\"title\"] = file_name\n da.attrs[\"source\"] = \"Sofar spotter buoy\"\n da.attrs[\"history\"] = \"generated {:} by {:}\".format(\n datetime.now().strftime(\"%Y-%m-%d @ %H:%M:%S\"), getpass.getuser()\n )\n da.attrs[\n \"references\"\n ] = \"https://content.sofarocean.com/hubfs/Technical_Reference_Manual.pdf\"\n da = da.sortby(\"time\")\n da = da.drop_isel(time=0) # first sample appears anomalous\n\n ds = da\n\n return ds\n", "repo_name": "paulohy/ncei-global-marine-data-ingest", "sub_path": 
"pipelines/mbari_wec/readers.py", "file_name": "readers.py", "file_ext": "py", "file_size_in_byte": 11375, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tsdat.DataReader", "line_number": 14, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 22, "usage_type": "name"}, {"api_name": "pydantic.Extra.forbid", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pydantic.Extra", "line_number": 22, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 86, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 120, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 130, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 252, "usage_type": "call"}, {"api_name": "mhkit.wave.resource.energy_flux", "line_number": 254, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "mhkit.wave.resource.energy_period", "line_number": 269, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 292, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 292, "usage_type": "name"}, {"api_name": "getpass.getuser", "line_number": 292, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 37, "usage_type": "name"}, {"api_name": "xarray.Dataset", "line_number": 37, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "35604862809", "text": "import requests\r\nfrom time import sleep\r\n\r\nuuid = input('Enter your R6 Tab UUID:')\r\nstart = input('Type \"Start\" to begin sending:')\r\nsentBots = 0\r\nif start != 'Start':\r\n print('Invalid Line. 
Quitting.')\r\n sleep(2.0)\r\n exit()\r\nelse:\r\n while True:\r\n WebRequest = requests.get('https://r6tab.com/mainpage.php?page='+uuid)\r\n sentBots = sentBots + 1\r\n print(f'[{sentBots}]: Bot Sent Successfully')\r\n \r\n \r\n", "repo_name": "Inq-B/R6S-Tab-Botter", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 447, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.sleep", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "39404382983", "text": "from simple_colors import *\nimport random\nfrom termcolor import colored\n\n\n#Allows player to actually setup the game themselves\ndef setup():\n global n\n global d\n global numb\n global life\n life = 10\n print(\"\\nWelcome to the \" +\n colored(\"Number Guessing game\", 'yellow', attrs=['bold']))\n print(\"First off, I want you to choose your range \")\n n = input(\"\\nType in the minimum number you want me to guess from\\n\")\n d = input(\"Now type in the maximum number you want me to guess from \\n\")\n numb = random.randint(int(n), int(d))\n\n\n#Game itself\ndef guessing():\n global life\n guess = input(\"\\nTry guess a number between \" + str(n) + \" and \" + str(d) +\n \"\\n\")\n\n while int(guess) != int(numb) and life > 1:\n if int(guess) < int(numb):\n print(\"\\nThat guess is too low\")\n life = life - 1\n print(\" You now have \" +\n colored(str(life), 'red', attrs=['bold']) + \" Lives\")\n guess = input(\"\\n Try again\\n\")\n elif int(guess) > int(numb):\n print(\"\\nThat guess is\", red(\"too high\", [\"bold\"]))\n life = life - 1\n print(\" You now have \" +\n colored(str(life), 'green', attrs=['bold']) + \" Lives\")\n guess = input(\"\\n Try again\\n\")\n\n\n#Response once player has either won or lost all lives\ndef checking():\n global again\n if int(life) > 1:\n print(\"\\nWELL DONE you finished with \" + str(life) + \" Lives left\", )\n print(\" The number was \" + str(numb))\n again = input(\"\\nWanna play again? y/n\\n\")\n else:\n print(\"\\nLMFAO YOU DIDN'T GET IT\")\n print(\" The number was \" + str(numb))\n again = input(\"\\nWanna play again? y/n\\n\")\n\n\n#Beginning of the actual Code\nsetup()\nguessing()\nchecking()\n\n#Restart Game or Close Program options here\nwhile again.lower() == \"y\":\n setup()\n guessing()\n checking()\n\nif again.lower() == \"n\":\n print(\"\\nThanks for Playing, Have a good rest of your day,\", cyan(\"Bye bye now \",['bold']))\n", "repo_name": "ZaidMGames/Number-Guessing-Game", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "termcolor.colored", "line_number": 14, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 32, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "769838941", "text": "#! /usr/bin/env python3\n\n'''\nhypersphere.py is a python script that can find the volume of an n-dimensional hypersphere using the Monte Carlo\nmean-value method to approximate the value of the integral (specifically found for 0-12 dimensions) and plots the\nvolume as a function of dimension. 
Also finds the error using different numbers of sample points for a 10-d\nhypersphere and plots this error as a function of the number of sample points.\n\nJaniris Rodriguez\nPHZ 4151C\nFeb 21, 2021\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef withinSphere(arg, dim):\n    '''\n    evaluates whether given point is within a {dim}-dimensional sphere\n    input:\n        argument (list of floats): represents point to evaluate\n        dim (int): number of elements in arguments, represents number of dimensions for sphere\n    returns:\n        1 if the point (argument) is within the sphere, 0 if it isn't\n    '''\n\n    # value will store the result of the sum of the arguments squared to check if it would be inside the {dim}-dimensional sphere\n    value = 0\n\n    for i in range(dim):\n        value += (arg[i]**2)\n\n    # checks to see if this point is within the sphere, it is within the sphere if it is less than or equal to 1\n    # if it is, result of function is 1, if not, it is 0\n    if value <= 1:\n        result = 1\n    else:\n        result = 0\n\n    return result\n\n\ndef montecarloIntegration(func, dim, lim, N):\n    '''\n    based on textbook function for the Monte Carlo mean-value integration method of any dimension\n    input:\n        func (function): user-defined function to be integrated over, takes in a list as an argument\n        dim (int): number of dimensions being integrated over\n        lim (2-d list of floats): stores limits of integration for dim dimensions. first index represents the dimension,\n                                  second index represents either the lower limit (element 0) or upper limit (element 1) of integration\n        N (int): number of sample points\n    returns:\n        result of integral and a list of lists of random values used as arguments for the function\n    '''\n    I = 1.0 / N # stores value of integral, starts by storing 1/N, which helps set up finding average of function\n    summation = 0 # stores value of summation for N random arguments sent to function to find average value of function\n    argList = [] # stores lists of random values used as arguments for the function\n\n    # multiplies integral value by the subtraction of lower limit of integral from upper limit of integral for the ith dimension\n    for i in range(dim):\n        I *= (lim[i][1] - lim[i][0])\n\n    for i in range(N):\n        # stores arguments to send to function for one iteration (i.e. sample point)\n        arg = []\n\n        # generating dim number of random arguments within the bounds of integration for the jth dimension\n        # to send to function being integrated\n        for j in range(dim):\n            arg.append(np.random.uniform(lim[j][0], lim[j][1]))\n\n        # adds the result of function with these randomly produced arguments\n        summation += func(arg, dim)\n\n        # stores this iteration's set of random numbers to find error\n        argList.append(arg)\n\n    # multiplies result of sum into value of integral\n    I *= summation\n\n    return I, argList\n\ndef errorMonteCarlo(func, arg, dim, lim, N):\n    '''\n    find the error on the integral when using the Monte Carlo mean-value method\n    input:\n        func (function): user-defined function to be integrated over, takes in a list as an argument\n        arg (2-d list): stores arguments used to find value of integral using the montecarloIntegration function. first index\n                        represents which sample point out of N is being evaluated, the second index represents a specific\n                        coordinate value for that sample point\n        dim (int): number of dimensions being integrated over\n        lim (2-d list): stores limits of integration for dim dimensions. 
first index represents the dimension,\n                        second index represents either the lower limit (element 0) or upper limit (element 1) of integration\n        N (int): number of sample points\n    returns:\n        error on the integral estimation\n    '''\n\n    var_term1 = 1. / N # will store the expectation value of func^2 to find variance of func\n    var_term2 = 1. / (N**2) # will store the expectation value squared of func to find variance of func\n    summation1 = 0 # will store sum of func values for given arg values to calculate 1st term of variance of func\n    summation2 = 0. # will store sum of func values for given arg values to calculate 2nd term of variance of func\n    sigma = 1. / np.sqrt(N) # will store error\n\n    # multiplies error value by the subtraction of lower limit of integral from upper limit of integral for the ith dimension\n    for j in range(dim):\n        sigma *= (lim[j][1] - lim[j][0])\n\n    # finds values of function for a given argument to add to summations for the terms in the variance of func\n    for i in range(int(N)):\n        val = func(arg[i], dim)\n        summation1 += val ** 2\n        summation2 += val\n\n    # multiply in the respective summations to find the two terms of the variance\n    var_term1 *= summation1\n    var_term2 *= summation2\n\n    # combine the two variance terms and take square root to find the error on func\n    sigma *= np.sqrt(var_term1 - var_term2)\n\n    return sigma\n\n\n# main code block\nnumPoints = 1000000 # number of sampling points for integration\nnumDimension = 10 # number of dimensions of the hypersphere\nlimit = [-1,1] # bounds of integration, -1 to 1 for all dimensions\n\nlimit_10d = [] # list containing limits of integration for 10 dimensions\narguments_10d = [] # list containing arguments used for the 10-d integration\n\n# limits of integration for all 10 dimensions are -1 to 1\nfor i in range(numDimension):\n    limit_10d.append(limit)\n\n# calculate volume of 10-d hypersphere\nvolume_10d, arguments_10d = montecarloIntegration(withinSphere, numDimension, limit_10d, numPoints)\n\nprint(f'The volume of a 10-d hypersphere is {volume_10d}\\n')\n\nlowerDim = 0\nupperDim = 12\ndimensionRange = [*range(lowerDim, upperDim)] # dimensions 0 through 11\nvolumes = [] # store volumes of spheres for given dimensions in dimensionRange\narguments = [] # stores arguments used to calculate the volume of spheres for given dimensions\n\nfor i in dimensionRange:\n    # creating a temporary list of limits for each iteration\n    limit_list = []\n    # appends appropriate number of limits for a given dimension to send into Monte Carlo function\n    for j in range(len(dimensionRange)):\n        limit_list.append(limit)\n    # find volumes for each dimension in dimensionRange\n    # saves these to temporary variables to then append to their respective lists\n    tempVolume, tempArg = montecarloIntegration(withinSphere, dimensionRange[i], limit_list, numPoints)\n    volumes.append(tempVolume)\n    arguments.append(tempArg)\n\n    print(f'The (hyper)volume of a {i}-d sphere is approximately {volumes[i]:.4f}')\n\n\n#graph of hypervolume v. dimension\nplt.plot(dimensionRange, volumes)\nplt.xlabel('Dimension')\nplt.ylabel('Hypervolume')\nplt.title('Hypervolume v. 
Number of Dimensions')\nax = plt.gca() # gets the axes for this specific plot\nax.spines['top'].set_visible(False) # gets rid of top and right axes (purely for aesthetics)\nax.spines['right'].set_visible(False)\nplt.savefig('dimension_hypervolume_plot.png') # saves figure\nplt.show()\nplt.clf()\n\nlowerNumPoints = 20000 # bottom of range for number of sampling points\nstep = 20000 # step between different point values\nnumber = int((numPoints - lowerNumPoints) / step) # calculates how many different numbers of sample points there should be for given step value\n\n# generates sample point range from {lowerNumPoints} to {numPoints} in steps of {step} as a list\nnumPointsRange = np.linspace(lowerNumPoints, numPoints, number).tolist()\n\nerrors_10d = [] # stores the errors in the estimation of integral for 10-d hypersphere\n\nprint('\\nFor a 10-d hypersphere, the error on the volume is:')\n\ncounter = 0 # for the purpose of printing out errors\n\nfor i in numPointsRange:\n    # find error in integral result for 10-d hypersphere for i sample points\n    errors_10d.append(errorMonteCarlo(withinSphere, arguments_10d, numDimension, limit_10d, i))\n    print(f'{errors_10d[int(counter)]} for {i} sample points')\n    counter += 1\n\n#graph of error v. sample points\nplt.plot(numPointsRange, errors_10d)\nplt.xlabel('Number of Sample Points')\nplt.ylabel('Error on Volume')\nplt.title('Error on Volume of 10-d Hypersphere v. N')\nax = plt.gca() # gets the axes for this specific plot\nax.spines['top'].set_visible(False) # gets rid of top and right axes (purely for aesthetics)\nax.spines['right'].set_visible(False)\nplt.savefig('error_N_plot.png') # saves figure\nplt.show()\nplt.clf()\n", "repo_name": "janirisrodriguez/Compuational-Physics-HW", "sub_path": "hypersphere.py", "file_name": "hypersphere.py", "file_ext": "py", "file_size_in_byte": 9400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.uniform", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": 
"numpy.linspace", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}]} +{"seq_id": "22009166944", "text": "from dataclasses import asdict, dataclass\nfrom typing import Tuple\n\nfrom elasticsearch import Elasticsearch\nfrom state_controller import StateController\n\n\n@dataclass\nclass ElasticsearchLoader:\n \"\"\"Класс для загрузки данных в ElasticSearch\"\"\"\n\n es: Elasticsearch\n es_sc: StateController\n page_size: int = 500\n\n def load(self, index: str,\n data: Tuple):\n ready_data = []\n self.es_sc.get_state()\n\n for i in range(len(data)):\n ready_data += [{'index': {'_index': index,\n '_id': str(data[i].id)}},\n asdict(data[i])]\n\n ready_data = ready_data[self.es_sc.state * 2:]\n\n while len(ready_data) > 0:\n if self.page_size * 2 > len(ready_data):\n ready_data, list_to_load = [], ready_data\n else:\n ready_data = ready_data[self.page_size * 2:]\n list_to_load = (ready_data[:self.page_size * 2])\n\n self.es.bulk(body=list_to_load)\n\n self.es_sc.state += self.page_size\n self.es_sc.set_state()\n", "repo_name": "Wiped-Out/yandex_16_team_work", "sub_path": "docker_compose/etl/elasticsearch_loader.py", "file_name": "elasticsearch_loader.py", "file_ext": "py", "file_size_in_byte": 1131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "elasticsearch.Elasticsearch", "line_number": 12, "usage_type": "name"}, {"api_name": "state_controller.StateController", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 17, "usage_type": "name"}, {"api_name": "dataclasses.asdict", "line_number": 24, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "19056792410", "text": "from odoo import http\nfrom odoo.http import request\nfrom odoo import api, fields, models, _, tools\nfrom datetime import datetime, date\nfrom odoo.exceptions import UserError, ValidationError\nimport pytz\nfrom dateutil import tz\n\nclass HelpdeskBackend(http.Controller):\n\n\t@http.route('/hr/fetch_dashboard_data', type=\"json\", auth='user')\n\tdef fetch_dashboard_data(self):\n\t\t\n\t\tuser_id = request.env.user.id\n\t\tdashboard_data = {}\n\t\tnow = datetime.now()\n\t\temployee = 
request.env['hr.employee'].sudo()\n\t\tannouncement = request.env['hr.announcement'].sudo()\n\t\tattendance = request.env['hr.attendance'].sudo()\n\t\texpense = request.env['hr.expense'].sudo()\n\t\tleaves = request.env['hr.leave'].sudo()\n\t\tpayslip = request.env['hr.payslip'].sudo()\n\t\tcontract = request.env['hr.contract'].sudo()\n\t\tdata = {}\n\t\tbirthdate_data = {}\n\t\tfor datas in employee.search([]):\n\t\t\tif datas.birthday:\n\t\t\t\tif datas.birthday.day <= now.date().day and datas.birthday.month <= now.date().month:\n\t\t\t\t\tbirthdate_data[datas.id] = datas.name\n\t\tdashboard_data['birthday'] = birthdate_data\n\n\t\tannouncement_data = {}\n\t\tfor datas in announcement.search([]):\n\t\t\tannouncement_data[str(datas.announcement_date)] = datas.name\n\t\tdashboard_data['announcement'] = announcement_data\n\n\t\tleave_count = {}\n\t\tleave_count['pending'] = leaves.search_count([('employee_id.name','=', request.env.user.name),('state','=','confirm')])\n\t\tleave_count['approved'] = leaves.search_count([('employee_id.name','=', request.env.user.name),('state','=','validate')])\n\t\tleave_count['cancelled'] = leaves.search_count([('employee_id.name','=', request.env.user.name),('state','=','cancel')])\n\t\tleave_count['refused'] = leaves.search_count([('employee_id.name','=', request.env.user.name),('state','=','refuse')])\n\n\t\ttotal_leave = {}\n\n\t\tapproved_leave = 0.0\n\t\tallocated_leave = 0.0\n\t\tif request.env.user.has_group('base.user_admin'):\n\t\t\tleaves_id = request.env['hr.leave.allocation'].search([])\n\t\telse:\n\t\t\tleaves_id = request.env['hr.leave.allocation'].search([('employee_id.name','=', request.env.user.name)])\n\n\t\tfor leave in leaves_id:\n\t\t\tif (leave.state == 'validate'):\n\t\t\t\tif leave.type_request_unit != 'hour':\n\t\t\t\t\tapproved_leave += leave.number_of_days_display\n\t\t\tif leave.type_request_unit != 'hour':\n\t\t\t\t\tallocated_leave += leave.number_of_days_display\n\n\t\ttotal_leave['leave_approved'] = approved_leave\n\t\ttotal_leave['leave_allocated'] = allocated_leave\n\t\t\n\t\tpayslip_vals = {}\n\t\tfor pay in payslip.search([('employee_id.name','=', request.env.user.name)]):\n\t\t\tpay_slip = {}\n\t\t\tpay_slip['number'] = pay.number if pay.number else '' \n\t\t\tpay_slip['date_from'] = pay.date_from\n\t\t\tpay_slip['date_to'] = pay.date_to\n\t\t\tpay_slip['state'] = dict(pay._fields['state'].selection).get(pay.state)\n\t\t\tpayslip_vals[pay.id] = pay_slip\n\n\t\texpenses_vals = {}\n\t\tfor expenses in expense.search([('employee_id.name','=', request.env.user.name)]):\n\t\t\texpense_dict = {}\n\t\t\texpense_dict['date'] = expenses.date\n\t\t\texpense_dict['name'] = expenses.name\n\t\t\texpense_dict['total'] = expenses.total_amount\n\t\t\texpense_dict['state'] = dict(expenses._fields['state'].selection).get(expenses.state)\n\t\t\texpenses_vals[expenses.id] = expense_dict\n\n\t\tattendance_vals = {}\n\n\t\tfor attendances in attendance.search([('employee_id.name','=', request.env.user.name)]):\n\t\t\tattendance_dict = {}\n\n\t\t\tuser_tz = pytz.timezone(request.env.context.get('tz') or request.env.user.tz or 'UTC')\n\t\t\tcheck_in_time = pytz.UTC.localize(fields.Datetime.from_string(attendances.check_in))\n\t\t\tcheck_in = check_in_time.astimezone(user_tz)\n\n\t\t\tif attendances.check_out:\n\t\t\t\tcheck_out_time = pytz.UTC.localize(fields.Datetime.from_string(attendances.check_out))\n\t\t\t\tcheck_out = check_out_time.astimezone(user_tz)\n\n\t\t\tattendance_dict['date'] = attendances.check_in.date() if attendances.check_in 
else ''\n\t\t\tattendance_dict['check_in'] = check_in.time() if attendances.check_in else ''\n\t\t\tattendance_dict['check_out'] = check_out.time() if attendances.check_out else '' \n\t\t\tattendance_vals[attendances.id] = attendance_dict\n\n\t\tleaves_vals = {}\n\t\tfor leaves in leaves.search([('employee_name.name','=', request.env.user.name)]):\n\t\t\tleave_dict = {}\n\t\t\tleave_dict['request_out'] = leaves.request_date_from\n\t\t\tleave_dict['request_in'] = leaves.request_date_to\n\t\t\tleave_dict['leave_type'] = leaves.holiday_status_id.name\n\t\t\tleave_dict['state'] = dict(leaves._fields['state'].selection).get(leaves.state)\n\t\t\tleaves_vals[leaves.id] = leave_dict\n\t\tcount = {}\n\t\tcount['payslip'] = payslip.search_count([('employee_id.name','=', request.env.user.name)])\n\n\t\tcount['attendance'] = attendance.search_count([('employee_id.name','=', request.env.user.name)])\n\t\ttotal = 0.0\n\t\tfor data in expense.search([('employee_id.name','=', request.env.user.name)]): \n\t\t\ttotal += data.total_amount\n\n\t\tcount['contract'] = contract.search_count([('employee_id.name','=', request.env.user.name)])\n\t\tcount['expense'] = '{0:.2f}'.format(total)\n\t\tdashboard_data['user'] = request.env.user.name\n\t\tdashboard_data['leaves_total'] = total_leave\n\t\tdashboard_data['leaves_count'] = leave_count\n\t\tdashboard_data['count'] = count\n\t\tdashboard_data['payslip'] = payslip_vals\n\t\tdashboard_data['expense'] = expenses_vals\n\t\tdashboard_data['attendance'] = attendance_vals\n\t\tdashboard_data['leaves'] = leaves_vals\n\n\t\treturn dashboard_data\n\n\t@http.route('/hr/fetch_birthdate_data', type=\"json\", auth='user')\n\tdef fetch_birthday_data(self, data):\n\t\tdashboard_data = {}\n\t\t# now = datetime.now()\n\t\ttry:\n\t\t\tdatetime.strptime(data, '%d/%m/%Y')\n\t\texcept ValueError:\n\t\t\traise UserError(_(\"Incorrect data format\"))\n\t\tdate_time_obj = datetime.strptime(data, '%d/%m/%Y')\n\t\temployee = request.env['hr.employee']\n\t\tbirthdate_data = {}\n\t\tfor datas in employee.search([]):\t\n\t\t\tif datas.birthday:\n\t\t\t\tif datas.birthday.day == date_time_obj.date().day and datas.birthday.month == date_time_obj.date().month:\n\t\t\t\t\tbirthdate_data[datas.id] = datas.name\n\t\tdashboard_data['birthday'] = birthdate_data\n\n\t\treturn dashboard_data\n\t\t", "repo_name": "prointec/demoprueba", "sub_path": "hr_dashboard_app_ent/controllers/backend.py", "file_name": "backend.py", "file_ext": "py", "file_size_in_byte": 5935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "odoo.http.Controller", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 14, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 17, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 17, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 18, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 19, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 19, "usage_type": 
"name"}, {"api_name": "odoo.http.request.env", "line_number": 20, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 21, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 22, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 22, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 23, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 38, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 38, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 39, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 39, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 40, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 41, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 41, "usage_type": "name"}, {"api_name": "odoo.http.request.env.user.has_group", "line_number": 47, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 47, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 47, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 48, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 50, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 50, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 63, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 63, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 72, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 72, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 82, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 82, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.http.request.env.context.get", "line_number": 85, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 85, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 85, "usage_type": "name"}, {"api_name": "pytz.UTC.localize", "line_number": 86, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 86, "usage_type": "attribute"}, {"api_name": "odoo.fields.Datetime.from_string", "line_number": 86, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 86, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 86, "usage_type": "name"}, {"api_name": "pytz.UTC.localize", "line_number": 90, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 90, "usage_type": "attribute"}, {"api_name": "odoo.fields.Datetime.from_string", "line_number": 90, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 90, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 90, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 99, "usage_type": "attribute"}, 
{"api_name": "odoo.http.request", "line_number": 99, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 107, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 107, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 109, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 109, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 111, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 111, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 114, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 114, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 116, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 116, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 11, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 132, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 134, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 134, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 136, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 136, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 127, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 127, "usage_type": "name"}]} +{"seq_id": "39080786745", "text": "import os\nimport pdfkit\nfrom PyPDF2 import PdfFileMerger\n\nclass PDFMerger:\n def __init__(self, src_dir, pathpdf):\n self.src_dir = src_dir\n self.pathpdf = pathpdf\n self.pname = []\n\n def get_name(self):\n string_parts = self.src_dir.split('_')\n self.pname = (string_parts[2], string_parts[1])\n\n def convert_html_to_pdf(self):\n enc_folder = os.listdir(self.src_dir)\n for folder in enc_folder: \n path = os.path.join(self.src_dir, folder)\n for html in os.listdir(path):\n if html.endswith(\".html\"):\n # Convert HTML file to PDF\n html_path = os.path.join(path, html)\n pdf_file = os.path.splitext(html)[0] + \".pdf\"\n pdf_path = os.path.join(self.pathpdf, pdf_file)\n try:\n pdfkit.from_file(html_path, pdf_path)\n except PermissionError:\n print(f\"Permission denied: {html_path}\")\n continue\n except Exception as e:\n print(f\"Error occurred when converting {html} to PDF: {e}\")\n continue\n\n def merge_pdfs(self):\n output_dir = 'D:\\\\MergedHTMLs' # replace with the path to your output folder\n output_filename = os.path.join(output_dir, self.pname[0] + \"_\" + self.pname[1] + \"_ENC.pdf\") # replace with your desired output filename and path\n\n pdf_merger = PdfFileMerger()\n\n # Merge all PDF files in the folder into one document\n for filename in os.listdir(self.pathpdf):\n if filename.endswith('.pdf'):\n filepath = os.path.join(self.pathpdf, filename)\n with open(filepath, 'rb') as f:\n pdf_merger.append(f)\n\n # Write the merged document to a new file in the output directory\n with open(output_filename, 'wb') as f:\n pdf_merger.write(f)\n\n def reset(self):\n for f in os.listdir(self.pathpdf):\n filepath = os.path.join(self.pathpdf, f)\n 
os.remove(filepath)\n\nsrc_dir = \"D:\\\\ProgressNotes\"\npathpdf = \"D:\\\\MergedPDFs\"\n\nmerger = PDFMerger(src_dir, pathpdf)\n\nfor folder in os.listdir(src_dir):\n merger.src_dir = os.path.join(src_dir, folder)\n merger.get_name()\n merger.convert_html_to_pdf()\n merger.merge_pdfs()\n merger.reset()\n", "repo_name": "Anish-I/EHRFileMigration", "sub_path": "Filemerger.py", "file_name": "Filemerger.py", "file_ext": "py", "file_size_in_byte": 2413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pdfkit.from_file", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "PyPDF2.PdfFileMerger", "line_number": 38, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 54, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "70212545446", "text": "import asyncio\nimport sqlite3\nimport schedule\nfrom config import DIRECTORY\n\ndirectory = r'{0}\\posts.db'.format(DIRECTORY)\nconnection = sqlite3.connect(directory)\ncursor = connection.cursor()\n\ncursor.execute('''CREATE TABLE IF NOT EXISTS posts (\n id INTEGER PRIMARY KEY AUTOINCREMENT,organization TEXT NOT NULL,date TEXT NOT NULL,\n description TEXT NOT NULL,address TEXT NOT NULL,lat FLOAT NOT NULL,lon FLOAT NOT NULL,media_id TEXT);''')\n\nconnection.commit()\n\ncursor.close()\nconnection.close()\n\n\ndef insert(organization, date, description, address, lat, lon):\n con = sqlite3.connect(directory)\n cur = con.cursor()\n\n cur.execute(\"INSERT INTO posts (organization,date,description,address,lat,lon) VALUES (?, ?, ?, ?, ?, ?)\",\n (organization, date, description, address, lat, lon))\n result = cur.lastrowid\n\n con.commit()\n\n cur.close()\n con.close()\n\n return result\n\n\ndef fetch(post_id):\n con = sqlite3.connect(directory)\n cur = con.cursor()\n\n data = cur.execute(\"SELECT * FROM posts WHERE id = ?\", (post_id,)).fetchone()\n\n cur.close()\n con.close()\n\n return data\n\n\ndef fetch_by_organization(org):\n con = sqlite3.connect(directory)\n cur = con.cursor()\n\n data = cur.execute(\"SELECT * FROM 
posts WHERE organization = ?\", (org,)).fetchone()\r\n\r\n    cur.close()\r\n    con.close()\r\n\r\n    return data\r\n\r\n\r\ndef nearest(lon, lat):\r\n    con = sqlite3.connect(directory)\r\n    cur = con.cursor()\r\n    \r\n    data = cur.execute(\"SELECT id, date, lat, lon FROM posts ORDER BY date\").fetchall()\r\n\r\n    cur.close()\r\n    con.close()\r\n\r\n    grouped_data_by_date = {}\r\n\r\n    for row in data:\r\n        distance = (((row[2]) - lat) ** 2 + ((row[3]) - lon) ** 2) ** 0.5\r\n        if row[1] in grouped_data_by_date:\r\n            grouped_data_by_date[row[1]].append([row[0], distance])\r\n        else:\r\n            grouped_data_by_date[row[1]] = [[row[0], distance]]\r\n\r\n    sorted_data = {\r\n        key: sorted(value, key=lambda x: x[1])\r\n        for key, value in grouped_data_by_date.items()\r\n    }\r\n    print(sorted_data)\r\n    result_string = ','.join(str(item[0]) for value in sorted_data.values() for item in value)\r\n\r\n    return result_string\r\n\r\n\r\ndef size():\r\n    con = sqlite3.connect(directory)\r\n    cur = con.cursor()\r\n\r\n    result = len(cur.execute(\"SELECT * FROM posts\").fetchall())\r\n    \r\n    cur.close()\r\n    con.close()\r\n    \r\n    return result\r\n\r\n\r\ndef delete_posts():\r\n    con = sqlite3.connect(directory)\r\n    cur = con.cursor()\r\n\r\n    cur.execute(\"DELETE FROM posts WHERE date < date('now')\")\r\n\r\n    con.commit()\r\n    \r\n    cur.close()\r\n    con.close()\r\n\r\n\r\n# Run the deletion task every day at the scheduled time\r\nschedule.every().day.at(\"00:00\").do(delete_posts)\r\n\r\n\r\n# named 'scheduler' so the coroutine does not shadow the schedule module\r\nasync def scheduler():\r\n    while True:\r\n        schedule.run_pending()\r\n        await asyncio.sleep(1)\r\n\r\n\r\nif __name__ == '__main__':\r\n    asyncio.run(scheduler())\r\n", "repo_name": "ha1mg/partyOnBot", "sub_path": "db/posts.py", "file_name": "posts.py", "file_ext": "py", "file_size_in_byte": 2874, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.DIRECTORY", "line_number": 6, "usage_type": "argument"}, {"api_name": "sqlite3.connect", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 89, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 101, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 113, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 118, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "21079172249", "text": "\"\"\"\nto do: \n    finish system tests for:\n        request_current_sensor_present\n        request_current_sensor_value\n        request_current_sensor_nominal\n        request_display_leds_present\n        request_display_solenoids_present\n\ntopics subscribed:\n    cmd_all_off\n    cmd_play_score\n    cmd_set_number\n    cmd_set_phrase\n    connected\n    request_computer_details\n    request_current_sensor_nominal\n    request_current_sensor_present\n    request_current_sensor_value\n    request_display_leds_present\n    request_display_solenoids_present\n    request_system_tests\n\ntopics published:\n\n    connected\n    response_computer_details\n    response_current_sensor_nominal*\n    response_current_sensor_present*\n    response_current_sensor_value*\n    response_display_leds_present\n    response_display_solenoids_present\n\n\"\"\"\nimport importlib\nimport os\nimport queue\nimport RPi.GPIO as GPIO\nimport sys\nimport threading\nimport time\nimport traceback\n\napp_path = 
os.path.dirname((os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))\nsys.path.append(os.path.split(app_path)[0])\n\nimport settings\nimport common.deadman as deadman\nfrom thirtybirds3 import thirtybirds\nfrom thirtybirds3.adapters.gpio.hc595 import HC595_shift_reg as hc595\n\nfrom thirtybirds3.adapters.sensors import ina260_current_sensor as ina260\n# scores are located outside this script because they're voluminous\nimport roles.display.score_by_mode.system_test as system_test_scores\nimport roles.display.score_by_mode.singles as single_notes\n\nscores = {\n    \"system_test_scores_descending_scale\":system_test_scores.descending_scale,\n    \"c_piano\":single_notes.c_piano,\n    \"c_mezzo\":single_notes.c_mezzo,\n    \"c_forte\":single_notes.c_forte,\n    \"asharp_piano\":single_notes.asharp_piano,\n    \"asharp_mezzo\":single_notes.asharp_mezzo,\n    \"asharp_forte\":single_notes.asharp_forte,\n    \"gsharp_piano\":single_notes.gsharp_piano,\n    \"gsharp_mezzo\":single_notes.gsharp_mezzo,\n    \"gsharp_forte\":single_notes.gsharp_forte,\n    \"g_piano\":single_notes.g_piano,\n    \"g_mezzo\":single_notes.g_mezzo,\n    \"g_forte\":single_notes.g_forte,\n    \"f_piano\":single_notes.f_piano,\n    \"f_mezzo\":single_notes.f_mezzo,\n    \"f_forte\":single_notes.f_forte,\n}\n\n###########################\n# S Y S T E M   T E S T S #\n###########################\n\n# machine measurements #\n# Check communication with TLC5947\n# Check communication with HC595\n# [unit 1 only] measure 24V current \n    \n# tests for human observation #\n# [one station at a time] cycle through all digits and phrases\n# [one station at a time] cycle through all 5 pitches\n\n###################\n# ACRYLIC DISPLAY #\n###################\n\nclass Acrylic_Display():\n    \"\"\" This class is the hardware init and control for the acrylic displays and \n    their shift registers. It also acts as a simple receiver of commands. 
\n    All animations and sophisticated behaviors reside elsewhere and call this over thirtybirds\n    \"\"\"\n    def __init__(self):\n        self.current_phrase = \"juega\"\n        self.current_number = 0\n        self.shift_register_states = [0x00,0x00,0x00,0x00,0x00]\n        self.shift_register_chain = hc595.HC595(bus=0,deviceId=0)\n        self.Display_LED_Mapping = {\n            \"digit\": {\n                \"a\": {\n                    0: {\"bit\": 0, \"shift_register_index\": 0},\n                    1: {\"bit\": 1, \"shift_register_index\": 0},\n                    2: {\"bit\": 2, \"shift_register_index\": 0},\n                    3: {\"bit\": 3, \"shift_register_index\": 0},\n                    4: {\"bit\": 4, \"shift_register_index\": 0},\n                    5: {\"bit\": 5, \"shift_register_index\": 0},\n                    6: {\"bit\": 6, \"shift_register_index\": 0},\n                    7: {\"bit\": 7, \"shift_register_index\": 0},\n                    8: {\"bit\": 0, \"shift_register_index\": 1},\n                    9: {\"bit\": 1, \"shift_register_index\": 1}\n                },\n                \"b\": {\n                    0: {\"bit\": 2, \"shift_register_index\": 1},\n                    1: {\"bit\": 3, \"shift_register_index\": 1},\n                    2: {\"bit\": 4, \"shift_register_index\": 1},\n                    3: {\"bit\": 5, \"shift_register_index\": 1},\n                    4: {\"bit\": 6, \"shift_register_index\": 1},\n                    5: {\"bit\": 7, \"shift_register_index\": 1},\n                    6: {\"bit\": 0, \"shift_register_index\": 2},\n                    7: {\"bit\": 1, \"shift_register_index\": 2},\n                    8: {\"bit\": 2, \"shift_register_index\": 2},\n                    9: {\"bit\": 3, \"shift_register_index\": 2}\n                },\n                \"c\": {\n                    0: {\"bit\": 0, \"shift_register_index\": 3},\n                    1: {\"bit\": 1, \"shift_register_index\": 3},\n                    2: {\"bit\": 2, \"shift_register_index\": 3},\n                    3: {\"bit\": 3, \"shift_register_index\": 3},\n                    4: {\"bit\": 4, \"shift_register_index\": 3},\n                    5: {\"bit\": 5, \"shift_register_index\": 3},\n                    6: {\"bit\": 6, \"shift_register_index\": 3},\n                    7: {\"bit\": 7, \"shift_register_index\": 3},\n                    8: {\"bit\": 0, \"shift_register_index\": 4},\n                    9: {\"bit\": 1, \"shift_register_index\": 4}\n                }\n            },\n            \"display_phrase\": {\n                \"como\": {\"bit\": 2, \"shift_register_index\": 4},\n                \"fue\":{\"bit\": 3, \"shift_register_index\": 4},\n                \"dinero\": {\"bit\": 4, \"shift_register_index\": 4},\n                \"trueque\": {\"bit\": 5, \"shift_register_index\": 4},\n                \"juega\": {\"bit\": 6, \"shift_register_index\": 4}\n            }\n        }\n\n    def set_phrase(self, phrase): # [ \"\" | juega | trueque | dinero | como | fue ]\n        self.current_phrase = phrase\n        self.update_display()\n    \n    def generate_phrase_bytes(self):\n        if self.current_phrase != \"\":\n            shift_register_index = self.Display_LED_Mapping[\"display_phrase\"][self.current_phrase][\"shift_register_index\"]\n            bit = self.Display_LED_Mapping[\"display_phrase\"][self.current_phrase][\"bit\"]\n            self.shift_register_states[shift_register_index] = self.shift_register_states[shift_register_index] + (1 << bit)\n\n    def set_number(self, number):\n        if number > 999:\n            number = number % 1000\n        self.current_number = number\n        self.update_display()\n\n    def generate_number_bytes(self):\n        a,b,c = '{:>03d}'.format(self.current_number)\n\n        shift_register_index = self.Display_LED_Mapping[\"digit\"][\"a\"][int(a)][\"shift_register_index\"]\n        bit = self.Display_LED_Mapping[\"digit\"][\"a\"][int(a)][\"bit\"]\n        self.shift_register_states[shift_register_index] += (1 << bit)\n\n        shift_register_index = self.Display_LED_Mapping[\"digit\"][\"b\"][int(b)][\"shift_register_index\"]\n        bit = self.Display_LED_Mapping[\"digit\"][\"b\"][int(b)][\"bit\"]\n        self.shift_register_states[shift_register_index] += (1 << bit)\n\n        shift_register_index = self.Display_LED_Mapping[\"digit\"][\"c\"][int(c)][\"shift_register_index\"]\n        bit = self.Display_LED_Mapping[\"digit\"][\"c\"][int(c)][\"bit\"]\n        
self.shift_register_states[shift_register_index] += (1 << bit)\n\n    def set_all_off(self):\n        self.shift_register_states = [0x00] * len(self.shift_register_states)\n        self.shift_register_chain.write(self.get_inverted_shift_register_states())\n        #self.shift_register_chain.write(self.shift_register_states)\n\n    def update_display(self):\n        self.set_all_off()\n        if self.current_number > -1:\n            self.generate_number_bytes()\n        if self.current_phrase != \"\":\n            self.generate_phrase_bytes()\n        self.shift_register_chain.write(self.get_inverted_shift_register_states())\n\n    def get_inverted_shift_register_states(self):\n        mask = 0b11111111\n        inverted_shift_register_states = []\n        for shift_register_state in self.shift_register_states:\n            inverted_shift_register_states.append(shift_register_state ^ mask)\n        return inverted_shift_register_states\n\n\n###############\n# C H I M E S #\n###############\n\nclass Chime(threading.Thread):\n    def __init__(self, gpio_number):\n        threading.Thread.__init__(self)\n        self.queue = queue.Queue()\n        self.gpio_number = gpio_number\n        GPIO.setup(gpio_number, GPIO.OUT)\n        self.stop_power()\n        self.start()\n\n    def stop_power(self):\n        print(\"stop_power\", self.gpio_number)\n        GPIO.output(self.gpio_number, GPIO.LOW)\n\n    def start_power(self):\n        print(\"start_power\", self.gpio_number)\n        GPIO.output(self.gpio_number, GPIO.HIGH)\n\n    def add_pulse_to_queue(self, pulse_duration):\n        self.queue.put(pulse_duration)\n\n    def run(self):\n        while True:\n            try:\n                pulse_duration = self.queue.get(True)\n                if pulse_duration < 0.1: # safety check: ignore pulses of 0.1 s or longer\n                    self.start_power()\n                    time.sleep(pulse_duration)\n                    self.stop_power()\n            except Exception as e:\n                print(e)\n                self.stop_power()\n            finally:\n                self.stop_power()\n\nclass Chime_Player(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n        self.current_score = queue.Queue() # queue can act as interrupt or stop message\n        GPIO.setmode(GPIO.BCM)\n        self.gpios = [6,16,5,17,22]\n        self.chimes = []\n        for gpio in self.gpios: # note: could be written as a list comprehension\n            self.chimes.append(Chime(gpio))\n        self.start()\n\n    def stop_all_chime_power(self):\n        self.play_score(\"blank\") # queueing a nonexistent score name interrupts any score currently playing\n        for chime in self.chimes:\n            chime.stop_power()\n\n    def play_score(self,score_name):\n        print(\"score_name=\",score_name)\n        self.current_score.put(score_name)\n\n    def run(self):\n        while True:\n            try:\n                #score_name = self.current_score.get(True)\n                #score = scores[score_name]\n                score = self.current_score.get(True)\n                score = scores[score]\n                default_beat_period = score[\"default_beat_period\"]\n                beats = score[\"beats\"]\n                interrupt = False\n                for beat in beats:\n                    print(\"beat=\", beat)\n                    if interrupt:\n                        break\n                    #print(\"beat\",beat)\n                    for notes in beat:\n                        #print(\"notes\",notes)\n                        if interrupt:\n                            break\n                        for note in notes:\n                            #print(\"note\",note) \n                            self.chimes[note[\"pitch\"]].add_pulse_to_queue(note[\"period\"])\n                        if not self.current_score.empty():\n                            interrupt = True\n                            break\n                        time.sleep(default_beat_period)\n            except Exception as e:\n                print(e)\n                #self.stop_all_chime_power()\n\n###########\n# M A I N #\n###########\n\nclass Main(threading.Thread):\n    def __init__(self):\n        threading.Thread.__init__(self)\n        self.tb = thirtybirds.Thirtybirds(\n            settings, \n            app_path,\n            self.network_message_handler,\n            self.network_status_change_handler,\n            self.exception_handler\n        )\n        self.deadman = deadman.Deadman_Switch(self.tb)\n        self.queue = queue.Queue()\n        self.hostname = self.tb.get_hostname()\n\n        self.tb.subscribe_to_topic(\"cmd_all_off\")\n        self.tb.subscribe_to_topic(\"cmd_play_score\")\n        
self.tb.subscribe_to_topic(\"cmd_set_phrase\")\n self.tb.subscribe_to_topic(\"cmd_set_number\")\n self.tb.subscribe_to_topic(\"request_display_leds_present\")\n self.tb.subscribe_to_topic(\"request_display_solenoids_present\")\n self.tb.subscribe_to_topic(\"request_system_tests\")\n self.tb.subscribe_to_topic(\"request_computer_details\")\n self.tb.subscribe_to_topic(\"request_current_sensor_nominal\")\n self.tb.subscribe_to_topic(\"request_current_sensor_present\")\n self.tb.subscribe_to_topic(\"request_current_sensor_value\")\n\n self.tb.publish(\"connected\", True)\n self.chime_player = Chime_Player()\n self.acrylic_display = Acrylic_Display()\n if self.tb.get_hostname() == 'pinball1game':\n self.power_sensor = ina260.INA260()\n self.tb.subscribe_to_topic(\"get_current\")\n self.start()\n\n\n def request_system_tests(self):\n # computer details\n self.tb.publish(\n topic=\"response_current_sensor_nominal\",\n message=self.request_current_sensor_nominal()\n )\n self.tb.publish(\n topic=\"response_display_leds_present\", \n message=self.request_display_leds_present()\n )\n self.tb.publish(\n topic=\"response_display_solenoids_present\", \n message=self.response_display_solenoids_present()\n )\n\n def request_display_solenoids_present(self):\n # This needs to be finished after the current sensor works\n return [True,True,True,True,True]\n\n def request_display_leds_present(self):\n # This needs to be finished after the current sensor works\n return {\n \"phrases\":[True,True,True,True,True],\n \"numerals\":{\n \"a\":[True,True,True,True,True,True,True,True,True,True],\n \"b\":[True,True,True,True,True,True,True,True,True,True],\n \"c\":[True,True,True,True,True,True,True,True,True,True],\n }\n }\n\n def request_computer_details(self):\n return {\n \"df\":self.tb.get_system_disk(),\n \"cpu_temp\":self.tb.get_core_temp(),\n \"pinball_git_timestamp\":self.tb.app_get_git_timestamp(),\n \"tb_git_timestamp\":self.tb.tb_get_git_timestamp(),\n }\n\n def request_current_sensor_nominal(self):\n # TODO: Make the ACTUAL tests here.\n return True\n\n def request_current_sensor_present(self):\n # TODO: Make the ACTUAL tests here.\n return True\n\n def request_current_sensor_value(self):\n # TODO: Make the ACTUAL tests here.\n return 0.0\n\n def status_receiver(self, msg):\n print(\"status_receiver\", msg)\n def network_message_handler(self, topic, message, origin, destination):\n self.add_to_queue(topic, message, origin, destination)\n def exception_handler(self, exception):\n print(\"exception_handler\",exception)\n def network_status_change_handler(self, status, hostname):\n print(\"network_status_change_handler\", status, hostname)\n def add_to_queue(self, topic, message, origin, destination):\n self.queue.put((topic, message, origin, destination))\n\n def run(self):\n while True:\n try:\n topic, message, origin, destination = self.queue.get(True)\n print(topic, message)\n if topic == b'cmd_all_off':\n self.acrylic_display.set_all_off()\n self.chime_player.stop_all_chime_power()\n\n if topic == b'cmd_play_score':\n if destination == self.tb.get_hostname():\n self.chime_player.play_score(message)\n\n if topic == b'cmd_set_number':\n if destination == self.tb.get_hostname():\n self.acrylic_display.set_number(message)\n \n if topic == b'cmd_set_phrase':\n if destination == self.tb.get_hostname():\n self.acrylic_display.set_phrase(message)\n\n if topic == b'request_computer_details':\n self.tb.publish(\n topic=\"response_computer_details\", \n message=self.request_computer_details()\n )\n\n if topic == 
b'request_current_sensor_nominal':\n self.tb.publish(\n topic=\"response_current_sensor_nominal\",\n message=self.request_current_sensor_nominal()\n )\n\n if topic == b'request_current_sensor_present':\n self.tb.publish(\n topic=\"response_current_sensor_present\",\n message=self.request_current_sensor_present()\n )\n\n if topic == b'request_current_sensor_value':\n self.tb.publish(\n topic=\"response_current_sensor_value\",\n message=self.request_current_sensor_value()\n )\n\n if topic == b'request_display_leds_present':\n self.tb.publish(\n topic=\"response_display_leds_present\", \n message=self.request_display_leds_present()\n )\n \n if topic == b'request_display_solenoids_present':\n self.tb.publish(\n topic=\"response_display_solenoids_present\", \n message=self.request_display_solenoids_present()\n )\n\n if topic == b'request_system_tests':\n self.request_system_tests()\n\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print(e, repr(traceback.format_exception(exc_type, exc_value,exc_traceback)))\n\nmain = Main()\n", "repo_name": "andycavatorta/pinball", "sub_path": "roles/display/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 17516, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 44, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.system_test.descending_scale", "line_number": 58, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.system_test", "line_number": 58, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.c_piano", "line_number": 59, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 59, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.c_mezzo", "line_number": 60, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 60, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.c_forte", "line_number": 61, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 61, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.asharp_piano", "line_number": 62, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 62, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.asharp_mezzo", "line_number": 63, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 63, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.asharp_forte", "line_number": 64, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 64, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.gsharp_piano", "line_number": 65, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 65, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.gsharp_mezzo", 
"line_number": 66, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 66, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.gsharp_forte", "line_number": 67, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 67, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.g_piano", "line_number": 68, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 68, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.g_mezzo", "line_number": 69, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 69, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.g_forte", "line_number": 70, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 70, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.f_piano", "line_number": 71, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 71, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.f_mezzo", "line_number": 72, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 72, "usage_type": "name"}, {"api_name": "roles.display.score_by_mode.singles.f_forte", "line_number": 73, "usage_type": "attribute"}, {"api_name": "roles.display.score_by_mode.singles", "line_number": 73, "usage_type": "name"}, {"api_name": "thirtybirds3.adapters.gpio.hc595.HC595_shift_reg.HC595", "line_number": 102, "usage_type": "call"}, {"api_name": "thirtybirds3.adapters.gpio.hc595.HC595_shift_reg", "line_number": 102, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 207, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 209, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 209, "usage_type": "attribute"}, {"api_name": "queue.Queue", "line_number": 210, "usage_type": "call"}, {"api_name": "RPi.GPIO.setup", "line_number": 212, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 212, "usage_type": "name"}, {"api_name": "RPi.GPIO.OUT", "line_number": 212, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 218, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 218, "usage_type": "name"}, {"api_name": "RPi.GPIO.LOW", "line_number": 218, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.output", "line_number": 222, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 222, "usage_type": "name"}, {"api_name": "RPi.GPIO.HIGH", "line_number": 222, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 233, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 241, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 243, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 243, "usage_type": "attribute"}, {"api_name": "queue.Queue", "line_number": 244, "usage_type": "call"}, {"api_name": "RPi.GPIO.setmode", "line_number": 245, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 245, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 245, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 286, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 295, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", 
"line_number": 297, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 297, "usage_type": "attribute"}, {"api_name": "thirtybirds3.thirtybirds.Thirtybirds", "line_number": 298, "usage_type": "call"}, {"api_name": "thirtybirds3.thirtybirds", "line_number": 298, "usage_type": "name"}, {"api_name": "common.deadman.Deadman_Switch", "line_number": 305, "usage_type": "call"}, {"api_name": "common.deadman", "line_number": 305, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 306, "usage_type": "call"}, {"api_name": "thirtybirds3.adapters.sensors.ina260_current_sensor.INA260", "line_number": 325, "usage_type": "call"}, {"api_name": "thirtybirds3.adapters.sensors.ina260_current_sensor", "line_number": 325, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 452, "usage_type": "call"}, {"api_name": "traceback.format_exception", "line_number": 453, "usage_type": "call"}]} +{"seq_id": "4713386923", "text": "import numpy as np\nimport tensorflow as tf\nfrom baselines.common import tf_util as U\nfrom baselines.common.tests.test_with_mpi import with_mpi\nfrom baselines import logger\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\nclass MpiAdamOptimizer(tf.train.AdamOptimizer):\n \"\"\"Adam optimizer that averages gradients across mpi processes.\"\"\"\n def __init__(self, comm, grad_clip=None, mpi_rank_weight=1, **kwargs):\n self.comm = comm\n self.grad_clip = grad_clip\n self.mpi_rank_weight = mpi_rank_weight\n tf.train.AdamOptimizer.__init__(self, **kwargs)\n def compute_gradients(self, loss, var_list, **kwargs):\n grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)\n grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]\n flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0) * self.mpi_rank_weight\n shapes = [v.shape.as_list() for g, v in grads_and_vars]\n sizes = [int(np.prod(s)) for s in shapes]\n\n total_weight = np.zeros(1, np.float32)\n self.comm.Allreduce(np.array([self.mpi_rank_weight], dtype=np.float32), total_weight, op=MPI.SUM)\n total_weight = total_weight[0]\n\n buf = np.zeros(sum(sizes), np.float32)\n countholder = [0] # Counts how many times _collect_grads has been called\n stat = tf.reduce_sum(grads_and_vars[0][1]) # sum of first variable\n def _collect_grads(flat_grad, np_stat):\n if self.grad_clip is not None:\n gradnorm = np.linalg.norm(flat_grad)\n if gradnorm > 1:\n flat_grad /= gradnorm\n logger.logkv_mean('gradnorm', gradnorm)\n logger.logkv_mean('gradclipfrac', float(gradnorm > 1))\n self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)\n np.divide(buf, float(total_weight), out=buf)\n if countholder[0] % 100 == 0:\n check_synced(np_stat, self.comm)\n countholder[0] += 1\n return buf\n\n avg_flat_grad = tf.py_func(_collect_grads, [flat_grad, stat], tf.float32)\n avg_flat_grad.set_shape(flat_grad.shape)\n avg_grads = tf.split(avg_flat_grad, sizes, axis=0)\n avg_grads_and_vars = [(tf.reshape(g, v.shape), v)\n for g, (_, v) in zip(avg_grads, grads_and_vars)]\n return avg_grads_and_vars\n\ndef check_synced(localval, comm=None):\n \"\"\"\n It's common to forget to initialize your variables to the same values, or\n (less commonly) if you update them in some other way than adam, to get them out of sync.\n This function checks that variables on all MPI workers are the same, and raises\n an AssertionError otherwise\n\n Arguments:\n comm: MPI communicator\n localval: list of local variables (list of variables on current worker to be compared with 
the other workers)\n \"\"\"\n comm = comm or MPI.COMM_WORLD\n vals = comm.gather(localval)\n if comm.rank == 0:\n assert all(val==vals[0] for val in vals[1:]),\\\n 'MpiAdamOptimizer detected that different workers have different weights: {}'.format(vals)\n\n@with_mpi(timeout=5)\ndef test_nonfreeze():\n np.random.seed(0)\n tf.set_random_seed(0)\n\n a = tf.Variable(np.random.randn(3).astype('float32'))\n b = tf.Variable(np.random.randn(2,5).astype('float32'))\n loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))\n\n stepsize = 1e-2\n # for some reason the session config with inter_op_parallelism_threads was causing\n # nested sess.run calls to freeze\n config = tf.ConfigProto(inter_op_parallelism_threads=1)\n sess = U.get_session(config=config)\n update_op = MpiAdamOptimizer(comm=MPI.COMM_WORLD, learning_rate=stepsize).minimize(loss)\n sess.run(tf.global_variables_initializer())\n losslist_ref = []\n for i in range(100):\n l,_ = sess.run([loss, update_op])\n print(i, l)\n losslist_ref.append(l)\n", "repo_name": "openai/baselines", "sub_path": "baselines/common/mpi_adam_optimizer.py", "file_name": "mpi_adam_optimizer.py", "file_ext": "py", "file_size_in_byte": 3976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14949, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mpi4py.MPI", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorflow.train", "line_number": 11, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer.__init__", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer.compute_gradients", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 26, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 34, "usage_type": "attribute"}, {"api_name": "baselines.logger.logkv_mean", "line_number": 37, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 37, "usage_type": "name"}, {"api_name": "baselines.logger.logkv_mean", "line_number": 38, "usage_type": "call"}, {"api_name": "baselines.logger", "line_number": 38, "usage_type": "name"}, {"api_name": "mpi4py.MPI.SUM", "line_number": 39, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.divide", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.py_func", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 46, "usage_type": 
"attribute"}, {"api_name": "tensorflow.split", "line_number": 48, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 49, "usage_type": "call"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 64, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 76, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.sin", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 82, "usage_type": "call"}, {"api_name": "baselines.common.tf_util.get_session", "line_number": 83, "usage_type": "call"}, {"api_name": "baselines.common.tf_util", "line_number": 83, "usage_type": "name"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 84, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 84, "usage_type": "name"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 85, "usage_type": "call"}, {"api_name": "baselines.common.tests.test_with_mpi.with_mpi", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "6238186700", "text": "import strawberry\nimport datetime\nimport typing\nimport uuid\n\nfrom typing import Annotated\n\nfrom utils.Dataloaders import getLoadersFromInfo, getUserFromInfo\nfrom .BaseGQLModel import BaseGQLModel\n\nfrom GraphTypeDefinitions._GraphResolvers import (\n resolve_id,\n resolve_name,\n resolve_name_en,\n resolve_changedby,\n resolve_created,\n resolve_lastchange,\n resolve_createdby,\n resolve_rbacobject,\n createRootResolver_by_id,\n createRootResolver_by_page\n)\n\nPartGQLModel = Annotated[\"PartGQLModel\", strawberry.lazy(\".PartGQLModel\")]\nItemTypeGQLModel = Annotated[\"ItemTypeGQLModel\", strawberry.lazy(\".ItemTypeGQLModel\")]\n\n@strawberry.input\nclass ItemUpdateGQLModel:\n lastchange: datetime.datetime\n name: typing.Optional[str] = None\n order: typing.Optional[int] = None\n value: typing.Optional[str] = None\n type_id: typing.Optional[uuid.UUID] = None\n\n@strawberry.federation.type(\n keys=[\"id\"], \n name=\"FormItemGQLModel\",\n description=\"\"\"Type representing an item in the form\"\"\"\n)\nclass ItemGQLModel(BaseGQLModel):\n @classmethod\n def getLoader(cls, info):\n return getLoadersFromInfo(info).items\n \n # @classmethod\n # async def resolve_reference(cls, info: strawberry.types.Info, id: uuid.UUID):\n # implementation is inherited\n\n id = resolve_id\n name = resolve_name\n changedby = resolve_changedby\n lastchange = resolve_lastchange\n created = resolve_created\n createdby = resolve_createdby\n name_en = resolve_name_en\n rbacobject = resolve_rbacobject\n\n @strawberry.field(description=\"\"\"Item's order\"\"\")\n def order(self) -> int:\n return self.order if self.order else 0\n\n 
@strawberry.field(description=\"\"\"Item's value \"\"\")\n def value(self) -> str:\n return self.value\n\n @strawberry.field(description=\"Retrieves the part owning the item\")\n async def part(self, info: strawberry.types.Info) -> typing.Optional[\"PartGQLModel\"]:\n from .PartGQLModel import PartGQLModel\n result = await PartGQLModel.resolve_reference(info, self.part_id)\n return result\n\n @strawberry.field(description=\"Retrieves the item type\")\n async def type(self, info: strawberry.types.Info) -> typing.Optional[\"ItemTypeGQLModel\"]:\n from .ItemTypeGQLModel import ItemTypeGQLModel\n result = None if self.type_id is None else await ItemTypeGQLModel.resolve_reference(info=info, id=self.type_id)\n return result\n\n#############################################################\n#\n# Queries\n#\n#############################################################\n\n@strawberry.field(description=\"Retrieves an item by its id\")\nasync def item_by_id(\n self, info: strawberry.types.Info, id: uuid.UUID\n) -> typing.Optional[ItemGQLModel]:\n result = await ItemGQLModel.resolve_reference(info=info, id=id)\n return result\n\nfrom dataclasses import dataclass\nfrom uoishelpers.resolvers import createInputs\n\n@createInputs\n@dataclass\nclass FormItemWhereFilter:\n name: str\n name_en: str\n type_id: uuid.UUID\n value: str\n\n@strawberry.field(description=\"Retrieves a page of items\")\nasync def item_page(\n self, info: strawberry.types.Info, skip: int = 0, limit: int = 0,\n where: typing.Optional[FormItemWhereFilter] = None\n) -> typing.List[ItemGQLModel]:\n loader = getLoadersFromInfo(info).items\n wf = None if where is None else strawberry.asdict(where)\n result = await loader.page(skip, limit, where=wf)\n return result\n#############################################################\n#\n# Mutations\n#\n#############################################################\n\n@strawberry.input(description=\"Input structure - C operation\")\nclass FormItemInsertGQLModel:\n name: str = strawberry.field(description=\"Item name\")\n part_id: uuid.UUID = strawberry.field(description=\"id of parent entity\")\n\n id: typing.Optional[uuid.UUID] = strawberry.field(description=\"primary key (UUID), could be client generated\", default=None)\n value: typing.Optional[str] = None\n order: typing.Optional[int] = strawberry.field(description=\"Position in parent entity\", default=None)\n type_id: typing.Optional[uuid.UUID] = None\n createdby: strawberry.Private[uuid.UUID] = None \n rbacobject: strawberry.Private[uuid.UUID] = None \n \n\n@strawberry.input(description=\"Input structure - U operation\")\nclass FormItemUpdateGQLModel:\n lastchange: datetime.datetime = strawberry.field(description=\"timestamp of last change = TOKEN\")\n id: uuid.UUID = strawberry.field(description=\"primary key (UUID), identifies object of operation\")\n\n name: typing.Optional[str] = strawberry.field(description=\"Item name\", default=None)\n value: typing.Optional[str] = None\n order: typing.Optional[int] = strawberry.field(description=\"Position in parent entity\", default=None)\n type_id: typing.Optional[uuid.UUID] = None\n changedby: strawberry.Private[uuid.UUID] = None\n \n@strawberry.type(description=\"Result of CU operations\")\nclass FormItemResultGQLModel:\n id: uuid.UUID = strawberry.field(description=\"primary key of CU operation object\")\n msg: str = strawberry.field(description=\"\"\"Should be `ok` if the desired state has been reached, otherwise `fail`.\nFor update operations, `fail` should also be stated when a bad lastchange has been entered.\"\"\")
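\n\n # (editor note) the resolver below re-loads the item by id, so a mutation response can query the post-write state directly.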
entered.\"\"\")\n\n @strawberry.field(description=\"\"\"Result of item operation\"\"\")\n async def item(self, info: strawberry.types.Info) -> typing.Optional[ItemGQLModel]:\n result = await ItemGQLModel.resolve_reference(info, self.id)\n return result\n \n\n@strawberry.field(\n description=\"\"\"Updates a section.\"\"\"\n)\nasync def item_update(self, info: strawberry.types.Info, item: FormItemUpdateGQLModel) -> \"FormItemResultGQLModel\":\n user = getUserFromInfo(info)\n item.changedby = uuid.UUID(user[\"id\"])\n\n result = FormItemResultGQLModel(id=item.id, msg=\"ok\")\n loader = getLoadersFromInfo(info).items\n row = await loader.update(item)\n result.msg = \"fail\" if row is None else \"ok\"\n return result \n\n\n@strawberry.field(\n description=\"\"\"C operation\"\"\"\n)\nasync def item_insert(self, info: strawberry.types.Info, item: FormItemInsertGQLModel) -> \"FormItemResultGQLModel\":\n user = getUserFromInfo(info)\n item.createdby = uuid.UUID(user[\"id\"])\n\n # form as the parent of new section is checked\n # rbacobject is retrieved and assigned to section.rbacobject\n # rbacobject is shared among form, its sections and parts\n partloader = getLoadersFromInfo(info).parts\n part = await partloader.load(item.part_id)\n assert part is not None, f\"{item.part_id} is unknown part (of section) (during item insert)\"\n item.rbacobject = part.rbacobject\n\n loader = getLoadersFromInfo(info).items\n row = await loader.insert(item)\n result = FormItemResultGQLModel(id=item.id, msg=\"ok\")\n result.msg = \"fail\" if row is None else \"ok\"\n result.id = None if row is None else row.id\n return result ", "repo_name": "hrbolek/gql_forms", "sub_path": "GraphTypeDefinitions/ItemGQLModel.py", "file_name": "ItemGQLModel.py", "file_ext": "py", "file_size_in_byte": 6901, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Annotated", "line_number": 24, "usage_type": "name"}, {"api_name": "strawberry.lazy", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.Annotated", "line_number": 25, "usage_type": "name"}, {"api_name": "strawberry.lazy", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 31, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 32, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 33, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 33, "usage_type": "attribute"}, {"api_name": "strawberry.input", "line_number": 27, "usage_type": "attribute"}, {"api_name": "BaseGQLModel.BaseGQLModel", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.Dataloaders.getLoadersFromInfo", "line_number": 43, "usage_type": "call"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_id", "line_number": 49, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_name", "line_number": 50, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_changedby", "line_number": 51, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_lastchange", "line_number": 52, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_created", "line_number": 53, "usage_type": "name"}, {"api_name": 
"GraphTypeDefinitions._GraphResolvers.resolve_createdby", "line_number": 54, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_name_en", "line_number": 55, "usage_type": "name"}, {"api_name": "GraphTypeDefinitions._GraphResolvers.resolve_rbacobject", "line_number": 56, "usage_type": "name"}, {"api_name": "strawberry.field", "line_number": 58, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 62, "usage_type": "call"}, {"api_name": "strawberry.types", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PartGQLModel.PartGQLModel.resolve_reference", "line_number": 69, "usage_type": "call"}, {"api_name": "PartGQLModel.PartGQLModel", "line_number": 69, "usage_type": "name"}, {"api_name": "strawberry.field", "line_number": 66, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 67, "usage_type": "attribute"}, {"api_name": "strawberry.types", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ItemTypeGQLModel.ItemTypeGQLModel.resolve_reference", "line_number": 75, "usage_type": "call"}, {"api_name": "ItemTypeGQLModel.ItemTypeGQLModel", "line_number": 75, "usage_type": "name"}, {"api_name": "strawberry.field", "line_number": 72, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 73, "usage_type": "attribute"}, {"api_name": "strawberry.federation.type", "line_number": 35, "usage_type": "call"}, {"api_name": "strawberry.federation", "line_number": 35, "usage_type": "attribute"}, {"api_name": "strawberry.types", "line_number": 86, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 86, "usage_type": "attribute"}, {"api_name": "{'PartGQLModel': 'PartGQLModel.PartGQLModel', 'ItemTypeGQLModel': 'ItemTypeGQLModel.ItemTypeGQLModel'}.resolve_reference", "line_number": 88, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 84, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 87, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 99, "usage_type": "attribute"}, {"api_name": "uoishelpers.resolvers.createInputs", "line_number": 94, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 95, "usage_type": "name"}, {"api_name": "strawberry.types", "line_number": 104, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 105, "usage_type": "attribute"}, {"api_name": "utils.Dataloaders.getLoadersFromInfo", "line_number": 107, "usage_type": "call"}, {"api_name": "strawberry.asdict", "line_number": 108, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 102, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 106, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 119, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 120, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 120, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 122, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 122, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 122, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 124, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 124, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 125, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 125, "usage_type": 
"attribute"}, {"api_name": "strawberry.Private", "line_number": 126, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 126, "usage_type": "attribute"}, {"api_name": "strawberry.Private", "line_number": 127, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 127, "usage_type": "attribute"}, {"api_name": "strawberry.input", "line_number": 117, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 132, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 133, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 133, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 135, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 135, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 136, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 137, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 137, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 138, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 138, "usage_type": "attribute"}, {"api_name": "strawberry.Private", "line_number": 139, "usage_type": "attribute"}, {"api_name": "uuid.UUID", "line_number": 139, "usage_type": "attribute"}, {"api_name": "strawberry.input", "line_number": 130, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 143, "usage_type": "attribute"}, {"api_name": "strawberry.field", "line_number": 143, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 144, "usage_type": "call"}, {"api_name": "strawberry.types", "line_number": 148, "usage_type": "attribute"}, {"api_name": "{'PartGQLModel': 'PartGQLModel.PartGQLModel', 'ItemTypeGQLModel': 'ItemTypeGQLModel.ItemTypeGQLModel'}.resolve_reference", "line_number": 149, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 147, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 148, "usage_type": "attribute"}, {"api_name": "strawberry.type", "line_number": 141, "usage_type": "call"}, {"api_name": "strawberry.types", "line_number": 156, "usage_type": "attribute"}, {"api_name": "utils.Dataloaders.getUserFromInfo", "line_number": 157, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.Dataloaders.getLoadersFromInfo", "line_number": 161, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 153, "usage_type": "call"}, {"api_name": "strawberry.types", "line_number": 170, "usage_type": "attribute"}, {"api_name": "utils.Dataloaders.getUserFromInfo", "line_number": 171, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.Dataloaders.getLoadersFromInfo", "line_number": 177, "usage_type": "call"}, {"api_name": "utils.Dataloaders.getLoadersFromInfo", "line_number": 182, "usage_type": "call"}, {"api_name": "strawberry.field", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "27767095977", "text": "import re\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\n__FILENAMES__ = {\n \"foreign\": \"-foreign-\",\n \"self\": \"-self-\",\n \"download\": \"-throughput-download-\",\n \"upload\": \"-throughput-upload-\",\n \"granular\": \"-throughput-granular-\",\n}\n\n\ndef seconds_since_start(dfs, start, column_name=\"SecondsSinceStart\"):\n \"\"\"\n Adds \"Seconds 
Since Start\" column to all DataFrames in List of DataFrames,\n based on \"CreationTime\" column within them and start time passed.\n\n :param dfs: List of DataFrames. Each DataFrame MUST contain DateTime column named \"CreationTime\"\n :param start: DateTime start time\n :param column_name: String of column name to add, default \"SecondsSinceStart\"\n :return: Inplace addition of column using passed column name\n \"\"\"\n for df in dfs:\n df[column_name] = (df[\"CreationTime\"]-start).apply(pd.Timedelta.total_seconds)\n\n\ndef find_earliest(dfs):\n \"\"\"\n Returns earliest DateTime in List of DataFrames based on \"CreationTime\" column within them.\n ASSUMES DATAFRAMES ARE SORTED\n\n :param dfs: List of DataFrames. Each DataFrame MUST contain DateTime column named \"CreationTime\" and MUST BE SORTED by it.\n :return: DateTime of earliest time within all dfs.\n \"\"\"\n earliest = dfs[0][\"CreationTime\"].iloc[0]\n for df in dfs:\n if df[\"CreationTime\"].iloc[0] < earliest:\n earliest = df[\"CreationTime\"].iloc[0]\n return earliest\n\n\ndef timeSinceStart(dfs, start):\n \"\"\"\n Adds \"TimeSinceStart\" column to all dataframes\n :param dfs:\n :param start:\n :return:\n \"\"\"\n for df in dfs:\n df[\"TimeSinceStart\"] = df[\"CreationTime\"]-start\n\ndef probeClean(df):\n # ConnRTT and ConnCongestionWindow refer to Underlying Connection\n df.columns = [\"CreationTime\", \"NumRTT\", \"Duration\", \"ConnRTT\", \"ConnCongestionWindow\", \"Type\", \"Empty\"]\n df = df.drop(columns=[\"Empty\"])\n df[\"CreationTime\"] = pd.to_datetime(df[\"CreationTime\"], format=\"%m-%d-%Y-%H-%M-%S.%f\")\n df[\"Type\"] = df[\"Type\"].apply(str.strip)\n df[\"ADJ_Duration\"] = df[\"Duration\"] / df[\"NumRTT\"]\n df = df.sort_values(by=[\"CreationTime\"])\n return df\n\n\ndef throughputClean(df):\n df.columns = [\"CreationTime\", \"Throughput\", \"NumberConnections\", \"Empty\"]\n df = df.drop(columns=[\"Empty\"])\n df[\"CreationTime\"] = pd.to_datetime(df[\"CreationTime\"], format=\"%m-%d-%Y-%H-%M-%S.%f\")\n df[\"ADJ_Throughput\"] = df[\"Throughput\"] / 1000000\n df = df.sort_values(by=[\"CreationTime\"])\n return df\n\n\ndef granularClean(df):\n df.columns = [\"CreationTime\", \"Throughput\", \"ID\", \"Type\", \"Empty\"]\n df = df.drop(columns=[\"Empty\"])\n df[\"CreationTime\"] = pd.to_datetime(df[\"CreationTime\"], format=\"%m-%d-%Y-%H-%M-%S.%f\")\n df[\"Type\"] = df[\"Type\"].apply(str.strip)\n df[\"ADJ_Throughput\"] = df[\"Throughput\"] / 1000000\n df = df.sort_values(by=[\"CreationTime\"])\n return df\n\n\ndef make90Percentile(df):\n df = df.sort_values(by=[\"ADJ_Duration\"])\n df = df.reset_index()\n df = df.iloc[:int(len(df)*.9)]\n df = df.sort_values(by=[\"CreationTime\"])\n return df\n\n\ndef main(title, paths):\n # Data Ingestion\n foreign = pd.read_csv(paths[\"foreign\"])\n self = pd.read_csv(paths[\"self\"])\n download = pd.read_csv(paths[\"download\"])\n upload = pd.read_csv(paths[\"upload\"])\n granular = pd.read_csv(paths[\"granular\"])\n\n # Data Cleaning\n foreign = probeClean(foreign)\n self = probeClean(self)\n download = throughputClean(download)\n upload = throughputClean(upload)\n granular = granularClean(granular)\n\n # Data Separation\n selfUp = self[self[\"Type\"] == \"SelfUp\"]\n selfUp = selfUp.reset_index()\n selfDown = self[self[\"Type\"] == \"SelfDown\"]\n selfDown = selfDown.reset_index()\n granularUp = granular[granular[\"Type\"] == \"Upload\"]\n granularUp = granularUp.reset_index()\n granularDown = granular[granular[\"Type\"] == \"Download\"]\n granularDown = 
granularDown.reset_index()\n\n\n\n # Moving Average\n foreign[\"DurationMA5\"] = foreign[\"ADJ_Duration\"].rolling(window=5).mean()\n selfUp[\"DurationMA5\"] = selfUp[\"ADJ_Duration\"].rolling(window=5).mean()\n selfDown[\"DurationMA5\"] = selfDown[\"ADJ_Duration\"].rolling(window=5).mean()\n\n # Normalize\n dfs = [foreign, selfUp, selfDown, download, upload, granularUp, granularDown]\n timeSinceStart(dfs, find_earliest(dfs))\n seconds_since_start(dfs, find_earliest(dfs))\n\n yCol = \"SecondsSinceStart\"\n\n def GraphNormal():\n ########## Graphing Complete\n fig, ax = plt.subplots()\n ax.set_title(title)\n ax.plot(foreign[yCol], foreign[\"ADJ_Duration\"], \"b.\", label=\"foreign\")\n ax.plot(selfUp[yCol], selfUp[\"ADJ_Duration\"], \"r.\", label=\"selfUP\")\n ax.plot(selfDown[yCol], selfDown[\"ADJ_Duration\"], \"c.\", label=\"selfDOWN\")\n ax.plot(foreign[yCol], foreign[\"DurationMA5\"], \"b--\", label=\"foreignMA\")\n ax.plot(selfUp[yCol], selfUp[\"DurationMA5\"], \"r--\", label=\"selfUPMA\")\n ax.plot(selfDown[yCol], selfDown[\"DurationMA5\"], \"c--\", label=\"selfDOWNMA\")\n ax.set_ylim([0, max(foreign[\"ADJ_Duration\"].max(), self[\"ADJ_Duration\"].max())])\n ax.legend(loc=\"upper left\")\n\n secax = ax.twinx()\n secax.plot(download[yCol], download[\"ADJ_Throughput\"], \"g-\", label=\"download (MB/s)\")\n secax.plot(granularDown[granularDown[\"ID\"] == 0][yCol], granularDown[granularDown[\"ID\"] == 0][\"ADJ_Throughput\"], \"g--\", label=\"Download Connection 0 (MB/S)\")\n secax.plot(upload[yCol], upload[\"ADJ_Throughput\"], \"y-\", label=\"upload (MB/s)\")\n secax.plot(granularUp[granularUp[\"ID\"] == 0][yCol], granularUp[granularUp[\"ID\"] == 0][\"ADJ_Throughput\"], \"y--\", label=\"Upload Connection 0 (MB/S)\")\n secax.legend(loc=\"upper right\")\n #GraphNormal()\n\n def StackedThroughput():\n ########## Graphing Stacked\n fig, ax = plt.subplots()\n ax.set_title(title + \" Granular Throughput\")\n # ax.plot(foreign[yCol], foreign[\"ADJ_Duration\"], \"b.\", label=\"foreign\")\n # ax.plot(selfUp[yCol], selfUp[\"ADJ_Duration\"], \"r.\", label=\"selfUP\")\n # ax.plot(selfDown[yCol], selfDown[\"ADJ_Duration\"], \"c.\", label=\"selfDOWN\")\n # ax.plot(foreign[yCol], foreign[\"DurationMA5\"], \"b--\", label=\"foreignMA\")\n # ax.plot(selfUp[yCol], selfUp[\"DurationMA5\"], \"r--\", label=\"selfUPMA\")\n # ax.plot(selfDown[yCol], selfDown[\"DurationMA5\"], \"c--\", label=\"selfDOWNMA\")\n # ax.set_ylim([0, max(foreign[\"ADJ_Duration\"].max(), self[\"ADJ_Duration\"].max())])\n # ax.legend(loc=\"upper left\")\n\n secax = ax.twinx()\n secax.plot(download[yCol], download[\"ADJ_Throughput\"], \"g-\", label=\"download (MB/s)\")\n secax.plot(upload[yCol], upload[\"ADJ_Throughput\"], \"y-\", label=\"upload (MB/s)\")\n\n granularDown[\"bucket\"] = granularDown[\"SecondsSinceStart\"].round(0)\n buckets = pd.DataFrame(granularDown[\"bucket\"].unique())\n buckets.columns = [\"bucket\"]\n buckets = buckets.set_index(\"bucket\")\n buckets[\"SecondsSinceStart\"] = granularDown.drop_duplicates(subset=[\"bucket\"]).reset_index()[\"SecondsSinceStart\"]\n buckets[\"bottom\"] = 0\n for id in sorted(granularDown[\"ID\"].unique()):\n secax.bar(granularDown[yCol][granularDown[\"ID\"] == id] + .05,\n granularDown[\"ADJ_Throughput\"][granularDown[\"ID\"] == id],\n width=.09, bottom=buckets.iloc[len(buckets) - len(granularDown[granularDown[\"ID\"] == id]):][\"bottom\"]\n )\n # ,label=f\"Download Connection {id}\")\n buckets[\"toadd_bottom\"] = (granularDown[granularDown[\"ID\"] == 
id]).set_index(\"bucket\")[\"ADJ_Throughput\"]\n buckets[\"toadd_bottom\"] = buckets[\"toadd_bottom\"].fillna(0)\n buckets[\"bottom\"] += buckets[\"toadd_bottom\"]\n\n\n granularUp[\"bucket\"] = granularUp[\"SecondsSinceStart\"].round(0)\n buckets = pd.DataFrame(granularUp[\"bucket\"].unique())\n buckets.columns = [\"bucket\"]\n buckets = buckets.set_index(\"bucket\")\n buckets[\"SecondsSinceStart\"] = granularUp.drop_duplicates(subset=[\"bucket\"]).reset_index()[\"SecondsSinceStart\"]\n buckets[\"bottom\"] = 0\n for id in sorted(granularUp[\"ID\"].unique()):\n secax.bar(granularUp[yCol][granularUp[\"ID\"] == id] - .05, granularUp[\"ADJ_Throughput\"][granularUp[\"ID\"] == id],\n width=.09, bottom=buckets.iloc[len(buckets) - len(granularUp[granularUp[\"ID\"] == id]):][\"bottom\"]\n )\n #,label=f\"Upload Connection {id}\")\n buckets[\"toadd_bottom\"] = (granularUp[granularUp[\"ID\"] == id]).set_index(\"bucket\")[\"ADJ_Throughput\"]\n buckets[\"toadd_bottom\"] = buckets[\"toadd_bottom\"].fillna(0)\n buckets[\"bottom\"] += buckets[\"toadd_bottom\"]\n secax.legend(loc=\"upper right\")\n\n\n secax.legend(loc=\"upper left\")\n\n #StackedThroughput()\n stacked_bar_throughput(upload, granularUp, \"SecondsSinceStart\", \"ADJ_Throughput\", title + \" Upload Stacked\",\n \"Upload Throughput MB/s\")\n stacked_bar_throughput(download, granularDown, \"SecondsSinceStart\", \"ADJ_Throughput\", title + \" Download Stacked\",\n \"Download Throughput MB/s\")\n\n def Percent90():\n ######### Graphing Removing 90th Percentile\n nonlocal selfUp\n nonlocal selfDown\n nonlocal foreign\n selfUp = make90Percentile(selfUp)\n selfDown = make90Percentile(selfDown)\n foreign = make90Percentile(foreign)\n\n # Recalculate MA\n foreign[\"DurationMA5\"] = foreign[\"ADJ_Duration\"].rolling(window=5).mean()\n selfUp[\"DurationMA5\"] = selfUp[\"ADJ_Duration\"].rolling(window=5).mean()\n selfDown[\"DurationMA5\"] = selfDown[\"ADJ_Duration\"].rolling(window=5).mean()\n\n # Graphing Complete\n fig, ax = plt.subplots()\n ax.set_title(title + \" 90th Percentile (ordered lowest to highest duration)\")\n # ax.plot(foreign[yCol], foreign[\"ADJ_Duration\"], \"b.\", label=\"foreign\")\n # ax.plot(selfUp[yCol], selfUp[\"ADJ_Duration\"], \"r.\", label=\"selfUP\")\n # ax.plot(selfDown[yCol], selfDown[\"ADJ_Duration\"], \"c.\", label=\"selfDOWN\")\n ax.plot(foreign[yCol], foreign[\"DurationMA5\"], \"b--\", label=\"foreignMA\")\n ax.plot(selfUp[yCol], selfUp[\"DurationMA5\"], \"r--\", label=\"selfUPMA\")\n ax.plot(selfDown[yCol], selfDown[\"DurationMA5\"], \"c--\", label=\"selfDOWNMA\")\n ax.set_ylim([0, max(foreign[\"ADJ_Duration\"].max(), selfUp[\"ADJ_Duration\"].max(), selfDown[\"ADJ_Duration\"].max())])\n ax.legend(loc=\"upper left\")\n\n secax = ax.twinx()\n secax.plot(download[yCol], download[\"ADJ_Throughput\"], \"g-\", label=\"download (MB/s)\")\n secax.plot(granularDown[granularDown[\"ID\"] == 0][yCol], granularDown[granularDown[\"ID\"] == 0][\"ADJ_Throughput\"],\n \"g--\", label=\"Download Connection 0 (MB/S)\")\n secax.plot(upload[yCol], upload[\"ADJ_Throughput\"], \"y-\", label=\"upload (MB/s)\")\n secax.plot(granularUp[granularUp[\"ID\"] == 0][yCol], granularUp[granularUp[\"ID\"] == 0][\"ADJ_Throughput\"], \"y--\",\n label=\"Upload Connection 0 (MB/S)\")\n secax.legend(loc=\"upper right\")\n\n Percent90()\n\ndef stacked_bar_throughput(df, granular, xcolumn, ycolumn, title, label):\n fig, ax = plt.subplots()\n ax.set_title(title)\n\n secax = ax.twinx()\n ax.get_yaxis().set_visible(False)\n ax.set_xlabel(\"Seconds Since Start 
(s)\")\n secax.set_ylabel(\"Throughput (MB/s)\")\n # secax.set_xticks(range(0, round(granular[xcolumn].max()) + 1)) # Ticks every 1 second\n\n # Plot Main Throughput\n secax.plot(df[xcolumn], df[ycolumn], \"k--\", label=label)\n\n df_gran = granular.copy()\n # df_gran[\"bucket\"] = df_gran[xcolumn].round(0) # With rounding\n df_gran[\"bucket\"] = df_gran[xcolumn] # Without rounding (csv creation time points need to be aligned)\n buckets = pd.DataFrame(df_gran[\"bucket\"].unique())\n buckets.columns = [\"bucket\"]\n buckets = buckets.set_index(\"bucket\")\n buckets[xcolumn] = df_gran.drop_duplicates(subset=[\"bucket\"]).reset_index()[xcolumn]\n buckets[\"bottom\"] = 0\n for id in sorted(df_gran[\"ID\"].unique()):\n secax.bar(df_gran[xcolumn][df_gran[\"ID\"] == id],\n df_gran[ycolumn][df_gran[\"ID\"] == id],\n width=.25, bottom=buckets.iloc[len(buckets) - len(df_gran[df_gran[\"ID\"] == id]):][\"bottom\"]\n )\n # ,label=f\"Download Connection {id}\")\n buckets[\"toadd_bottom\"] = (df_gran[df_gran[\"ID\"] == id]).set_index(\"bucket\")[ycolumn]\n buckets[\"toadd_bottom\"] = buckets[\"toadd_bottom\"].fillna(0)\n buckets[\"bottom\"] += buckets[\"toadd_bottom\"]\n\n secax.legend(loc=\"upper right\")\n\n\ndef findFiles(dir):\n matches = {}\n\n files = os.listdir(dir)\n for file in files:\n if os.path.isfile(dir+file):\n for name in __FILENAMES__:\n regex = \"(?P.*)(?P\" + __FILENAMES__[name] + \")(?P.*)\"\n match = re.match(regex, file)\n if match is not None:\n start = match.group(\"start\")\n end = match.group(\"end\")\n if start not in matches:\n matches[start] = {}\n if end not in matches[start]:\n matches[start][end] = {}\n if name in matches[start][end]:\n print(\"ERROR ALREADY FOUND A FILE THAT HAS THE SAME MATCHING\")\n matches[start][end][name] = dir+file\n return matches\n\ndef generatePaths():\n return {\n \"foreign\": \"\",\n \"self\": \"\",\n \"download\": \"\",\n \"upload\": \"\",\n \"granular\": \"\",\n }\n\ndef makeGraphs(files):\n for start in files:\n x = 0\n for end in files[start]:\n # Check if it contains all file fields\n containsALL = True\n for key in __FILENAMES__:\n if key not in files[start][end]:\n containsALL = False\n # If we don't have all files then loop to next one\n if not containsALL:\n continue\n\n main(start + \" - \" + str(x), files[start][end])\n x += 1\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n paths = generatePaths()\n\n files = findFiles(\"./Data/WillTest/\")\n print(files)\n makeGraphs(files)\n\n plt.show()", "repo_name": "Schickendantzj/RPMGraph", "sub_path": "main_with_granular.py", "file_name": "main_with_granular.py", "file_ext": "py", "file_size_in_byte": 14336, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.Timedelta", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 
134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 171, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 265, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 286, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 291, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 337, "usage_type": "name"}]} +{"seq_id": "24262728067", "text": "import pdb\nfrom collections import defaultdict\n\nFNAME = \"in14.txt\"\n\ndata = open(FNAME).read().split('\\n\\n')\npdb.set_trace()\nbase = data[0]\nproduct = {}\nfor line in data[1].splitlines():\n a, b = line.split(\" -> \")\n product[a] = b\n\n# iterate the comopund forward one generation\n# e.g. HH -> N means HH -> HNH\ndef step(compound):\n out = ''\n for i in range(len(compound) - 1):\n out += compound[i] + product[compound[i:i+2]]\n out += compound[-1]\n return out\n \n# the original way for Part 1 - obsoleced but maintained for reference\n#compound = base\n#for _ in range(10):\n# compound = step(compound)\n#\n#counts = [(compound.count(i), i) for i in compound]\n#counts.sort()\n#print(\"Part 1:\", counts[-1][0] - counts[0][0])\n\n# return the count profile of a compound x steps from now\n# will use recursion with memoization\nmemo = {}\ndef profile(compound, steps):\n \n if (compound, steps) in memo:\n return memo[(compound, steps)]\n \n # large compounds can just be broken into small compounds and recombined\n if len(compound) > 2:\n counts = defaultdict(int)\n for i in range(len(compound) - 1):\n subcounts = profile(compound[i:i+2], steps)\n for k, v in subcounts.items():\n counts[k] += v\n for c in compound[1:-1]: # correct for double counting the overlap\n counts[c] -= 1\n return counts \n \n # other than at the top level, we can just deal with compounds of size 2\n assert len(compound) == 2\n \n # base cases\n if steps == 1:\n countdict = defaultdict(int)\n for c in step(compound):\n countdict[c] += 1\n memo[(compound, steps)] = countdict\n return countdict\n \n # for a new compound, iterate it forward a generation,\n # then find the counts of its children and recombine\n counts = defaultdict(int)\n nextgen = step(compound)\n left = nextgen[:2]\n right = nextgen[-2:]\n countsleft = profile(left, steps - 1)\n countsright = profile(right, steps - 1)\n for k, v in countsleft.items():\n counts[k] += v\n for k, v in countsright.items():\n counts[k] += v\n counts[nextgen[1]] -= 1 # correct for double counting the middle character\n memo[(compound, steps)] = counts # save it for later!\n return counts\n \ncounts = profile(base, 10)\nprint(\"Part 1:\", max(counts.values()) - min(counts.values()))\ncounts = 
profile(base, 40)\nprint(\"Part 2:\", max(counts.values()) - min(counts.values()))\n\n#pdb.set_trace()\n", "repo_name": "stoberc/aoc2021", "sub_path": "p14.py", "file_name": "p14.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pdb.set_trace", "line_number": 7, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 42, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 56, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "41236598388", "text": "import json\r\nimport logging\r\nimport os\r\nimport tempfile\r\nimport asyncio\r\nimport platform\r\nimport aiohttp\r\nfrom yarl import URL\r\n\r\nimport pytest\r\nfrom aiohttp import ClientSession\r\nfrom injector import (ClassAssistedBuilder, Injector, Module, inject, provider,\r\n singleton)\r\n\r\nfrom backup.config import Config, Setting\r\nfrom backup.model import Coordinator\r\nfrom dev.simulationserver import SimulationServer\r\nfrom backup.drive import DriveRequests, DriveSource, FolderFinder, AuthCodeQuery\r\nfrom backup.util import GlobalInfo, Estimator, Resolver, DataCache\r\nfrom backup.ha import HaRequests, HaSource, HaUpdater\r\nfrom backup.logger import reset\r\nfrom backup.model import DummyBackup, DestinationPrecache, Model\r\nfrom backup.time import Time\r\nfrom backup.module import BaseModule\r\nfrom backup.debugworker import DebugWorker\r\nfrom backup.creds import Creds\r\nfrom backup.server import ErrorStore\r\nfrom backup.ha import AddonStopper\r\nfrom backup.ui import UiServer\r\nfrom backup.watcher import Watcher\r\nfrom .faketime import FakeTime\r\nfrom .helpers import Uploader, createBackupTar\r\nfrom dev.ports import Ports\r\nfrom dev.simulated_google import SimulatedGoogle\r\nfrom dev.request_interceptor import RequestInterceptor\r\nfrom dev.simulated_supervisor import SimulatedSupervisor\r\n\r\n\r\n@singleton\r\nclass FsFaker():\r\n @inject\r\n def __init__(self):\r\n self.bytes_free = 1024 * 1024 * 1024\r\n self.bytes_total = 1024 * 1024 * 1024\r\n self.old_method = None\r\n\r\n def start(self):\r\n if platform.system() != \"Windows\":\r\n self.old_method = os.statvfs\r\n os.statvfs = self._hijack\r\n\r\n def stop(self):\r\n if platform.system() != \"Windows\":\r\n os.statvfs = self.old_method\r\n\r\n def _hijack(self, path):\r\n return os.statvfs_result((0, 1, int(self.bytes_total), int(self.bytes_free), int(self.bytes_free), 0, 0, 0, 0, 255))\r\n\r\n def setFreeBytes(self, bytes_free, bytes_total=1):\r\n self.bytes_free = bytes_free\r\n self.bytes_total = bytes_total\r\n if self.bytes_free > self.bytes_total:\r\n self.bytes_total = self.bytes_free\r\n\r\n\r\nclass ReaderHelper:\r\n def __init__(self, session, ui_port, ingress_port):\r\n self.session = session\r\n self.ui_port = ui_port\r\n self.ingress_port = ingress_port\r\n self.timeout = aiohttp.ClientTimeout(total=20)\r\n\r\n def getUrl(self, ingress=True, ssl=False):\r\n if ssl:\r\n protocol = \"https\"\r\n else:\r\n protocol = \"http\"\r\n if ingress:\r\n return protocol + \"://localhost:\" + str(self.ingress_port) + \"/\"\r\n else:\r\n return protocol + \"://localhost:\" + str(self.ui_port) + \"/\"\r\n\r\n async def getjson(self, path, status=200, json=None, auth=None, ingress=True, ssl=False, sslcontext=None):\r\n async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, 
ssl=sslcontext, timeout=self.timeout) as resp:\r\n assert resp.status == status\r\n return await resp.json()\r\n\r\n async def get(self, path, status=200, json=None, auth=None, ingress=True, ssl=False):\r\n async with self.session.get(self.getUrl(ingress, ssl) + path, json=json, auth=auth, timeout=self.timeout) as resp:\r\n if resp.status != status:\r\n import logging\r\n logging.getLogger().error(await resp.text())\r\n assert resp.status == status\r\n return await resp.text()\r\n\r\n async def postjson(self, path, status=200, json=None, ingress=True):\r\n async with self.session.post(self.getUrl(ingress) + path, json=json, timeout=self.timeout) as resp:\r\n assert resp.status == status\r\n return await resp.json()\r\n\r\n async def assertError(self, path, error_type=\"generic_error\", status=500, ingress=True, json=None):\r\n logging.getLogger().info(\"Requesting \" + path)\r\n data = await self.getjson(path, status=status, ingress=ingress, json=json)\r\n assert data['error_type'] == error_type\r\n\r\n\r\n# This module should only ever have bindings that can also be satisfied by MainModule\r\nclass TestModule(Module):\r\n def __init__(self, config: Config, ports: Ports):\r\n self.ports = ports\r\n self.config = config\r\n\r\n @provider\r\n @singleton\r\n def getDriveCreds(self, time: Time) -> Creds:\r\n return Creds(time, \"test_client_id\", time.now(), \"test_access_token\", \"test_refresh_token\", \"test_client_secret\")\r\n\r\n @provider\r\n @singleton\r\n def getTime(self) -> Time:\r\n return FakeTime()\r\n\r\n @provider\r\n @singleton\r\n def getPorts(self) -> Ports:\r\n return self.ports\r\n\r\n @provider\r\n @singleton\r\n def getConfig(self) -> Config:\r\n return self.config\r\n\r\n\r\n@pytest.fixture\r\ndef event_loop():\r\n if platform.system() == \"Windows\":\r\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\r\n return asyncio.new_event_loop()\r\n\r\n\r\n@pytest.fixture\r\nasync def generate_config(server_url: URL, ports, cleandir):\r\n return Config.withOverrides({\r\n Setting.DRIVE_URL: str(server_url),\r\n Setting.SUPERVISOR_URL: str(server_url) + \"/\",\r\n Setting.AUTHORIZATION_HOST: str(server_url),\r\n Setting.TOKEN_SERVER_HOSTS: str(server_url),\r\n Setting.DRIVE_REFRESH_URL: str(server_url.with_path(\"/oauth2/v4/token\")),\r\n Setting.DRIVE_AUTHORIZE_URL: str(server_url.with_path(\"/o/oauth2/v2/auth\")),\r\n Setting.DRIVE_TOKEN_URL: str(server_url.with_path(\"/token\")),\r\n Setting.DRIVE_DEVICE_CODE_URL: str(server_url.with_path(\"/device/code\")),\r\n Setting.SUPERVISOR_TOKEN: \"test_header\",\r\n Setting.SECRETS_FILE_PATH: \"secrets.yaml\",\r\n Setting.CREDENTIALS_FILE_PATH: \"credentials.dat\",\r\n Setting.FOLDER_FILE_PATH: \"folder.dat\",\r\n Setting.RETAINED_FILE_PATH: \"retained.json\",\r\n Setting.ID_FILE_PATH: \"id.json\",\r\n Setting.DATA_CACHE_FILE_PATH: \"data_cache.json\",\r\n Setting.STOP_ADDON_STATE_PATH: \"stop_addon.json\",\r\n Setting.INGRESS_TOKEN_FILE_PATH: \"ingress.dat\",\r\n Setting.DEFAULT_DRIVE_CLIENT_ID: \"test_client_id\",\r\n Setting.DEFAULT_DRIVE_CLIENT_SECRET: \"test_client_secret\",\r\n Setting.BACKUP_DIRECTORY_PATH: os.path.join(cleandir, \"backups\"),\r\n Setting.PORT: ports.ui,\r\n Setting.INGRESS_PORT: ports.ingress,\r\n Setting.BACKUP_STARTUP_DELAY_MINUTES: 0,\r\n Setting.PING_TIMEOUT: 0.1,\r\n })\r\n\r\n\r\n@pytest.fixture\r\nasync def injector(cleandir, ports, generate_config):\r\n drive_creds = Creds(FakeTime(), \"test_client_id\", None, \"test_access_token\", \"test_refresh_token\")\r\n\r\n 
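# (editor note) the files created below sit at the same paths wired into generate_config, so the injected components find pre-seeded secrets and Drive credentials.\r\n 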
os.mkdir(os.path.join(cleandir, \"backups\"))\r\n with open(os.path.join(cleandir, \"secrets.yaml\"), \"w\") as f:\r\n f.write(\"for_unit_tests: \\\"password value\\\"\\n\")\r\n\r\n with open(os.path.join(cleandir, \"credentials.dat\"), \"w\") as f:\r\n f.write(json.dumps(drive_creds.serialize()))\r\n\r\n return Injector([BaseModule(), TestModule(generate_config, ports)])\r\n\r\n\r\n@pytest.fixture\r\nasync def ui_server(injector, server):\r\n os.mkdir(\"static\")\r\n server = injector.get(UiServer)\r\n await server.run()\r\n yield server\r\n await server.shutdown()\r\n\r\n\r\n@pytest.fixture\r\ndef reader(server, ui_server, session, ui_port, ingress_port):\r\n return ReaderHelper(session, ui_port, ingress_port)\r\n\r\n\r\n@pytest.fixture\r\nasync def uploader(injector: Injector, server_url):\r\n return injector.get(ClassAssistedBuilder[Uploader]).build(host=str(server_url))\r\n\r\n\r\n@pytest.fixture\r\nasync def google(injector: Injector):\r\n return injector.get(SimulatedGoogle)\r\n\r\n\r\n@pytest.fixture\r\nasync def interceptor(injector: Injector):\r\n return injector.get(RequestInterceptor)\r\n\r\n\r\n@pytest.fixture\r\nasync def supervisor(injector: Injector, server, session):\r\n return injector.get(SimulatedSupervisor)\r\n\r\n\r\n@pytest.fixture\r\nasync def addon_stopper(injector: Injector):\r\n return injector.get(AddonStopper)\r\n\r\n\r\n@pytest.fixture\r\nasync def server(injector, port, drive_creds: Creds, session):\r\n server = injector.get(SimulationServer)\r\n\r\n # start the server\r\n logging.getLogger().info(\"Starting SimulationServer on port \" + str(port))\r\n await server.start(port)\r\n yield server\r\n await server.stop()\r\n\r\n\r\n@pytest.fixture\r\nasync def data_cache(injector):\r\n return injector.get(DataCache)\r\n\r\n\r\n@pytest.fixture\r\nasync def session(injector):\r\n async with injector.get(ClientSession) as session:\r\n yield session\r\n\r\n\r\n@pytest.fixture\r\nasync def precache(injector):\r\n return injector.get(DestinationPrecache)\r\n\r\n\r\n@pytest.fixture\r\nasync def backup(coord, source, dest):\r\n await coord.sync()\r\n assert len(coord.backups()) == 1\r\n return coord.backups()[0]\r\n\r\n\r\n@pytest.fixture\r\nasync def fs(injector):\r\n faker = injector.get(FsFaker)\r\n faker.start()\r\n yield faker\r\n faker.stop()\r\n\r\n\r\n@pytest.fixture\r\nasync def estimator(injector, fs):\r\n return injector.get(Estimator)\r\n\r\n\r\n@pytest.fixture\r\nasync def device_code(injector):\r\n return injector.get(AuthCodeQuery)\r\n\r\n\r\n@pytest.fixture\r\nasync def error_store(injector):\r\n return injector.get(ErrorStore)\r\n\r\n\r\n@pytest.fixture\r\nasync def model(injector):\r\n return injector.get(Model)\r\n\r\n\r\n@pytest.fixture\r\nasync def global_info(injector):\r\n return injector.get(GlobalInfo)\r\n\r\n\r\n@pytest.fixture\r\nasync def server_url(port):\r\n return URL(\"http://localhost:\").with_port(port)\r\n\r\n\r\n@pytest.fixture\r\nasync def ports(unused_tcp_port_factory):\r\n return Ports(unused_tcp_port_factory(), unused_tcp_port_factory(), unused_tcp_port_factory())\r\n\r\n\r\n@pytest.fixture\r\nasync def port(ports: Ports):\r\n return ports.server\r\n\r\n\r\n@pytest.fixture\r\nasync def ui_url(ports: Ports):\r\n return URL(\"http://localhost\").with_port(ports.ingress)\r\n\r\n\r\n@pytest.fixture\r\nasync def ui_port(ports: Ports):\r\n return ports.ui\r\n\r\n\r\n@pytest.fixture\r\nasync def ingress_port(ports: Ports):\r\n return ports.ingress\r\n\r\n\r\n@pytest.fixture\r\nasync def coord(injector):\r\n return 
injector.get(Coordinator)\r\n\r\n\r\n@pytest.fixture()\r\nasync def updater(injector):\r\n return injector.get(HaUpdater)\r\n\r\n\r\n@pytest.fixture()\r\nasync def cleandir():\r\n newpath = tempfile.mkdtemp()\r\n os.chdir(newpath)\r\n return newpath\r\n\r\n\r\n@pytest.fixture\r\nasync def time(injector):\r\n reset()\r\n return injector.get(Time)\r\n\r\n\r\n@pytest.fixture\r\nasync def config(injector):\r\n return injector.get(Config)\r\n\r\n\r\n@pytest.fixture\r\nasync def drive_creds(injector):\r\n return injector.get(Creds)\r\n\r\n\r\n@pytest.fixture\r\nasync def drive(injector, server, session):\r\n return injector.get(DriveSource)\r\n\r\n\r\n@pytest.fixture\r\nasync def ha(injector, server, session):\r\n return injector.get(HaSource)\r\n\r\n\r\n@pytest.fixture\r\nasync def ha_requests(injector, server):\r\n return injector.get(HaRequests)\r\n\r\n\r\n@pytest.fixture\r\nasync def drive_requests(injector, server):\r\n return injector.get(DriveRequests)\r\n\r\n\r\n@pytest.fixture\r\nasync def resolver(injector):\r\n return injector.get(Resolver)\r\n\r\n\r\n@pytest.fixture\r\nasync def client_identifier(injector):\r\n return injector.get(Config).clientIdentifier()\r\n\r\n\r\n@pytest.fixture\r\nasync def debug_worker(injector):\r\n return injector.get(DebugWorker)\r\n\r\n\r\n@pytest.fixture()\r\nasync def folder_finder(injector):\r\n return injector.get(FolderFinder)\r\n\r\n\r\n@pytest.fixture()\r\nasync def watcher(injector):\r\n watcher = injector.get(Watcher)\r\n yield watcher\r\n await watcher.stop()\r\n\r\n\r\nclass BackupHelper():\r\n def __init__(self, uploader, time):\r\n self.time = time\r\n self.uploader = uploader\r\n\r\n async def createFile(self, size=1024 * 1024 * 2, slug=\"testslug\", name=\"Test Name\"):\r\n from_backup: DummyBackup = DummyBackup(\r\n name, self.time.toUtc(self.time.local(1985, 12, 6)), \"fake source\", slug)\r\n data = await self.uploader.upload(createBackupTar(slug, name, self.time.now(), size))\r\n return from_backup, data\r\n\r\n\r\n@pytest.fixture\r\ndef backup_helper(uploader, time):\r\n return BackupHelper(uploader, time)\r\n", "repo_name": "sabeechen/hassio-google-drive-backup", "sub_path": "hassio-google-drive-backup/tests/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 12022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2613, "dataset": "github-code", "pt": "52", "api": [{"api_name": "injector.inject", "line_number": 41, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 48, "usage_type": "call"}, {"api_name": "os.statvfs", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.statvfs", "line_number": 50, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 53, "usage_type": "call"}, {"api_name": "os.statvfs", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.statvfs_result", "line_number": 57, "usage_type": "call"}, {"api_name": "injector.singleton", "line_number": 39, "usage_type": "name"}, {"api_name": "aiohttp.ClientTimeout", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 102, "usage_type": "call"}, {"api_name": "injector.Module", "line_number": 108, "usage_type": "name"}, {"api_name": "backup.config.Config", "line_number": 109, "usage_type": "name"}, {"api_name": "dev.ports.Ports", "line_number": 109, "usage_type": "name"}, {"api_name": "backup.time.Time", "line_number": 115, "usage_type": "name"}, 
{"api_name": "backup.creds.Creds", "line_number": 116, "usage_type": "call"}, {"api_name": "injector.provider", "line_number": 113, "usage_type": "name"}, {"api_name": "injector.singleton", "line_number": 114, "usage_type": "name"}, {"api_name": "backup.creds.Creds", "line_number": 115, "usage_type": "name"}, {"api_name": "faketime.FakeTime", "line_number": 121, "usage_type": "call"}, {"api_name": "injector.provider", "line_number": 118, "usage_type": "name"}, {"api_name": "injector.singleton", "line_number": 119, "usage_type": "name"}, {"api_name": "backup.time.Time", "line_number": 120, "usage_type": "name"}, {"api_name": "injector.provider", "line_number": 123, "usage_type": "name"}, {"api_name": "injector.singleton", "line_number": 124, "usage_type": "name"}, {"api_name": "dev.ports.Ports", "line_number": 125, "usage_type": "name"}, {"api_name": "injector.provider", "line_number": 128, "usage_type": "name"}, {"api_name": "injector.singleton", "line_number": 129, "usage_type": "name"}, {"api_name": "backup.config.Config", "line_number": 130, "usage_type": "name"}, {"api_name": "platform.system", "line_number": 136, "usage_type": "call"}, {"api_name": "asyncio.set_event_loop_policy", "line_number": 137, "usage_type": "call"}, {"api_name": "asyncio.WindowsSelectorEventLoopPolicy", "line_number": 137, "usage_type": "call"}, {"api_name": "asyncio.new_event_loop", "line_number": 138, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 134, "usage_type": "attribute"}, {"api_name": "yarl.URL", "line_number": 142, "usage_type": "name"}, {"api_name": "backup.config.Config.withOverrides", "line_number": 143, "usage_type": "call"}, {"api_name": "backup.config.Config", "line_number": 143, "usage_type": "name"}, {"api_name": "backup.config.Setting.DRIVE_URL", "line_number": 144, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 144, "usage_type": "name"}, {"api_name": "backup.config.Setting.SUPERVISOR_URL", "line_number": 145, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 145, "usage_type": "name"}, {"api_name": "backup.config.Setting.AUTHORIZATION_HOST", "line_number": 146, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 146, "usage_type": "name"}, {"api_name": "backup.config.Setting.TOKEN_SERVER_HOSTS", "line_number": 147, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 147, "usage_type": "name"}, {"api_name": "backup.config.Setting.DRIVE_REFRESH_URL", "line_number": 148, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 148, "usage_type": "name"}, {"api_name": "backup.config.Setting.DRIVE_AUTHORIZE_URL", "line_number": 149, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 149, "usage_type": "name"}, {"api_name": "backup.config.Setting.DRIVE_TOKEN_URL", "line_number": 150, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 150, "usage_type": "name"}, {"api_name": "backup.config.Setting.DRIVE_DEVICE_CODE_URL", "line_number": 151, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 151, "usage_type": "name"}, {"api_name": "backup.config.Setting.SUPERVISOR_TOKEN", "line_number": 152, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 152, "usage_type": "name"}, {"api_name": "backup.config.Setting.SECRETS_FILE_PATH", "line_number": 153, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", 
"line_number": 153, "usage_type": "name"}, {"api_name": "backup.config.Setting.CREDENTIALS_FILE_PATH", "line_number": 154, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 154, "usage_type": "name"}, {"api_name": "backup.config.Setting.FOLDER_FILE_PATH", "line_number": 155, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 155, "usage_type": "name"}, {"api_name": "backup.config.Setting.RETAINED_FILE_PATH", "line_number": 156, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 156, "usage_type": "name"}, {"api_name": "backup.config.Setting.ID_FILE_PATH", "line_number": 157, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 157, "usage_type": "name"}, {"api_name": "backup.config.Setting.DATA_CACHE_FILE_PATH", "line_number": 158, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 158, "usage_type": "name"}, {"api_name": "backup.config.Setting.STOP_ADDON_STATE_PATH", "line_number": 159, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 159, "usage_type": "name"}, {"api_name": "backup.config.Setting.INGRESS_TOKEN_FILE_PATH", "line_number": 160, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 160, "usage_type": "name"}, {"api_name": "backup.config.Setting.DEFAULT_DRIVE_CLIENT_ID", "line_number": 161, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 161, "usage_type": "name"}, {"api_name": "backup.config.Setting.DEFAULT_DRIVE_CLIENT_SECRET", "line_number": 162, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 162, "usage_type": "name"}, {"api_name": "backup.config.Setting.BACKUP_DIRECTORY_PATH", "line_number": 163, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 163, "usage_type": "name"}, {"api_name": "backup.config.Setting.PORT", "line_number": 164, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 164, "usage_type": "name"}, {"api_name": "backup.config.Setting.INGRESS_PORT", "line_number": 165, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 165, "usage_type": "name"}, {"api_name": "backup.config.Setting.BACKUP_STARTUP_DELAY_MINUTES", "line_number": 166, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 166, "usage_type": "name"}, {"api_name": "backup.config.Setting.PING_TIMEOUT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "backup.config.Setting", "line_number": 167, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 141, "usage_type": "attribute"}, {"api_name": "backup.creds.Creds", "line_number": 173, "usage_type": "call"}, {"api_name": "faketime.FakeTime", "line_number": 173, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": 
"json.dumps", "line_number": 180, "usage_type": "call"}, {"api_name": "injector.Injector", "line_number": 182, "usage_type": "call"}, {"api_name": "backup.module.BaseModule", "line_number": 182, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 187, "usage_type": "call"}, {"api_name": "injector.get", "line_number": 188, "usage_type": "call"}, {"api_name": "backup.ui.UiServer", "line_number": 188, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 194, "usage_type": "attribute"}, {"api_name": "injector.Injector", "line_number": 200, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 201, "usage_type": "call"}, {"api_name": "injector.ClassAssistedBuilder", "line_number": 201, "usage_type": "name"}, {"api_name": "helpers.Uploader", "line_number": 201, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 199, "usage_type": "attribute"}, {"api_name": "injector.Injector", "line_number": 205, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 206, "usage_type": "call"}, {"api_name": "dev.simulated_google.SimulatedGoogle", "line_number": 206, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 204, "usage_type": "attribute"}, {"api_name": "injector.Injector", "line_number": 210, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 211, "usage_type": "call"}, {"api_name": "dev.request_interceptor.RequestInterceptor", "line_number": 211, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 209, "usage_type": "attribute"}, {"api_name": "injector.Injector", "line_number": 215, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 216, "usage_type": "call"}, {"api_name": "dev.simulated_supervisor.SimulatedSupervisor", "line_number": 216, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 214, "usage_type": "attribute"}, {"api_name": "injector.Injector", "line_number": 220, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 221, "usage_type": "call"}, {"api_name": "backup.ha.AddonStopper", "line_number": 221, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 219, "usage_type": "attribute"}, {"api_name": "backup.creds.Creds", "line_number": 225, "usage_type": "name"}, {"api_name": "injector.get", "line_number": 226, "usage_type": "call"}, {"api_name": "dev.simulationserver.SimulationServer", "line_number": 226, "usage_type": "argument"}, {"api_name": "logging.getLogger", "line_number": 229, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 224, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 237, "usage_type": "call"}, {"api_name": "backup.util.DataCache", "line_number": 237, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 235, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 242, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 242, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 240, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 248, "usage_type": "call"}, {"api_name": "backup.model.DestinationPrecache", "line_number": 248, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 246, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 
251, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 260, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 258, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 268, "usage_type": "call"}, {"api_name": "backup.util.Estimator", "line_number": 268, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 266, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 273, "usage_type": "call"}, {"api_name": "backup.drive.AuthCodeQuery", "line_number": 273, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 271, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 278, "usage_type": "call"}, {"api_name": "backup.server.ErrorStore", "line_number": 278, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 276, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 283, "usage_type": "call"}, {"api_name": "backup.model.Model", "line_number": 283, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 281, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 288, "usage_type": "call"}, {"api_name": "backup.util.GlobalInfo", "line_number": 288, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 286, "usage_type": "attribute"}, {"api_name": "yarl.URL", "line_number": 293, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 291, "usage_type": "attribute"}, {"api_name": "dev.ports.Ports", "line_number": 298, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 296, "usage_type": "attribute"}, {"api_name": "dev.ports.Ports", "line_number": 302, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 301, "usage_type": "attribute"}, {"api_name": "dev.ports.Ports", "line_number": 307, "usage_type": "name"}, {"api_name": "yarl.URL", "line_number": 308, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 306, "usage_type": "attribute"}, {"api_name": "dev.ports.Ports", "line_number": 312, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 311, "usage_type": "attribute"}, {"api_name": "dev.ports.Ports", "line_number": 317, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 316, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 323, "usage_type": "call"}, {"api_name": "backup.model.Coordinator", "line_number": 323, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 321, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 328, "usage_type": "call"}, {"api_name": "backup.ha.HaUpdater", "line_number": 328, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 326, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 333, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 334, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 331, "usage_type": "call"}, {"api_name": "backup.logger.reset", "line_number": 340, "usage_type": "call"}, {"api_name": "injector.get", "line_number": 341, "usage_type": "call"}, {"api_name": "backup.time.Time", "line_number": 341, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 338, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 346, "usage_type": "call"}, {"api_name": "backup.config.Config", "line_number": 346, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 
344, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 351, "usage_type": "call"}, {"api_name": "backup.creds.Creds", "line_number": 351, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 349, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 356, "usage_type": "call"}, {"api_name": "backup.drive.DriveSource", "line_number": 356, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 354, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 361, "usage_type": "call"}, {"api_name": "backup.ha.HaSource", "line_number": 361, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 359, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 366, "usage_type": "call"}, {"api_name": "backup.ha.HaRequests", "line_number": 366, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 364, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 371, "usage_type": "call"}, {"api_name": "backup.drive.DriveRequests", "line_number": 371, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 369, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 376, "usage_type": "call"}, {"api_name": "backup.util.Resolver", "line_number": 376, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 374, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 381, "usage_type": "call"}, {"api_name": "backup.config.Config", "line_number": 381, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 379, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 386, "usage_type": "call"}, {"api_name": "backup.debugworker.DebugWorker", "line_number": 386, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 384, "usage_type": "attribute"}, {"api_name": "injector.get", "line_number": 391, "usage_type": "call"}, {"api_name": "backup.drive.FolderFinder", "line_number": 391, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 389, "usage_type": "call"}, {"api_name": "injector.get", "line_number": 396, "usage_type": "call"}, {"api_name": "backup.watcher.Watcher", "line_number": 396, "usage_type": "argument"}, {"api_name": "pytest.fixture", "line_number": 394, "usage_type": "call"}, {"api_name": "backup.model.DummyBackup", "line_number": 407, "usage_type": "name"}, {"api_name": "helpers.createBackupTar", "line_number": 409, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 413, "usage_type": "attribute"}]} +{"seq_id": "40003553406", "text": "from django.shortcuts import render, redirect\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import User, Book, Author, Review\nimport bcrypt\n\n\n\n\ndef index(request):\n return render(request, 'login.html')\n\n\ndef new_user(request):\n errors = User.objects.new_validator(request.POST)\n if len(errors) > 0:\n for key, value in errors.items():\n messages.error(request, value)\n return redirect('/')\n else:\n name= request.POST['name']\n user_name= request.POST['user_name']\n email= request.POST['email']\n password= request.POST['password']\n pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()\n new_user = User.objects.create(name=name, user_name=user_name, email=email, password=pw_hash)\n request.session['user_id'] = new_user.id\n\n return redirect(f'/all_books/{new_user.id}')\n\n\n\ndef 
login(request):\r\n \r\n errors = User.objects.return_validator(request.POST)\r\n if len(errors) > 0:\r\n for key, value in errors.items():\r\n messages.error(request, value)\r\n return redirect('/')\r\n login_user_list = User.objects.filter(email=request.POST['email']) \r\n logged_in_user = login_user_list[0]\r\n request.session['user_id'] = logged_in_user.id\r\n return redirect(f'/all_books/{logged_in_user.id}')\r\n\r\n\r\n\r\ndef all_books(request, user_id):\r\n if 'user_id' not in request.session:\r\n return redirect('/') \r\n \r\n last_three = Review.objects.all().order_by('-id')[:3]\r\n context = {\r\n 'user' : User.objects.get(id=request.session['user_id']),\r\n 'details' : Review.objects.all(),\r\n 'books' : Book.objects.all(),\r\n 'last_three' : last_three\r\n\r\n }\r\n return render(request, 'all_books.html', context)\r\n\r\n\r\n\r\ndef add_book(request):\r\n if 'user_id' not in request.session:\r\n return redirect('/')\r\n\r\n author = Author.objects.all()\r\n context = {\r\n 'user' : User.objects.get(id=request.session['user_id']),\r\n 'author' : author\r\n }\r\n \r\n return render(request, 'add_book.html', context)\r\n\r\n\r\n\r\ndef process_new(request):\r\n\r\n errors = User.objects.book_validator(request.POST)\r\n if len(errors) > 0:\r\n for key, value in errors.items():\r\n messages.error(request, value)\r\n return redirect('/add_book')\r\n\r\n title = request.POST['title']\r\n review = request.POST['review']\r\n rating = request.POST['rating']\r\n user_id = request.session['user_id']\r\n \r\n if request.POST['new_auth'] == '':\r\n author = request.POST['author']\r\n author = Author.objects.get(id=author)\r\n this_book = Book.objects.create(title=title, author=author)\r\n else:\r\n added_auth = request.POST['new_auth']\r\n new_auth = Author.objects.create(name=added_auth)\r\n this_book = Book.objects.create(title=title, author=new_auth)\r\n\r\n this_book_id = this_book.id\r\n review = Review.objects.create(comment=review, rating=rating, user=User.objects.get(id=user_id), book=this_book)\r\n\r\n return redirect(f'/one_book/{this_book_id}')\r\n\r\n\r\n\r\ndef one_book(request, this_book_id):\r\n if 'user_id' not in request.session:\r\n return redirect('/') \r\n\r\n user = User.objects.get(id=request.session['user_id'])\r\n\r\n book = Book.objects.get(id=this_book_id)\r\n author = book.author.name\r\n review = book.books.all()\r\n\r\n context = {\r\n 'book' : book,\r\n 'author' : author,\r\n 'review' : review,\r\n 'user' : user,\r\n }\r\n return render(request, 'one_book.html', context)\r\n\r\n\r\n\r\ndef add_review(request):\r\n user = User.objects.get(id=request.session['user_id'])\r\n comment = request.POST['comment']\r\n rating = request.POST['rating']\r\n book_id = request.POST['book_id']\r\n\r\n book_updated = Book.objects.get(id=book_id)\r\n Review.objects.create(comment=comment, rating=rating, user=user, book=book_updated)\r\n\r\n return redirect(f'/one_book/{book_id}')\r\n\r\n\r\n\r\ndef user_page(request, user_id):\r\n if 'user_id' not in request.session:\r\n return redirect('/')\r\n\r\n user = User.objects.get(id=user_id)\r\n review_count = user.users.all()\r\n \r\n count = 0\r\n for x in review_count:\r\n if x:\r\n count = count + 1\r\n \r\n context = {\r\n 'user' : user,\r\n 'books' : user.users.all(),\r\n 'count' : count\r\n }\r\n\r\n return render(request, 'user_page.html', context)\r\n\r\n\r\n\r\ndef logout(request):\r\n request.session.flush()\r\n return redirect('/')", "repo_name": "R2DEV0/dojo_reads", "sub_path": "main_app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": 
"call"}, {"api_name": "models.User.objects.new_validator", "line_number": 15, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 15, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 19, "usage_type": "call"}, {"api_name": "bcrypt.hashpw", "line_number": 25, "usage_type": "call"}, {"api_name": "bcrypt.gensalt", "line_number": 25, "usage_type": "call"}, {"api_name": "models.User.objects.create", "line_number": 26, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "models.User.objects.return_validator", "line_number": 35, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 35, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "models.User.objects.filter", "line_number": 40, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 49, "usage_type": "call"}, {"api_name": "models.Review.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Review.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Review", "line_number": 51, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 53, "usage_type": "name"}, {"api_name": "models.Review.objects.all", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Review.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.Review", "line_number": 54, "usage_type": "name"}, {"api_name": "models.Book.objects.all", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 55, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Author.objects.all", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 67, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 69, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": 
"call"}, {"api_name": "models.User.objects.book_validator", "line_number": 79, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 79, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 82, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Author.objects.get", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 92, "usage_type": "name"}, {"api_name": "models.Book.objects.create", "line_number": 93, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 93, "usage_type": "name"}, {"api_name": "models.Author.objects.create", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Author.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "models.Author", "line_number": 96, "usage_type": "name"}, {"api_name": "models.Book.objects.create", "line_number": 97, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 97, "usage_type": "name"}, {"api_name": "models.Review.objects.create", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Review.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.Review", "line_number": 100, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 100, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 110, "usage_type": "name"}, {"api_name": "models.Book.objects.get", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 112, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 122, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 127, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 127, "usage_type": "name"}, {"api_name": "models.Book.objects.get", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Book.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "models.Book", "line_number": 132, "usage_type": "name"}, {"api_name": "models.Review.objects.create", "line_number": 133, "usage_type": "call"}, {"api_name": "models.Review.objects", "line_number": 133, "usage_type": "attribute"}, {"api_name": "models.Review", "line_number": 133, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 141, 
"usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 143, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 143, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 158, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "41643213411", "text": "\"\"\"This file contains utility classes for exploring complex instance graphs.\nThis is handy for deeply nested ``Stash`` instances.\n\n\"\"\"\n__author__ = 'Paul Landes'\n\nimport dataclasses\nfrom typing import Set, Type, Any\nimport logging\nimport sys\nimport collections\nfrom io import TextIOBase\nfrom zensols.config import ClassResolver, Writable\n\nlogger = logging.getLogger(__name__)\n\n\nclass ClassExplorer(Writable):\n \"\"\"A utility class that recursively reports class metadata in an object graph.\n\n \"\"\"\n ATTR_META_NAME = 'ATTR_EXP_META'\n \"\"\"The attribute name set on classes to find to report their fields. When the\n value of this is set as a class attribute, each of that object instances'\n members are pretty printed. The value is a tuple of string attribute\n names.\n\n \"\"\"\n\n def __init__(self, include_classes: Set[Type],\n exclude_classes: Set[Type] = None,\n indent: int = 4, attr_truncate_len: int = 80,\n include_dicts: bool = False,\n include_private: bool = False,\n dictify_dataclasses: bool = False):\n self.include_classes = include_classes\n if exclude_classes is None:\n self.exclude_classes = set()\n else:\n self.exclude_classes = exclude_classes\n self.indent = indent\n self.attr_truncate_len = attr_truncate_len\n self.include_dicts = include_dicts\n self.include_private = include_private\n self.dictify_dataclasses = dictify_dataclasses\n\n def get_metadata(self, inst: Any) -> dict:\n self.visited = set()\n try:\n include_classes = set(self.include_classes | set([inst.__class__]))\n meta = self._get_metadata(\n inst, tuple(include_classes), tuple(self.exclude_classes))\n finally:\n del self.visited\n return meta\n\n def _get_dict(self, inst: dict, include_classes: Set[Type],\n exclude_classes: Set[Type]) -> dict:\n oid = id(inst)\n if oid not in self.visited:\n children = []\n self.visited.add(oid)\n for k, v in inst.items():\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'desc {k} -> {type(v)}')\n v = self._get_metadata(v, include_classes, exclude_classes)\n if v is not None:\n children.append({'attr': k, 'child': v})\n return {'class_name': '', 'children': children}\n\n def _is_traversable(self, inst: Any, include_classes: Set[Type],\n exclude_classes: Set[Type]) -> bool:\n return isinstance(inst, include_classes) and \\\n not isinstance(inst, exclude_classes)\n\n def _get_metadata(self, inst: Any, include_classes: Set[Type],\n exclude_classes: Set[Type]) -> dict:\n oid = id(inst)\n if oid in self.visited:\n return None\n self.visited.add(oid)\n dat = None\n if self.include_dicts and isinstance(inst, dict):\n dat = self._get_dict(inst, include_classes, exclude_classes)\n elif self._is_traversable(inst, include_classes, exclude_classes):\n dat = collections.OrderedDict()\n cls = inst.__class__\n class_name = ClassResolver.full_classname(cls)\n children = []\n dat['class_name'] = class_name\n is_dataclass = self.dictify_dataclasses and \\\n dataclasses.is_dataclass(inst)\n has_attr_meta = hasattr(cls, self.ATTR_META_NAME)\n if hasattr(inst, 'name'):\n dat['name'] = getattr(inst, 
'name')\n if has_attr_meta or is_dataclass:\n attrs = {}\n dat['attrs'] = attrs\n if not has_attr_meta and is_dataclass:\n try:\n attr_names = dataclasses.asdict(inst)\n except Exception as e:\n logger.info(\n f'can not get attr names for {type(inst)}: {e}')\n attr_names = ()\n elif has_attr_meta:\n attr_names = getattr(cls, self.ATTR_META_NAME)\n # TODO: skip attributes that will or have already been\n # traversed as a \"traversable\" object on a recursion\n for attr in attr_names:\n v = getattr(inst, attr)\n if isinstance(v, dict):\n v = self._get_dict(v, include_classes, exclude_classes)\n if v is not None:\n children.append({'attr': attr, 'child': v})\n else:\n attrs[attr] = v\n for attr in inst.__dir__():\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'desc meta: {type(inst)}.{attr}')\n if self.include_private or not attr.startswith('_'):\n try:\n child_inst = getattr(inst, attr)\n except Exception as e:\n msg = f'error: can not traverse attribute {attr}: {e}'\n logger.info(msg)\n child_inst = msg\n if isinstance(child_inst, dict):\n child = self._get_dict(\n child_inst, include_classes, exclude_classes)\n else:\n child = self._get_metadata(\n child_inst, include_classes, exclude_classes)\n if child is not None:\n children.append({'attr': attr, 'child': child})\n if len(children) > 0:\n dat['children'] = children\n return dat\n\n def write(self, inst: Any, depth: int = 0,\n writer: TextIOBase = sys.stdout):\n meta = self.get_metadata(inst)\n self._write(meta, depth, None, writer)\n\n def write_metadata(self, depth: int = 0,\n writer: TextIOBase = sys.stdout,\n metadata: dict = None):\n self._write(metadata, depth, None, writer)\n\n def _write(self, metadata: dict, depth: int, attr: str, writer):\n cn = f'{attr}: ' if attr is not None else ''\n name = f\" ({metadata['name']})\" if 'name' in metadata else ''\n sp = self._sp(depth)\n sp2 = self._sp(depth + 1)\n writer.write(f\"{sp}{cn}{metadata['class_name']}{name}\\n\")\n if 'attrs' in metadata:\n for k, v in metadata['attrs'].items():\n v = self._trunc(str(v), max_len=self.attr_truncate_len)\n writer.write(f'{sp2}{k}: {v}\\n')\n if 'children' in metadata:\n for c in metadata['children']:\n self._write(c['child'], depth + 1, c['attr'], writer)\n", "repo_name": "plandes/util", "sub_path": "src/python/zensols/config/meta.py", "file_name": "meta.py", "file_ext": "py", "file_size_in_byte": 6849, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "zensols.config.Writable", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 30, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 58, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 64, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.Type", 
"line_number": 71, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 76, "usage_type": "name"}, {"api_name": "typing.Set", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 77, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 86, "usage_type": "call"}, {"api_name": "zensols.config.ClassResolver.full_classname", "line_number": 88, "usage_type": "call"}, {"api_name": "zensols.config.ClassResolver", "line_number": 88, "usage_type": "name"}, {"api_name": "dataclasses.is_dataclass", "line_number": 92, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 101, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 119, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 140, "usage_type": "name"}, {"api_name": "io.TextIOBase", "line_number": 141, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 141, "usage_type": "attribute"}, {"api_name": "io.TextIOBase", "line_number": 146, "usage_type": "name"}, {"api_name": "sys.stdout", "line_number": 146, "usage_type": "attribute"}]} +{"seq_id": "4146388725", "text": "import json\n\nfrom functions import dkufood\nfrom .loadresmsg import restList, keyboard\n\n\n# 요청한 식당의 식단 메뉴를 가져옵니다..\ndef getRestMenu(topic):\n \"\"\"\n 카카오톡에서 사용자의 요청을 받아 원하는 식당 정보를 받고,\n 메시지로 반환하는 함수\n\n :type topic: string\n :param topic: 메뉴 이름\n :return: 카카오톡에 일반 메시지로 반환\n \"\"\"\n\n with open('data/restMenu.json') as msg_File:\n data = json.load(msg_File)\n\n if topic == \"처음으로..\":\n data[topic]['keyboard'] = keyboard\n return data[topic]\n\n _request = {\n \"location\": {\n \"campus\": topic[1:3],\n \"restaurant\": topic[4:]\n }\n }\n\n result = dkufood.requestFoodMenu(_request)\n\n data[topic]['message']['text'] = topic + \"\\n\\n\" + result['message']\n data[topic]['keyboard'] = restList\n\n return data[topic]", "repo_name": "NEONKID/DLUGBot", "sub_path": "messages/foodmessage.py", "file_name": "foodmessage.py", "file_ext": "py", "file_size_in_byte": 922, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "loadresmsg.keyboard", "line_number": 22, "usage_type": "name"}, {"api_name": "functions.dkufood.requestFoodMenu", "line_number": 32, "usage_type": "call"}, {"api_name": "functions.dkufood", "line_number": 32, "usage_type": "name"}, {"api_name": "loadresmsg.restList", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "19032878395", "text": "# -*- coding: utf-8 -*-\nfrom odoo import api, models, fields, _\nfrom odoo.exceptions import UserError\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass Tracking(models.Model):\n _inherit = ['mail.thread', 'mail.activity.mixin']\n _name = 'lgps.tracking'\n\n name = fields.Char(\n required=True,\n string=_(\"Tracking Id\"),\n default=\"Autogenerated on Save\"\n )\n\n client_id = fields.Many2one(\n comodel_name=\"res.partner\",\n string=_(\"Client Account\"),\n domain=[\n ('customer', '=', True),\n ('active', '=', True),\n ('is_company', '=', True)\n ],\n required=True,\n )\n\n gpsdevice_id = fields.Many2one(\n comodel_name=\"lgps.gpsdevice\",\n string=_(\"Gps 
Device\"),\n index=True,\n required=True,\n )\n\n applicant = fields.Char(\n string=_(\"Applicant\"),\n )\n\n state = fields.Selection(\n [\n ('registered', _('Registrado')),\n ('active', _('Activo')),\n ('paused', _('Detenido')),\n ('finished', _('Finalizado')),\n ('billed', _('Facturado')),\n ('cancelled', _('Cancelled')),\n ],\n string=_(\"State\"),\n default=\"registered\",\n track_visibility='onchange',\n required=True,\n )\n\n category = fields.Selection(\n [\n ('event', _('Per Event')),\n ('permanent', _('Permanent')),\n ('uninterrupted ', _('Uninterrupted')),\n ],\n string=_(\"Category\"),\n default=\"event\",\n required=True,\n index=True,\n )\n\n driver = fields.Char(\n string=_(\"Driver\"),\n )\n\n phone = fields.Text(\n string=_(\"Phones\"),\n )\n\n notifications = fields.Char(\n string=_(\"Email Notifications\"),\n )\n\n notify = fields.Boolean(\n string=_(\"Notify Client\"),\n default=False\n )\n\n initial_date = fields.Datetime(\n default=fields.Datetime.now,\n string=_(\"Initial Date\"),\n )\n\n final_date = fields.Datetime(\n string=_(\"Final Date\"),\n )\n\n origin = fields.Text(\n string=_(\"Origin\"),\n required=True,\n )\n\n destination = fields.Text(\n string=_(\"Destination\"),\n required=True,\n )\n\n route = fields.Text(\n string=_(\"Route\"),\n )\n\n observations = fields.Text(\n string=_(\"Observations\"),\n )\n\n emails = fields.Text(\n string=_(\"Additional Emails\"),\n )\n\n start_date = fields.Datetime(\n string=_(\"Activity Started at\"),\n readonly=True\n )\n\n end_date = fields.Datetime(\n string=_(\"Activity finished at\"),\n readonly=True\n )\n\n tracking_log_ids = fields.One2many(\n comodel_name=\"lgps.tracking_logs\",\n inverse_name=\"tracking_id\",\n string=_(\"Logs\"),\n )\n\n freight_weight = fields.Float(\n digits=(10, 1),\n string=_(\"Load Weight\"),\n help=_(\"Freight weight in Kilos\")\n )\n\n freight_dimensions = fields.Float(\n digits=(10, 1),\n string=_(\"Freight dimensions\"),\n help=_(\"Freight dimensions in meters\")\n )\n\n freight_cost = fields.Float(\n digits=(10, 2),\n string=_(\"Freight cost\"),\n help=_(\"Freight costs\")\n )\n\n comment_period = fields.Integer(\n string=_('Comment Period'),\n default=0,\n )\n\n active = fields.Boolean(default=True)\n\n @api.model\n def create(self, vals):\n seq = self.env['ir.sequence'].next_by_code('lgps.tracking') or '/'\n vals['name'] = seq\n return super(Tracking, self).create(vals)\n\n def button_do_active(self):\n for tracking in self:\n # tracking.state = 'active'\n _logger.warning('self %', self)\n user = self.env['res.users'].search([('name', '=', self.env.user.name)])\n _logger.warning('user: %', user)\n # _logger.warning('user name: %', user.name)\n # _logger.warning('user id: %', user.id)\n # log_object = self.env['lgps.tracking_logs']\n # user.id\n # employee_id = self.env[''].search()\n raise UserError('Stop execution')\n\n # dictionary = {\n # 'name': 'Automatic Generated',\n # 'comment': 'Monitoreo Iniciado',\n # 'comment_date': fields.Datetime.now,\n # 'employee_id': device.client_id.id,\n # 'email_sent': device.id,\n # 'tracking_id': self.destination_gpsdevice_ids.id,\n # 'vehicle_location': self.operation_mode,\n # }\n # device_log = log_object.create(dictionary)\n\n return True\n\n def button_do_pause(self):\n for tracking in self:\n tracking.state = 'paused'\n return True\n\n def button_do_finish(self):\n for tracking in self:\n tracking.state = 'finished'\n return True\n\n def button_do_cancel(self):\n for tracking in self:\n tracking.state = 'cancelled'\n tracking.active = 
False\n return True\n", "repo_name": "intralix/lgps", "sub_path": "lgps/models/tracking.py", "file_name": "tracking.py", "file_ext": "py", "file_size_in_byte": 5066, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 8, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 12, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.fields.Many2one", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields.Many2one", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.fields.Char", "line_number": 36, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 36, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 37, "usage_type": "call"}, {"api_name": "odoo.fields.Selection", "line_number": 40, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 42, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 43, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 44, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 45, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 46, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 47, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 49, "usage_type": "call"}, {"api_name": "odoo.fields.Selection", "line_number": 55, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 55, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 57, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 58, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 59, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 61, "usage_type": "call"}, {"api_name": "odoo.fields.Char", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 67, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 68, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 71, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 71, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 72, "usage_type": "call"}, {"api_name": "odoo.fields.Char", "line_number": 75, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 75, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 76, "usage_type": "call"}, {"api_name": "odoo.fields.Boolean", "line_number": 79, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 79, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 80, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 84, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 84, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 85, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 
86, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 89, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 89, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 90, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 93, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 93, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 94, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 98, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 98, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 99, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 103, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 103, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 104, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 107, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 107, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 108, "usage_type": "call"}, {"api_name": "odoo.fields.Text", "line_number": 111, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 111, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 112, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 115, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 115, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 116, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 120, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 120, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 121, "usage_type": "call"}, {"api_name": "odoo.fields.One2many", "line_number": 125, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 125, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 128, "usage_type": "call"}, {"api_name": "odoo.fields.Float", "line_number": 131, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 131, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 133, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 134, "usage_type": "call"}, {"api_name": "odoo.fields.Float", "line_number": 137, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 137, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 139, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 140, "usage_type": "call"}, {"api_name": "odoo.fields.Float", "line_number": 143, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 143, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 145, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 146, "usage_type": "call"}, {"api_name": "odoo.fields.Integer", "line_number": 149, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 149, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 150, "usage_type": "call"}, {"api_name": "odoo.fields.Boolean", "line_number": 154, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 154, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 156, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 156, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "70051688165", "text": "import random\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Q\n\nUser = 
settings.AUTH_USER_MODEL\r\n\r\nclass TweetLike(models.Model):\r\n user = models.ForeignKey(User, on_delete=models.CASCADE)\r\n tweet = models.ForeignKey(\"Tweet\", on_delete=models.CASCADE)\r\n timestamp = models.DateTimeField(auto_now_add=True)\r\n\r\nclass TweetQuerySet(models.QuerySet):\r\n def by_username(self, username):\r\n return self.filter(user__username__iexact=username)\r\n\r\n def feed(self, user):\r\n profiles_exist = user.following.exists()\r\n followed_users_id = []\r\n if profiles_exist:\r\n followed_users_id = user.following.values_list(\"user__id\", flat=True) # [x.user.id for x in profiles]\r\n return self.filter(\r\n Q(user__id__in=followed_users_id) |\r\n Q(user=user)\r\n ).distinct().order_by(\"-timestamp\")\r\n\r\nclass TweetManager(models.Manager):\r\n def get_queryset(self, *args, **kwargs):\r\n return TweetQuerySet(self.model, using=self._db)\r\n\r\n def feed(self, user):\r\n return self.get_queryset().feed(user)\r\n\r\nclass Tweet(models.Model):\r\n # Maps to SQL data\r\n # id = models.AutoField(primary_key=True)\r\n parent = models.ForeignKey(\"self\", null=True, on_delete=models.SET_NULL)\r\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"tweets\") # one user can have many tweets\r\n likes = models.ManyToManyField(User, related_name='tweet_user', blank=True, through=TweetLike)\r\n content = models.TextField(blank=True, null=True)\r\n image = models.FileField(upload_to='images/', blank=True, null=True)\r\n timestamp = models.DateTimeField(auto_now_add=True)\r\n\r\n objects = TweetManager()\r\n # def __str__(self):\r\n # return self.content\r\n \r\n class Meta:\r\n ordering = ['-id']\r\n \r\n @property\r\n def is_retweet(self):\r\n return self.parent is not None\r\n \r\n def serialize(self):\r\n '''\r\n Feel free to delete!\r\n '''\r\n return {\r\n \"id\": self.id,\r\n \"content\": self.content,\r\n \"likes\": random.randint(0, 200)\r\n }", "repo_name": "codingforentrepreneurs/Tweetme-2", "sub_path": "tweets/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2094, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 401, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.QuerySet", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models.Manager", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, 
{"api_name": "django.db.models.Model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.FileField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "19098417475", "text": "from decimal import Decimal\n\nfrom django.db import transaction\nfrom rest_framework import serializers\n\nfrom app.models import Transaction, Wallet\n\n\nclass WalletSerializer(serializers.ModelSerializer):\n class Meta:\n model = Wallet\n fields = ['id', 'label', 'balance']\n read_only_fields = ['balance']\n\n\nclass TransactionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Transaction\n fields = ['id', 'wallet_id', 'txid', 'amount']\n\n def create(self, validated_data):\n with transaction.atomic():\n instance = super().create(validated_data)\n self._add_new_transaction_to_wallet(\n wallet_id=instance.wallet_id.id,\n amount=instance.amount\n )\n return instance\n\n def update(self, instance, validated_data):\n with transaction.atomic():\n old_amount = instance.amount\n instance = super().update(instance, validated_data)\n self._update_transaction_in_wallet(\n wallet_id=instance.wallet_id.id,\n old_amount=old_amount,\n new_amount=instance.amount,\n )\n\n return instance\n\n def _update_transaction_in_wallet(\n self, wallet_id: str, old_amount: Decimal, new_amount: Decimal\n ):\n wallet = Wallet.objects.select_for_update().get(pk=wallet_id)\n balance = wallet.balance or 0\n wallet.balance = balance - old_amount + new_amount\n wallet.save()\n\n def _add_new_transaction_to_wallet(self, wallet_id: str, amount: Decimal):\n wallet = Wallet.objects.select_for_update().get(pk=wallet_id)\n balance = wallet.balance or 0\n wallet.balance = balance + amount\n wallet.save()\n", "repo_name": "Mexamos/b2broker-test-task", "sub_path": "b2broker/app/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1752, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "app.models.Wallet", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 16, 
"usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 16, "usage_type": "name"}, {"api_name": "app.models.Transaction", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 31, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 43, "usage_type": "name"}, {"api_name": "app.models.Wallet.objects.select_for_update", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.Wallet.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.models.Wallet", "line_number": 45, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 50, "usage_type": "name"}, {"api_name": "app.models.Wallet.objects.select_for_update", "line_number": 51, "usage_type": "call"}, {"api_name": "app.models.Wallet.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "app.models.Wallet", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "6719457168", "text": "import fileinput\nfrom typing import Generator, Iterable\n\n\ndef priority(item: str) -> int:\n order = ord(item)\n if order < ord(\"a\"):\n return order - ord(\"A\") + 27\n return order - ord(\"a\") + 1\n\n\ndef halves(line: str) -> tuple[str, str]:\n half = len(line) // 2\n return line[:half], line[half:]\n\n\ndef triplets(lines: list[str]) -> Generator[tuple[str, str, str], None, None]:\n for i in range(0, len(lines), 3):\n yield lines[i], lines[i + 1], lines[i + 2]\n\n\ndef itemize(group: Iterable[str]) -> tuple[str, ...]:\n return tuple(set(items) for items in group)\n\n\ndef common(*sacks: set[str]) -> str:\n sacks = iter(sacks)\n intersection = next(sacks)\n for sack in sacks:\n intersection &= sack\n return intersection.pop()\n\n\nif __name__ == \"__main__\":\n lines = [line.strip() for line in fileinput.input(\"-\")]\n compartment_items = map(itemize, (halves(line) for line in lines))\n group_items = map(itemize, (triplet for triplet in triplets(lines)))\n print(sum(priority(common(*items)) for items in compartment_items))\n print(sum(priority(common(*items)) for items in group_items))\n", "repo_name": "technomunk/advent-of-code", "sub_path": "2022/03_rucksack_reorganization.py", "file_name": "03_rucksack_reorganization.py", "file_ext": "py", "file_size_in_byte": 1133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "typing.Generator", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 22, "usage_type": "name"}, {"api_name": "fileinput.input", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "72844874404", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n box_width = 30\n focal_length = 1000\n principal_point = np.array((300.0, 300))\n distance = 100\n basic_box = box_width/2 * np.array(((1,1,1.0), (1,1,-1), (1,-1,1), (1,-1,-1), (-1,1,1),(-1,1,-1),(-1,-1,1), (-1,-1,-1)))\n box1 = np.array(((0,0,distance))) + basic_box\n c30 = np.cos(30/180*np.pi)\n s30 = np.sin(30/180*np.pi)\n R = np.array(((c30, 0, s30), (0,1,0), (-s30, 0, c30)))\n # box2 = np.matmul(R,box1.transpose()).transpose()\n # bbx1 = draw_box(box1, focal_length, principal_point)\n # bbx2 = draw_box(box2, focal_length, principal_point, 'g')\n # 
plt.plot((principal_point[0]), (principal_point[1]), 'rx')\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.xlim(0, 1300)\n # plt.ylim(0, 600)\n # plt.show()\n # draw_birds_eye(box1, 'b')\n # draw_birds_eye(box2, 'g')\n # draw_birds_camera()\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.xlim(np.min(box1[:, 0])-5, np.max(box2[:, 0])+5)\n # plt.ylim(-5, np.max(box1[:, 2])+5)\n # plt.show()\n\n # # crop box1\n # draw_box(box1, focal_length, principal_point, draw_bbx=False)\n # plt.xlim(bbx1[0], bbx1[1])\n # plt.ylim(bbx1[2], bbx1[3])\n # plt.show()\n # plt.gca().set_aspect('equal', adjustable='box')\n # draw_birds_eye(box1, 'b')\n # draw_birds_camera()\n # plt.show()\n\n # draw_birds_eye(box1, 'g')\n # plt.gca().set_aspect('equal', adjustable='box')\n # draw_birds_camera()\n # plt.show()\n\n # # crop box 2\n # draw_box(box2, focal_length, principal_point, 'g', draw_bbx=False)\n # plt.xlim(bbx2[0], bbx2[1])\n # plt.ylim(bbx2[2], bbx2[3])\n # plt.show()\n # # second case\n # rot_box = np.matmul(np.linalg.inv(R),basic_box.transpose()).transpose()\n # box3 = np.array(((0,0,distance))) + rot_box\n # box4 = np.matmul(R,box3.transpose()).transpose()\n # bbx3 = draw_box(box3, focal_length, principal_point)\n # bbx4 = draw_box(box4, focal_length, principal_point, 'g')\n # plt.plot((principal_point[0]), (principal_point[1]), 'rx')\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.xlim(0, 1300)\n # plt.ylim(0, 600)\n # plt.show()\n # draw_birds_eye(box3, 'b')\n # draw_birds_eye(box4, 'g')\n # draw_birds_camera()\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.xlim(np.min(box3[:, 0])-5, np.max(box4[:, 0])+5)\n # plt.ylim(-5, np.max(box3[:, 2])+5)\n # plt.show()\n # #crop box 3\n # draw_box(box3, focal_length, principal_point, 'g', draw_bbx=False)\n # plt.xlim(bbx3[0], bbx3[1])\n # plt.ylim(bbx3[2], bbx3[3])\n # plt.show()\n # draw_birds_eye(box3, 'b')\n # draw_birds_camera()\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.show()\n # # crop box 4\n # draw_box(box4, focal_length, principal_point, 'g', draw_bbx=False)\n # plt.xlim(bbx4[0], bbx4[1])\n # plt.ylim(bbx4[2], bbx4[3])\n # plt.show()\n # draw_birds_eye(box3, 'g')\n # draw_birds_camera()\n # plt.gca().set_aspect('equal', adjustable='box')\n # plt.show()\n\n # case 3\n skew_box = np.array(((1,1,1.0), (1,1,-1), (1,-1,1), (1,-1,-1), (-1,1,1),(-1,1,-1),(-1,-1,1), (-1,-1,-1)))\n skew_box[:,2] -= 0.6+skew_box[:,0]*0.4\n skew_box *= box_width/2\n\n box5 = np.array(((0,0,distance))) + skew_box\n box6 = np.matmul(R,box5.transpose()).transpose()\n bbx5 = draw_box(box5, focal_length, principal_point)\n bbx6 = draw_box(box6, focal_length, principal_point, 'g')\n plt.plot((principal_point[0]), (principal_point[1]), 'rx')\n plt.gca().set_aspect('equal', adjustable='box')\n plt.xlim(0, 1300)\n plt.ylim(0, 600)\n plt.show()\n draw_birds_eye(box5, 'b')\n draw_birds_eye(box6, 'g')\n draw_birds_camera()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.xlim(np.min(box5[:, 0])-5, np.max(box6[:, 0])+5)\n plt.ylim(-5, np.max(box5[:, 2])+5)\n plt.show()\n #crop box 5\n draw_box(box5, focal_length, principal_point, 'b', draw_bbx=False)\n plt.xlim(bbx5[0], bbx5[1])\n plt.ylim(bbx5[2], bbx5[3])\n plt.show()\n draw_birds_eye(box5, 'b')\n draw_birds_camera()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n # crop box 6\n draw_box(box6, focal_length, principal_point, 'g', draw_bbx=False)\n plt.xlim(bbx6[0], bbx6[1])\n plt.ylim(bbx6[2], bbx6[3])\n plt.show()\n draw_birds_eye(box5, 'g')\n 
draw_birds_camera()\n plt.gca().set_aspect('equal', adjustable='box')\n plt.show()\n \n\ndef draw_box(box, f, pp,c='b', draw_bbx=True):\n edges = ((0,1), (0,2), (0,4), (1,3), (1, 5), (2, 3), (2,6), (3, 7), (4,5), (4,6), (5, 7), (6,7))\n box_hom = box[:, :2]/box[:,2].reshape(-1,1)*f+pp\n for i, j in edges:\n plt.plot((box_hom[i,0], box_hom[j,0]), (box_hom[i,1], box_hom[j,1]), c)\n xmin = np.min(box_hom[:,0])-5\n xmax = np.max(box_hom[:,0])+5\n ymin = np.min(box_hom[:,1])-5\n ymax = np.max(box_hom[:,1])+5\n plt.plot((xmin, xmax, xmax, xmin, xmin), (ymin, ymin, ymax, ymax, ymin),'r')\n return (xmin, xmax, ymin, ymax)\n\ndef draw_birds_eye(box, c='r'):\n edges = ((0,1), (0,2), (0,4), (1,3), (1, 5), (2, 3), (2,6), (3, 7), (4,5), (4,6), (5, 7), (6,7))\n for i, j in edges:\n plt.plot((box[i,0], box[j,0]), (box[i,2], box[j,2]), c)\n\ndef draw_birds_camera():\n length = 10\n angle = 45\n ca = np.cos(angle*np.pi/180)\n sa = np.sin(angle*np.pi/180)\n plt.plot((-sa*length, 0, sa*length), (ca*length, 0, ca*length),'r')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "Davmo049/public_probabilistic_3d_regression", "sub_path": "scripts/visualize_normalized_camera.py", "file_name": "visualize_normalized_camera.py", "file_ext": "py", "file_size_in_byte": 5495, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": 
"name"}, {"api_name": "numpy.max", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 149, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}]} +{"seq_id": "10691365912", "text": "import argparse\nfrom sklearn.datasets import load_svmlight_file\n\ndefaultPath = \".\"\nCLI=argparse.ArgumentParser()\nCLI.add_argument(\n \"--data_path\",\n type=str,\n default=defaultPath\n)\n\nargs = CLI.parse_args()\n\nX,y = load_svmlight_file(args.data_path + \"/data/epsilon_normalized\")\n\n# Make the train-test split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\n# Convert to 
dense\nimport numpy as np\nX_train = np.array(X_train.todense())\nX_test = np.array(X_test.todense())\n\n# Write to binary numpy files\nnp.save(args.data_path + \"/data/epsilon.X_train\", X_train)\nnp.save(args.data_path + \"/data/epsilon.X_test\", X_test)\nnp.save(args.data_path + \"/data/epsilon.y_train\", y_train)\nnp.save(args.data_path + \"/data/epsilon.y_test\", y_test)\n", "repo_name": "choudary21/WML-CE-RAPIDS-PAI4SK", "sub_path": "preprocess-epsilon.py", "file_name": "preprocess-epsilon.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_svmlight_file", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 29, "usage_type": "call"}]}
+{"seq_id": "4298255365", "text": "import cv2\n\ncap = cv2.VideoCapture(\"C:\\\\vscode\\\\Teknofest\\\\klasor11\\\\car.mp4\")\n\n\ncircles = []\ndef mouse(event,x,y,lags,params): # event: mouse action; x, y: center of the circle to be drawn\n    if event == cv2.EVENT_LBUTTONDOWN:\n        circles.append((x,y))\n\ncv2.namedWindow(\"Frame\")\ncv2.setMouseCallback(\"Frame\",mouse) # specifies the actions performed on the frame.\n\nwhile True:\n    _,frame = cap.read()\n    frame = cv2.resize(frame,(640,480))\n\n    for center in circles:\n        cv2.circle(frame,center,20,(255,0,0),-1)\n\n    cv2.imshow(\"Frame\",frame)\n\n    key = cv2.waitKey(1)\n\n    if key == 27:\n        break\n    elif key == ord(\"h\"): # to clear the screen\n        circles = []\n\n\ncap.release()\ncv2.destroyAllWindows()\n", "repo_name": "Yusufygc/GoruntuIsleme", "sub_path": "klasor11/fare_kullanımı.py", "file_name": "fare_kullanımı.py", "file_ext": "py", "file_size_in_byte": 736, "program_lang": "python", "lang": "tr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.VideoCapture", "line_number": 3, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 32, "usage_type": "call"}]}
+{"seq_id": "27300749332", "text": "from time import time\nimport sklearn.ensemble as ske\nfrom clustering_cdhit import readJson\nimport numpy as np\nimport sys, os\nimport pickle\n \n\n\ndef train_model(numpy_matrix, region_type, samples_file, samples_sp2): \n    \n    samples_dic = readJson(samples_file)\n    samplesnumber = len(samples_dic.keys())\n    gene_matrix=np.load(numpy_matrix)\n\n    Y = np.zeros((1,samplesnumber))\n    Y[:,0:samples_sp2]=1\n    print (Y)\n    Y=Y.T\n    print (Y.shape)\n\n    t0 = time()\n    model = ske.RandomForestRegressor(bootstrap=True, n_estimators=1000, n_jobs= 16, max_depth=None, min_samples_split=1.0, random_state=0)\n    model.fit(gene_matrix, Y.ravel())\n    print(\"analysis done in %0.3fs.\" % (time() - t0)) \n\n    try:\n        os.stat(\"models\")\n    except Exception:\n        os.mkdir(\"models\")\n\n    # save the model to disk\n    filename = 'models/finalized_model_%s.sav' %region_type\n    pickle.dump(model, open(filename, 'wb'))\n\n\n\nif __name__ == \"__main__\":\n\n    numpy_matrix = sys.argv[1]\n    region_type = sys.argv[2]\n    samples_file = sys.argv[3]\n    samples_sp2 = sys.argv[4]\n\n    train_model(numpy_matrix, region_type, samples_file, int(samples_sp2))\n    #train_model('../matrix/0.7_PERC_gene.npy', 'gene', '../dics/samples.json', 2)", "repo_name": "ivyduk/identiclust-train", "sub_path": "scripts/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 1334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "clustering_cdhit.readJson", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 16, "usage_type": "call"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 23, "usage_type": "name"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 28, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}]}
+{"seq_id": "8362756674", "text": "from rest_framework import serializers\nfrom .models import Book, Comment\n\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Comment\n        exclude = []\n\nclass BookSerializer(serializers.ModelSerializer):\n    book_comments = CommentSerializer(many=True, read_only=True)\n    status = serializers.CharField(required=False)\n    checkOutBy = serializers.CharField(required=False)\n    checkOutDate = serializers.DateTimeField(required=False, allow_null=True)\n    addedDate = serializers.DateTimeField(required=False)\n    class Meta:\n        model = Book\n        exclude = ['statusBeforeCheckout']\n", "repo_name": "macofiloteo/library-drf", "sub_path": "api/books/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "models.Comment", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": 
"rest_framework.serializers", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.serializers.DateTimeField", "line_number": 15, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.serializers.DateTimeField", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Book", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "27614718057", "text": "import typing as t\n\nimport libtmux\nfrom libtmux._compat import LegacyVersion as Version\nfrom libtmux.common import get_version\n\nfrom .__about__ import __version__\nfrom .exc import TmuxpPluginException\n\n#: Minimum version of tmux required to run libtmux\nTMUX_MIN_VERSION = \"1.8\"\n\n#: Most recent version of tmux supported\nTMUX_MAX_VERSION = None\n\n#: Minimum version of libtmux required to run libtmux\nLIBTMUX_MIN_VERSION = \"0.8.3\"\n\n#: Most recent version of libtmux supported\nLIBTMUX_MAX_VERSION = None\n\n#: Minimum version of tmuxp required to use plugins\nTMUXP_MIN_VERSION = \"1.6.0\"\n\n#: Most recent version of tmuxp\nTMUXP_MAX_VERSION = None\n\n\nif t.TYPE_CHECKING:\n from libtmux.session import Session\n from libtmux.window import Window\n from typing_extensions import TypedDict, TypeGuard, Unpack\n\n from ._types import PluginConfigSchema\n\n class VersionConstraints(TypedDict):\n version: t.Union[Version, str]\n vmin: str\n vmax: t.Optional[str]\n incompatible: t.List[t.Union[t.Any, str]]\n\n class TmuxpPluginVersionConstraints(TypedDict):\n tmux: VersionConstraints\n tmuxp: VersionConstraints\n libtmux: VersionConstraints\n\n\nclass Config(t.TypedDict):\n plugin_name: str\n tmux_min_version: str\n tmux_max_version: t.Optional[str]\n tmux_version_incompatible: t.Optional[t.List[str]]\n libtmux_min_version: str\n libtmux_max_version: t.Optional[str]\n libtmux_version_incompatible: t.Optional[t.List[str]]\n tmuxp_min_version: str\n tmuxp_max_version: t.Optional[str]\n tmuxp_version_incompatible: t.Optional[t.List[str]]\n\n\nDEFAULT_CONFIG: \"Config\" = {\n \"plugin_name\": \"tmuxp-plugin\",\n \"tmux_min_version\": TMUX_MIN_VERSION,\n \"tmux_max_version\": TMUX_MAX_VERSION,\n \"tmux_version_incompatible\": None,\n \"libtmux_min_version\": LIBTMUX_MIN_VERSION,\n \"libtmux_max_version\": LIBTMUX_MAX_VERSION,\n \"libtmux_version_incompatible\": None,\n \"tmuxp_min_version\": TMUXP_MIN_VERSION,\n \"tmuxp_max_version\": TMUXP_MAX_VERSION,\n \"tmuxp_version_incompatible\": None,\n}\n\n\ndef validate_plugin_config(config: \"PluginConfigSchema\") -> \"TypeGuard[Config]\":\n return isinstance(config, dict)\n\n\ndef setup_plugin_config(\n config: \"PluginConfigSchema\", default_config: \"Config\" = DEFAULT_CONFIG\n) -> \"Config\":\n new_config = config.copy()\n for default_key, default_value in default_config.items():\n if default_key not in new_config:\n new_config[default_key] = default_value # type:ignore\n\n assert validate_plugin_config(new_config)\n\n return new_config\n\n\nclass TmuxpPlugin:\n def __init__(self, **kwargs: \"Unpack[PluginConfigSchema]\") -> None:\n \"\"\"\n Initialize plugin.\n\n The default version values are set to the versions that the plugin\n system requires.\n\n Parameters\n ----------\n plugin_name : str\n Name of the child plugin. 
Used in error message if the plugin fails to\n            load\n\n        tmux_min_version : str\n            Min version of tmux that the plugin supports\n\n        tmux_max_version : str\n            Max version of tmux that the plugin supports\n\n        tmux_version_incompatible : list\n            Versions of tmux that are incompatible with the plugin\n\n        libtmux_min_version : str\n            Min version of libtmux that the plugin supports\n\n        libtmux_max_version : str\n            Max version of libtmux that the plugin supports\n\n        libtmux_version_incompatible : list\n            Versions of libtmux that are incompatible with the plugin\n\n        tmuxp_min_version : str\n            Min version of tmuxp that the plugin supports\n\n        tmuxp_max_version : str\n            Max version of tmuxp that the plugin supports\n\n        tmuxp_version_incompatible : list\n            Versions of tmuxp that are incompatible with the plugin\n\n        \"\"\"\n        config = setup_plugin_config(config=kwargs)\n        self.plugin_name = config[\"plugin_name\"]\n\n        # Dependency versions\n        self.tmux_version = get_version()\n        self.libtmux_version = libtmux.__about__.__version__\n        self.tmuxp_version = Version(__version__)\n\n        self.version_constraints: \"TmuxpPluginVersionConstraints\" = {\n            \"tmux\": {\n                \"version\": self.tmux_version,\n                \"vmin\": config[\"tmux_min_version\"],\n                \"vmax\": config[\"tmux_max_version\"],\n                \"incompatible\": config[\"tmux_version_incompatible\"]\n                if config[\"tmux_version_incompatible\"]\n                else [],\n            },\n            \"libtmux\": {\n                \"version\": self.libtmux_version,\n                \"vmin\": config[\"libtmux_min_version\"],\n                \"vmax\": config[\"libtmux_max_version\"],\n                \"incompatible\": config[\"libtmux_version_incompatible\"]\n                if config[\"libtmux_version_incompatible\"]\n                else [],\n            },\n            \"tmuxp\": {\n                \"version\": self.tmuxp_version,\n                \"vmin\": config[\"tmuxp_min_version\"],\n                \"vmax\": config[\"tmuxp_max_version\"],\n                \"incompatible\": config[\"tmuxp_version_incompatible\"]\n                if config[\"tmuxp_version_incompatible\"]\n                else [],\n            },\n        }\n\n        self._version_check()\n\n    def _version_check(self) -> None:\n        \"\"\"\n        Check all dependency versions for compatibility.\n        \"\"\"\n        for dep, constraints in self.version_constraints.items():\n            assert isinstance(constraints, dict)\n            try:\n                assert self._pass_version_check(**constraints)\n            except AssertionError as e:\n                raise TmuxpPluginException(\n                    \"Incompatible {dep} version: {version}\\n{plugin_name} \"\n                    \"requirements:\\nmin: {vmin} | max: {vmax} | \"\n                    \"incompatible: {incompatible}\\n\".format(\n                        dep=dep, plugin_name=self.plugin_name, **constraints\n                    )\n                ) from e\n\n    def _pass_version_check(\n        self,\n        version: t.Union[str, Version],\n        vmin: str,\n        vmax: t.Optional[str],\n        incompatible: t.List[t.Union[t.Any, str]],\n    ) -> bool:\n        \"\"\"\n        Provide affirmative if version compatibility is correct.\n        \"\"\"\n        if vmin and version < Version(vmin):\n            return False\n        if vmax and version > Version(vmax):\n            return False\n        if version in incompatible:\n            return False\n\n        return True\n\n    def before_workspace_builder(self, session: \"Session\") -> None:\n        \"\"\"\n        Provide a session hook previous to creating the workspace.\n\n        This runs after the session has been created but before any of\n        the windows/panes/commands are entered.\n\n        Parameters\n        ----------\n        session : :class:`libtmux.Session`\n            session to hook into\n        \"\"\"\n\n    def on_window_create(self, window: \"Window\") -> None:\n        \"\"\"\n        Provide a window hook previous to doing anything with a window.\n\n        This runs before anything is created in the windows, like panes.\n\n        Parameters\n        ----------\n        window: :class:`libtmux.Window`\n            window to hook into\n        \"\"\"\n\n    def after_window_finished(self, window: \"Window\") -> None:\n        \"\"\"\n        Provide a window hook after creating the window.\n\n        This runs after everything has been created in the window, including\n        the panes and all of the commands for the panes. It also runs after the\n        ``options_after`` has been applied to the window.\n\n        Parameters\n        ----------\n        window: :class:`libtmux.Window`\n            window to hook into\n        \"\"\"\n\n    def before_script(self, session: \"Session\") -> None:\n        \"\"\"\n        Provide a session hook after the workspace has been built.\n\n        This runs after the workspace has been loaded with ``tmuxp load``. It\n        augments instead of replaces the ``before_script`` section of the\n        workspace data.\n\n        This hook provides access to the LibTmux.session object for any\n        behavior that would be used in the ``before_script`` section of the\n        workspace file that needs access directly to the session object.\n        This runs after the workspace has been loaded with ``tmuxp load``.\n\n        The hook augments, rather than replaces, the ``before_script`` section\n        of the workspace. While it is possible to do all of the\n        ``before_script`` workspace in this function, if a shell script\n        is currently being used for the workspace, it would be cleaner to\n        continue using the script in the ``before_section``.\n\n        If changes to the session need to be made prior to\n        anything being built, please use ``before_workspace_builder`` instead.\n\n        Parameters\n        ----------\n        session : :class:`libtmux.Session`\n            session to hook into\n        \"\"\"\n\n    def reattach(self, session: \"Session\") -> None:\n        \"\"\"\n        Provide a session hook before reattaching to the session.\n\n        Parameters\n        ----------\n        session : :class:`libtmux.Session`\n            session to hook into\n        \"\"\"\n", "repo_name": "tmux-python/tmuxp", "sub_path": "src/tmuxp/plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 9217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3824, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 29, "usage_type": "attribute"}, {"api_name": "typing_extensions.TypedDict", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 37, "usage_type": "attribute"}, {"api_name": "libtmux._compat.LegacyVersion", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 40, "usage_type": "attribute"}, {"api_name": "typing_extensions.TypedDict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 48, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 51, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 52, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 52, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 55, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 55, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 58, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "attribute"}, {"api_name": "libtmux.common.get_version", 
"line_number": 138, "usage_type": "call"}, {"api_name": "libtmux.__about__", "line_number": 139, "usage_type": "attribute"}, {"api_name": "libtmux._compat.LegacyVersion", "line_number": 140, "usage_type": "call"}, {"api_name": "__about__.__version__", "line_number": 140, "usage_type": "argument"}, {"api_name": "exc.TmuxpPluginException", "line_number": 180, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 190, "usage_type": "attribute"}, {"api_name": "libtmux._compat.LegacyVersion", "line_number": 190, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 192, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 193, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 193, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 193, "usage_type": "attribute"}, {"api_name": "libtmux._compat.LegacyVersion", "line_number": 198, "usage_type": "call"}, {"api_name": "libtmux._compat.LegacyVersion", "line_number": 200, "usage_type": "call"}]} +{"seq_id": "25393305392", "text": "from datetime import datetime\n\nfrom celery import shared_task\nfrom celery.utils.log import get_task_logger\n\nfrom management.investiments.models import Gain, Investiment, InvestimentStatus\n\nlogger = get_task_logger(__name__)\n\n\n@shared_task\ndef generate_gains():\n # import ipdb; ipdb.set_trace()\n investiments = Investiment.objects.filter(status=InvestimentStatus.ACTIVE)\n\n for investiment in investiments:\n gain = investiment.gains.last()\n today = datetime.now()\n\n difference = 0\n if gain:\n difference = today.month - gain.created_at.month\n else:\n difference = today.month - investiment.created_at.month\n\n if difference:\n new_gain = Gain.objects.create(\n origin_investiment=investiment,\n amount=Gain.calculate_gain(investiment=investiment)\n )\n\n logger.info(f\"{investiment.id} generated new gain of {new_gain.amount}.\")\n", "repo_name": "ftbevi/backend-test", "sub_path": "management/investiments/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "celery.utils.log.get_task_logger", "line_number": 8, "usage_type": "call"}, {"api_name": "management.investiments.models.Investiment.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "management.investiments.models.Investiment.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "management.investiments.models.Investiment", "line_number": 14, "usage_type": "name"}, {"api_name": "management.investiments.models.InvestimentStatus.ACTIVE", "line_number": 14, "usage_type": "attribute"}, {"api_name": "management.investiments.models.InvestimentStatus", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "management.investiments.models.Gain.objects.create", "line_number": 27, "usage_type": "call"}, {"api_name": "management.investiments.models.Gain.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "management.investiments.models.Gain", "line_number": 27, "usage_type": "name"}, {"api_name": "management.investiments.models.Gain.calculate_gain", "line_number": 29, "usage_type": "call"}, {"api_name": "management.investiments.models.Gain", "line_number": 29, "usage_type": "name"}, {"api_name": "celery.shared_task", "line_number": 
11, "usage_type": "name"}]} +{"seq_id": "73056735204", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport math\nimport time\nimport json\n\nfrom config import NOTLOCAL\n\nEPSILON = 0.0000001\nLOGDIR = os.environ['OPENSHIFT_LOG_DIR']\nMAX_CHAR = 4000\nCHAR_RATIO = 0.6643902034970293\n\nif not NOTLOCAL:\n LOGDIR = os.path.join(LOGDIR, '../server/logs')\n\njswriteto = lambda s: s[:-4] + '.js'\n_curpath = os.path.normpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n\nclass SimpleLinearRegression:\n\n \"\"\" tool class as help for calculating a linear function \"\"\"\n\n def __init__(self, data):\n \"\"\" initializes members with defaults \"\"\"\n self.data = data # list of (x,y) pairs\n self.a = 0 # \"a\" of y = a + b*x\n self.b = 0 # \"b\" of y = a + b*x\n self.r = 0 # coefficient of correlation\n\n def run(self):\n \"\"\" calculates coefficient of correlation and\n the parameters for the linear function \"\"\"\n sumX, sumY, sumXY, sumXX, sumYY = 0, 0, 0, 0, 0\n n = float(len(self.data))\n\n for x, y in self.data:\n sumX += x\n sumY += y\n sumXY += x * y\n sumXX += x * x\n sumYY += y * y\n\n denominator = math.sqrt(\n (sumXX - 1 / n * sumX ** 2) * (sumYY - 1 / n * sumY ** 2))\n if denominator < EPSILON:\n return False\n\n # coefficient of correlation\n self.r = (sumXY - 1 / n * sumX * sumY)\n self.r /= denominator\n\n # is there no relationship between 'x' and 'y'?\n if abs(self.r) < EPSILON:\n return False\n\n # calculating 'a' and 'b' of y = a + b*x\n self.b = sumXY - sumX * sumY / n\n self.b /= (sumXX - sumX ** 2 / n)\n\n self.a = sumY - self.b * sumX\n self.a /= n\n return True\n\n def function(self, x):\n \"\"\" linear function (be aware of current\n coefficient of correlation \"\"\"\n return self.a + self.b * x\n\n def __repr__(self):\n \"\"\" current linear function for print \"\"\"\n return \"y = f(x) = %(a)f + %(b)f*x\" % self.__dict__\n\nre_log = re.compile(r'^(.+?),(.+?),(\\d+),(\\d+),(\\d+),(.+)$')\nre_js = re.compile(r'(var kc =) \\d+(, bc =) \\d+(, km =) \\d+(, bm =) \\d+')\n\nstriplines = lambda s: '\\n'.join(l.strip() for l in s.splitlines())\n\n\ndef parselog(fromtime):\n logfile = os.path.join(LOGDIR, 'mosesserver.log')\n fromtime = time.strptime(fromtime, '%Y-%m-%d %H:%M:%S')\n validlinec = []\n validlinem = []\n with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:\n for ln in f:\n l = ln.strip()\n m = re_log.match(l)\n if not m:\n continue\n try:\n if time.strptime(m.group(1), '%Y-%m-%d %H:%M:%S') < fromtime:\n continue\n realchar = int(m.group(5)) * int(m.group(3)) / float(m.group(4))\n usedtime = float(m.group(6))\n except Exception:\n continue\n if m.group(2) == 'c2m':\n validlinec.append((realchar, usedtime))\n else:\n validlinem.append((realchar, usedtime))\n lrc = SimpleLinearRegression(validlinec)\n lrm = SimpleLinearRegression(validlinem)\n kc, bc, km, bm = 28, 8883, 28, 8883\n if lrc.run():\n kc, bc = int(lrc.b * 1000), int(lrc.a * 1000)\n if lrm.run():\n km, bm = int(lrm.b * 1000), int(lrm.a * 1000)\n return kc, bc, km, bm\n\n# min: -24.219574739049666\n\njoinlist = lambda l: ''.join(chr(min(32 + int(-n * 3.9), 126))\n for n in l).replace('\\\\', '\\\\\\\\').replace('\"', r'\\\"')\n\n\ndef writejs(value, jsfile):\n writeto = jswriteto(jsfile)\n js_template = ('var zhcmodel = \"%s\";\\nvar zhmmodel = \"%s\";\\n'\n 'var zhclen = %s, zhmlen = %s;\\n%s')\n zhmodel = json.load(open(os.path.join(_curpath, 'modelzh.json'), 'r'))\n f = striplines(js_template % (\n joinlist(zhmodel['zhc']), 
joinlist(zhmodel['zhm']),\n        int(MAX_CHAR * CHAR_RATIO), MAX_CHAR,\n        re_js.sub(r'\\1 %s\\2 %s\\3 %s\\4 %s' % value, open(jsfile, 'r').read())\n    )) + '\\n'\n    with open(writeto, 'w') as w:\n        w.write(f)\n\nif __name__ == '__main__':\n    REPODIR = os.environ['OPENSHIFT_REPO_DIR']\n    writejs(parselog('2020-01-01 00:00:00'),\n            os.path.join(REPODIR, 'diy/static/wenyan_.js'))\n", "repo_name": "gumblex/pywebapps", "sub_path": "diy/speedestimate.py", "file_name": "speedestimate.py", "file_ext": "py", "file_size_in_byte": 4346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.NOTLOCAL", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 79, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "time.strptime", "line_number": 87, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 97, "usage_type": "call"}, {"api_name": "json.load", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 126, "usage_type": "call"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}]}
+{"seq_id": "71781671204", "text": "import time\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport os\nimport platform\nsystem = platform.platform()\ndriverpath = \"/home/user/Schreibtisch/ChromeDriver_91.0.4472.19/chromedriver\"\nif 'Linux' in system:\n    tmp = \"/home/user\"\n    slashes = '/'\nif 'Windows' in system:\n    tmp = os.environ.get('TMP')\n    slashes = '\\\\'\n\n# create the file if it does not exist\ndef in_datei_schreiben():\n    now = time.strftime(\"%m-%d-%y_%H-%M-%S\")\n    file = open(newpath + slashes + username_chat.text + '.txt', 'a+')\n    file.write(username_chat.text + ' ' + 'zuletzt online:' + now + \"\\n\")\n    file.close()\n\n# create the folder for saving\nnewpath = tmp + slashes +'whatsapp_timestamps'\nif not os.path.exists(newpath):\n    os.makedirs(newpath)\n\n# point the webdriver at the Chrome debugger window\n\n# Windows\ndef webdriver_win():\n    chrome_options = Options()\n    chrome_options.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\n    chrome_driver = r\"C:\\Users\\danie\\AppData\\Local\\Programs\\Python\\chromedriver_win32_v90\\chromedriver.exe\"\n    browser = webdriver.Chrome(chrome_driver, options=chrome_options)\n    return browser\n# Linux\ndef webdriver_linux():\n    options = 
webdriver.ChromeOptions()\n    options.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\n    browser = webdriver.Chrome(options=options, executable_path=driverpath)\n    return browser\nif 'Linux' in system:\n    browser = webdriver_linux()\nif 'Windows' in system:\n    browser = webdriver_win()\n# tests whether the yellow warning that the phone is not connected exists, and stays in the loop until the connection is back\ndef test_connection():\n    oflinetester = browser.find_element_by_xpath(\"html/body/div/div[1]/div[1]/div[3]/div\") # .find_elements_by_tag_name(\"span\")\n    while \"Telefon nicht verbunden\" in oflinetester.text:\n        print(\"Telefon nicht verbunden...warte\")\n        time.sleep(3)\n# div value in case problems come up again\ndiv_wert = 3\n\n# xpath variables\nleft_sidebar_all_xpath = \"html/body/div/div[1]/div[1]/div[3]/div\"\nchatlist_xpath = left_sidebar_all_xpath + \"/div[2]/div[\" + str(div_wert) + \"]/div/div\"\narchiviert_xpath = left_sidebar_all_xpath + \"/div[2]/div[1]\"\nchat_xpath = \"html/body/div/div/div/div[4]/div/header/div[2]\"\nchat_username_xpath = chat_xpath + \"/div\"\nonlinestatus_xpath = chat_xpath + \"/div[2]/span\"\n\n# switches to the currently open tab (should be WhatsApp here)\nbrowser.switch_to.window(browser.window_handles[0])\ntest_connection()\n\n# determines the number of users\nchatlist = browser.find_element_by_xpath(chatlist_xpath)\nnutzeranzahl = int(chatlist.get_attribute(\"aria-rowcount\"))\ni = 1\n\n# presetting of the browser window heights\n# because a scrollable chat history causes unwanted effects (not all chats are picked up)\n# browser-side zooming is not recommended, because .click() then misses some users and the script gets the users mixed up\nbrowser.set_window_size(0, 0) # shrink to 0 to determine the real window size via the subsequent browser.get_window_size().get(\"height\")\nchrome_hoehe = browser.get_window_size().get(\"height\")\nheader_hoehe = browser.find_element_by_xpath(left_sidebar_all_xpath + \"/header\").rect.get(\"height\")\noffline_banner = 107 # stays hardcoded for now, since parametrization would be possible but makes little sense\nsucher_hoehe = browser.find_element_by_xpath(left_sidebar_all_xpath + \"/div[1]\").rect.get(\"height\")\narchive_hoehe = browser.find_element_by_xpath(archiviert_xpath).rect.get(\"height\")\ngesamt_vor_hoehe = chrome_hoehe + header_hoehe + offline_banner + sucher_hoehe + archive_hoehe\nchat_hoehe = browser.find_element_by_xpath(chatlist_xpath + \"/div[1]\").rect.get(\"height\")\nbrowser.set_window_size(600, gesamt_vor_hoehe + chat_hoehe * nutzeranzahl)\n\nprint(\"--------------------------------\")\n\nwhile i > 0:\n    # check all users in descending order (ascending should work too, but has not been tested yet)\n    for j in range(nutzeranzahl, 0, -1):\n        test_connection()\n        # determines the username from the chat list\n        username_chatverlauf = browser.find_element_by_xpath(chatlist_xpath + \"/div[\" + str(j) + \"]/div/div/div[2]/div[1]/div[1]/span/span\")\n        print(str(j))\n        print(\"listenname: \" + username_chatverlauf.text)\n        username_chatverlauf.click()\n        # determines the username from the clicked chat\n        chat = browser.find_element_by_xpath(chat_xpath)\n        username_chat = browser.find_element_by_xpath(chat_username_xpath)\n        # prints the chat header again so you can verify that the script runs cleanly\n        print(\"chatkopf: \" + username_chat.text)\n        # determines the online status\n        print(\"Online-Status: \")\n        time.sleep(0.1)\n        anzahl = 
chat.find_elements_by_tag_name(\"div\")\n        # if a second div element exists, check whether it is the word \"online\" or something else; if online, log the current time\n        if len(anzahl) == 3:\n            now = time.strftime(\"%m-%d-%y_%H-%M-%S\")\n            status = browser.find_element_by_xpath(onlinestatus_xpath).text\n            if status == \"online\":\n                print('zuletzt online:' + now)\n                in_datei_schreiben()\n            else:\n                print(\"offline\")\n        else:\n            print(\"offline\")\n        print(\"--------------------------------\")\n#    time.sleep(1)\n", "repo_name": "DanielMueller1309/Python", "sub_path": "sonstiges/wa_online_anzeiger/onliner.py", "file_name": "onliner.py", "file_ext": "py", "file_size_in_byte": 5387, "program_lang": "python", "lang": "de", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "platform.platform", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "time.strftime", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 34, "usage_type": "name"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 38, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 38, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 40, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 40, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 107, "usage_type": "call"}]}
+{"seq_id": "9884718420", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 13 18:24:13 2017\n\n@author: gualandi\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef DrawMatrix(M, name=None):\n    # Create a figure, draw the image given by the matrix, add the colorbar on the right\n    plt.figure(figsize=(6,6))\n    # Use the 'gray' colormap to get the checkerboard in black&white\n    plt.imshow(M, cmap='gray')\n    plt.savefig(\"prova.pdf\", dpi=300)\n    plt.show()\n\n#------------------------------------------\n# MAIN ENTRY POINT\n#------------------------------------------\nif __name__ == \"__main__\":\n    filename1 = 'D:\\\\Ricerca\\\\DOTA\\\\data\\\\DOTmark_1.0\\\\Data\\\\ClassicImages\\\\data32_1001.csv'\n    M1 = np.loadtxt(open(filename1, \"rb\"), delimiter=\",\")\n    DrawMatrix(M1, )\n\n    filename2 = 'D:\\\\Ricerca\\\\DOTA\\\\data\\\\DOTmark_1.0\\\\Data\\\\ClassicImages\\\\data64_1001.csv'\n    M2 = np.loadtxt(open(filename2, \"rb\"), delimiter=\",\")\n    DrawMatrix(M2)\n    \n    filename2 = 'D:\\\\Ricerca\\\\DOTA\\\\data\\\\DOTmark_1.0\\\\Data\\\\ClassicImages\\\\data128_1001.csv'\n    M2 = np.loadtxt(open(filename2, \"rb\"), delimiter=\",\")\n    DrawMatrix(M2)\n    \n    filename2 = 'D:\\\\Ricerca\\\\DOTA\\\\data\\\\DOTmark_1.0\\\\Data\\\\ClassicImages\\\\data256_1001.csv'\n    M2 = np.loadtxt(open(filename2, \"rb\"), delimiter=\",\")\n    DrawMatrix(M2)\n    \n    filename2 = 'D:\\\\Ricerca\\\\DOTA\\\\data\\\\DOTmark_1.0\\\\Data\\\\ClassicImages\\\\data512_1001.csv'\n    M2 = 
np.loadtxt(open(filename2, \"rb\"), delimiter=\",\")\n    DrawMatrix(M2)", "repo_name": "stegua/dotlib", "sub_path": "python/view_matrix.py", "file_name": "view_matrix.py", "file_ext": "py", "file_size_in_byte": 1428, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 41, "usage_type": "call"}]}
+{"seq_id": "43445701795", "text": "import win32com.client\nimport os\nimport Cybos_function\nimport sqlite3\n\nos.chdir(\"C:\\\\Users\\\\S\\\\Desktop\\\\바탕화면(임시)\\\\ETF\\\\\")\nos.getcwd()\n\n# check whether Cybos Plus is connected\nCybos_function.Get_login_status()\n\n# load the ETF codes and names\nETF_code_list, ETF_name_list = Cybos_function.Get_ETF_code_name()\n\n# save the ETF codes and ticker names to txt files\nwith open(\"ETF_code_list.txt\", \"w\") as f :\n    for code in ETF_code_list :\n        f.write(code + \"\\n\")\nwith open(\"ETF_name_list.txt\", \"w\") as f :\n    for name in ETF_name_list :\n        f.write(name + \"\\n\")\n\n# create the ETF daily-data object\nobjETF = win32com.client.Dispatch(\"Dscbo1.CpSvr7246\")\n\n# receive the data and store it in the DB\ncount = 1\ncon = sqlite3.connect(\"tmp/ETF.db\")\nfor code in ETF_code_list :\n    try :\n        print(count, \" / \" , len(ETF_code_list))\n        count += 1\n        ETF_df = Cybos_function.ETF_GetData(objETF, code)\n        ETF_df.to_sql(code, con, index=False)\n\n    except :\n        print(\"오류 : \", code)\n        continue\ncon.close()\n\n\n", "repo_name": "harimha/PycharmProjects", "sub_path": "python32bit_project/Using_Cybos_ETF.py", "file_name": "Using_Cybos_ETF.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "Cybos_function.Get_login_status", "line_number": 10, "usage_type": "call"}, {"api_name": "Cybos_function.Get_ETF_code_name", "line_number": 13, "usage_type": "call"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 24, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 24, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 24, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 28, "usage_type": "call"}, {"api_name": "Cybos_function.ETF_GetData", "line_number": 33, "usage_type": "call"}]}
+{"seq_id": "9227115544", "text": "\"\"\"\nThis script runs the signal_timestamps function, that gets the duration of each 
\ndatabase.\n\"\"\"\n\nimport os\nimport sqlite3\n\nfrom birdsong.data_preparation.audio_conversion.signal_extraction import signal_timestamps\n\nif 'HOSTNAME' in os.environ:\n # script runs on server\n STORAGE_DIR = '/storage/step1_wav/'\n DATABASE_DIR = '/storage/db.sqlite'\nelse:\n # script runs locally\n STORAGE_DIR = 'storage/step1_wav/'\n DATABASE_DIR = 'storage/db.sqlite'\n\n# Get a list of files that are downloaded\ndownloaded_files = os.listdir(STORAGE_DIR)\nprint('list with downloaded files made')\nprint(len(downloaded_files))\n\n# Get the recording ID's from the filenames\ndownloaded_ids = [int(x[:-4]) for x in downloaded_files]\n\n# Get all the recordings that were already processed before\nconn = sqlite3.connect(DATABASE_DIR)\nprint('database loaded')\nc = conn.cursor()\nq = '''\nSELECT id FROM recordings\nWHERE step1 = 1 AND duration IS NOT NULL\n'''\nc.execute(q)\nprocessed_ids = [i[0] for i in c.fetchall()]\nprint('list of already processed recordings')\nprint(len(processed_ids))\n\n# Remove the already processed recordings from the ones we want to process\nto_process = [x for x in downloaded_ids if x not in processed_ids]\nprint('list of files to process')\nprint(len(to_process))\n\n# Processing\nq = '''\nUPDATE recordings\nSET duration = ?, sum_signal = ?, timestamps = ? \nWHERE id = ?\n'''\nbatch = []\nfor i, rec_id in enumerate(to_process):\n rec = str(rec_id) + '.wav'\n print(rec)\n try:\n duration, sum_signal, timestamps = signal_timestamps(\n STORAGE_DIR + rec)\n batch.append((duration, sum_signal, timestamps, rec_id))\n if len(batch) % 50 == 0:\n print(f\"batch {i} full\")\n c.executemany(q, batch)\n conn.commit()\n batch = []\n except:\n print(f'could not get info of recording {rec}')\n pass\nc.executemany(q, batch)\nconn.commit()\nconn.close()\n", "repo_name": "multavici/DSR-Bird-Song", "sub_path": "birdsong/data_preparation/audio_conversion/get_timestamps.py", "file_name": "get_timestamps.py", "file_ext": "py", "file_size_in_byte": 2006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 30, "usage_type": "call"}, {"api_name": "birdsong.data_preparation.audio_conversion.signal_extraction.signal_timestamps", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "41621957023", "text": "import torch\r\nimport numpy as np\r\nfrom tqdm.auto import tqdm\r\n\r\ndevice = 'cuda' if torch. 
cuda.is_available() else 'cpu'\r\ntorch.manual_seed(777)\r\nif device == 'cuda':\r\n torch.cuda.manual_seed_all(777)\r\n\r\ndef train_one_epoch(model, train_loader, optimizer, criterion):\r\n model.train()\r\n running_loss = 0.0\r\n correct = 0\r\n total = 0\r\n\r\n for inputs, labels in tqdm(train_loader):\r\n inputs, labels = inputs.to(device), labels.to(device)\r\n\r\n optimizer.zero_grad()\r\n outputs = model(inputs)\r\n loss = criterion(outputs, labels)\r\n loss.backward()\r\n optimizer.step()\r\n running_loss += loss.item()\r\n\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum().item()\r\n\r\n return 100 * correct / total, running_loss / len(train_loader)\r\n\r\ndef evaluate(model, data_loader, criterion):\r\n model.eval()\r\n val_loss = 0.0\r\n correct = 0\r\n total = 0\r\n \r\n with torch.no_grad():\r\n for inputs, labels in tqdm(data_loader):\r\n inputs, labels = inputs.to(device), labels.to(device)\r\n \r\n outputs = model(inputs)\r\n loss = criterion(outputs, labels)\r\n val_loss += loss.item()\r\n\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum().item()\r\n\r\n return 100 * correct / total, val_loss / len(data_loader)\r\n\r\ndef SVM_data_setting(data_loader):\r\n input_list = []\r\n label_list = []\r\n \r\n for inputs, labels in tqdm(data_loader):\r\n inputs = np.array(inputs).flatten()\r\n input_list.append(inputs)\r\n label_list.append(labels)\r\n \r\n input_list = np.array(input_list).reshape(len(data_loader), -1)\r\n label_list = np.array(label_list)\r\n \r\n return input_list, label_list", "repo_name": "sumin303/23_2_AI_Midterm", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1890, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cuda.is_available", "line_number": 5, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 6, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 8, "usage_type": "attribute"}, {"api_name": "tqdm.auto.tqdm", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 46, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "35443731898", "text": "import requests\n\nrequest_id = input(\"Введите искомый id: \")\n\nresponse = requests.get(\n \"http://127.0.0.1:5000/api/problems\", {\"id\": request_id}\n)\n\njson = response.json()\nfor i in json.keys():\n print(json[i])\n", "repo_name": "Yabra/BeginnerCodeQuest", "sub_path": "api_tests/show_problems.py", "file_name": "show_problems.py", "file_ext": "py", "file_size_in_byte": 228, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}]} 
+{"seq_id": "29003530265", "text": "#!/usr/bin/env python3\n\n# Find errors in Postfix log SQLite DB\n\nimport re\nimport sys\nfrom collections import defaultdict\nfrom sqlite3 import connect\n\nclass PostfixSearch(object):\n\n \"\"\"\n \"\"\"\n\n def __init__(self):\n self._sql = None\n\n def load_db(self, dbpath):\n self._sql = connect(dbpath)\n\n def find_msg_by_subject(self, sender, subject):\n request = \"\"\"\n SELECT raddr.email, msg.qid, st.status\n FROM email_insertion msg\n INNER JOIN email_recipient rcv ON msg.qid = rcv.qid\n INNER JOIN email_sender snd ON msg.qid = snd.qid\n INNER JOIN email_address saddr ON snd.emailid = saddr.id\n INNER JOIN email_address raddr ON rcv.emailid = raddr.id\n INNER JOIN email_subject esub ON esub.qid = msg.qid\n INNER JOIN subject sub ON sub.id = esub.subjectid\n INNER JOIN email_status est ON est.qid = msg.qid\n INNER JOIN status st ON est.statusid = st.id\n WHERE saddr.email = :sender\n AND sub.text LIKE :subject\n ORDER BY raddr.email\n \"\"\"\n c = self._sql.cursor()\n c.execute(request, {'sender': sender, 'subject': subject})\n for row in c.fetchall():\n yield row\n\n def find_reason_by_qid(self, qid):\n request = \"\"\"\n SELECT est.msg, datetime(est.date, 'unixepoch')\n FROM email_status est\n WHERE est.qid=:qid\n \"\"\"\n c = self._sql.cursor()\n c.execute(request, {'qid': qid})\n return c.fetchone()\n\n\nHOSTERROR_CRE = re.compile(r'^host\\s([\\w\\.\\-]+)'\n r'\\[(\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\\]\\s'\n r'said:\\s(\\d{3})\\s(.*)$')\n\ndef main(dbpath, sender, subject):\n ps = PostfixSearch()\n ps.load_db(dbpath)\n addresses = defaultdict(list)\n for email, qid, status in ps.find_msg_by_subject(sender, subject):\n addresses[email].append((status, qid))\n invalid = 0\n issues = {}\n for addr in sorted(addresses, key=lambda a: a.lower()):\n statuses = addresses[addr]\n if 'sent' not in [s[0] for s in statuses]:\n qid = [s[1] for s in statuses if s[0] == 'bounced']\n if not qid:\n # likely to be invalid addresses\n # print(addr, statuses)\n invalid +=1 \n else:\n reason, dt = ps.find_reason_by_qid(qid[0])\n issues[addr] = (dt, reason)\n #print(addr)\n for addr in sorted(issues, key=lambda a: issues[a][0]):\n dt, reason = issues[addr]\n # print(\"%-32s %-20s %s\" % (addr, dt, reason))\n mo = HOSTERROR_CRE.match(reason)\n if mo:\n reason = mo.group(4)\n code = int(mo.group(3))\n else:\n reason = reason\n code = 500\n print(\"%s,%d,%s\" % (addr, code, reason))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Missing DB\", file=sys.stderr)\n exit(1)\n #main(sys.argv[1], 'Lettre%%Décembre 2015')\n main(sys.argv[1], 'paca@ml.ffplum.info', 'Newsletter Radios 8.33')\n", "repo_name": "ebouaziz/miscripts", "sub_path": "Python/email/findfail.py", "file_name": "findfail.py", "file_ext": "py", "file_size_in_byte": 3111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 60, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 92, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 95, "usage_type": "attribute"}]} +{"seq_id": "30907006467", "text": "import logging\nfrom typing import Dict, List\n\nfrom moodle_dl.config import ConfigHelper\nfrom moodle_dl.moodle.mods 
import MoodleMod\nfrom moodle_dl.moodle.moodle_constants import moodle_html_footer, moodle_html_header\nfrom moodle_dl.moodle.request_helper import RequestRejectedError\nfrom moodle_dl.types import Course, File\nfrom moodle_dl.utils import PathTools as PT\n\n\nclass QuizMod(MoodleMod):\n MOD_NAME = 'quiz'\n MOD_PLURAL_NAME = 'quizzes'\n MOD_MIN_VERSION = 2016052300 # 3.1\n\n @classmethod\n def download_condition(cls, config: ConfigHelper, file: File) -> bool:\n return config.get_download_quizzes() or (not (file.module_modname.endswith(cls.MOD_NAME) and file.deleted))\n\n async def real_fetch_mod_entries(self, courses: List[Course]) -> Dict[int, Dict[int, Dict]]:\n quizzes = (\n await self.client.async_post(\n 'mod_quiz_get_quizzes_by_courses', self.get_data_for_mod_entries_endpoint(courses)\n )\n ).get('quizzes', [])\n\n result = {}\n for quiz in quizzes:\n course_id = quiz.get('course', 0)\n\n quiz_files = quiz.get('introfiles', [])\n self.set_props_of_files(quiz_files, type='quiz_introfile')\n\n quiz_intro = quiz.get('intro', '')\n if quiz_intro != '':\n quiz_files.append(\n {\n 'filename': 'Quiz intro',\n 'filepath': '/',\n 'description': quiz_intro,\n 'type': 'description',\n }\n )\n\n self.add_module(\n result,\n course_id,\n quiz.get('coursemodule', 0),\n {\n 'id': quiz.get('id', 0),\n 'name': quiz.get('name', 'unnamed quiz'),\n 'files': quiz_files,\n },\n )\n\n await self.add_quizzes_files(result)\n return result\n\n async def add_quizzes_files(self, quizzes: Dict[int, Dict[int, Dict]]):\n \"\"\"\n Fetches for the quizzes list the quizzes files\n @param quizzes: Dictionary of all quizzes, indexed by courses, then module id\n \"\"\"\n if not self.config.get_download_quizzes():\n return\n\n if self.version < 2016052300: # 3.1\n return\n\n await self.run_async_load_function_on_mod_entries(quizzes, self.load_quiz_files)\n\n async def load_quiz_files(self, quiz: Dict):\n data = {'quizid': quiz.get('id', 0), 'userid': self.user_id, 'status': 'all'}\n attempts = (await self.client.async_post('mod_quiz_get_user_attempts', data)).get('attempts', [])\n quiz_name = quiz.get('name', '')\n for attempt in attempts:\n attempt['_quiz_name'] = quiz_name\n\n quiz['files'] += await self.run_async_collect_function_on_list(\n attempts,\n self.load_files_of_attempt,\n 'attempt',\n {'collect_id': 'id', 'collect_name': '_quiz_name'},\n )\n\n async def load_files_of_attempt(self, attempt: Dict) -> List[Dict]:\n result = []\n\n attempt_id = attempt.get('id', 0)\n attempt_state = attempt.get('state', 'unknown')\n quiz_name = attempt.get('_quiz_name', '')\n\n attempt_filename = PT.to_valid_name(\n quiz_name + ' (attempt ' + str(attempt_id) + ' ' + attempt_state + ')', is_file=False\n )\n\n data = {'attemptid': attempt_id}\n try:\n if attempt_state == 'finished':\n questions = (await self.client.async_post('mod_quiz_get_attempt_review', data)).get('questions', [])\n elif attempt_state == 'inprogress':\n questions = (await self.client.async_post('mod_quiz_get_attempt_summary', data)).get('questions', [])\n else:\n return result\n except RequestRejectedError:\n logging.debug(\"No access rights for quiz attempt %d\", attempt_id)\n return result\n\n # build quiz HTML\n quiz_html = moodle_html_header\n for question in questions:\n question_html = question.get('html', '').split('\n # \"\"\"\n\n event_data_time = f'''send reminder email'''\n event_data_zipcode = i[5]\n if not event_data_zipcode: # if zipcode in logistics form exists, use that, else use zipcode in account\n event_data_zipcode = i[4]\n 
event_data_location = zipcode_dict[event_data_zipcode]\n event_data += \"\" + \\\n i[-2]+\", \"+event_data_location+\", \" + \\\n event_data_time+\": \"+i[2]+\"
\"\n\n event_data += \"\"\n # print('event_data in try:', event_data)\n\n html += event_data\n # print('date_events_dict date entry to delete:',date_events_dict[curr_date])\n del date_events_dict[curr_date]\n # print('date_events_dict after deletion:')\n # print('done with 1 day')\n # html+=\"\"+str(curr_mo_date_events_dict[curr_date])+\"\"\n # curr_event_row.append(curr_mo_date_events_dict[curr_date])\n except Exception as e:\n # print(e)\n # print('except')\n html += \"\"\n # curr_event_row.append([])\n # print('curr_event_row',curr_event_row)\n # del date_events_dict[date]\n html += \"\"\n # print('done with 1 day')\n event_rows.append(curr_event_row)\n # print('event_rows',event_rows)\n # print(\"\\n\")\n\n if curr_mo_index == len(months)-1: # at last month, or months[11]='12'\n curr_mo_index = 0 # reset to months[0]\n curr_year = str(int(curr_year)+1)\n else:\n curr_mo_index += 1\n\n ######## end of new ########\n\n html += \"\"\n # print('done with calendar page html')\n # print()\n\n # if date is in current month, proceed to add date numbers and corresponding events on each date\n # if date is in next month, add month header, then sat-sun row, then add date numbers and corresponding events\n\n return html\n # return(render_template('calendar.html'))\n\n\n@app.route('/sendreminderemail', methods=[\"GET\", \"POST\"])\n# def makecalevents(ltype): # include this in return of oauth2callback\ndef sendreminderemail():\n\n if 'credentials' not in flask.session:\n return flask.redirect('authorize')\n\n # Load credentials from the session.\n credentials = google.oauth2.credentials.Credentials(\n **flask.session['credentials'])\n # print('credentials loaded from session')\n\n # def create_and_send_email():\n try:\n service = build('gmail', 'v1', credentials=credentials)\n # print('service built')\n\n # if window.confirm(\"Click OK to send email\"):\n message = EmailMessage()\n\n etype = request.args.get('etype')\n recipient = request.args.get('recipient')\n items = request.args.get('items')\n # remove ids from items\n pattern = r'\\([0-9]+\\)\\ '\n if len(re.findall(pattern, items)) > 1:\n plural = 1\n else:\n plural = 0\n items = re.sub(pattern, '', items)\n\n ymd = request.args.get('ymd')\n dt = datetime.datetime(\n int(ymd.split('-')[0]), int(ymd.split('-')[1]), int(ymd.split('-')[2]))\n dt_to_weekday = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday',\n 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}\n weekday = dt_to_weekday[dt.weekday()]\n mdy = ymd.split('-')[1]+'/'+ymd.split('-')[2]+'/'+ymd.split('-')[0]\n first_name = request.args.get('first_name')\n\n if etype == \"dropoff\":\n body_html = f\"\"\"

Hi {first_name},\nHope you're doing well! We're reaching out regarding the upcoming start of your rental of {items} on {weekday} {mdy}. Please use this link to schedule your drop-off. Let us know if you have any questions.\nAll the best,\nTeam Hubbub
\n \"\"\"\n body = MIMEText(body_html, 'html')\n message.set_content(body)\n # if one item, don't use plural\n if plural == 1:\n message['Subject'] = '[Hubbub] Scheduling drop-offs for your rentals'\n else:\n message['Subject'] = '[Hubbub] Scheduling drop-off for your rental'\n\n elif etype == \"pickup\":\n body_html = f\"\"\"

Hi {first_name},\nHope you're doing well and enjoying your rental!\nWe're reaching out regarding the upcoming end of your rental of {items} on {weekday} {mdy}. Please schedule your pick-up at this link, or use the link to set up a rental extension. Let us know if you have any questions.\nAll the best,\nTeam Hubbub
\n \"\"\"\n body = MIMEText(body_html, 'html')\n message.set_content(body)\n\n message['Subject'] = '[Hubbub] End of Rental Logistics'\n\n message['To'] = recipient\n message['From'] = 'hello@hubbub.shop'\n # message['Subject'] = 'Automated draft'\n\n # encoded message\n encoded_message = base64.urlsafe_b64encode(message.as_bytes()) \\\n .decode()\n\n create_message = {\n 'raw': encoded_message\n }\n # pylint: disable=E1101\n send_message = (service.users().messages().send\n (userId=\"me\", body=create_message).execute())\n print(F'Message Id: {send_message[\"id\"]}')\n except HttpError as error:\n print(F'An error occurred: {error}')\n send_message = None\n\n return \"email sent\"\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True, ssl_context='adhoc')\n", "repo_name": "angihe93/auto-ops", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 49713, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 28, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 35, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "psycopg2.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2.credentials.Credentials", "line_number": 53, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2", "line_number": 53, "usage_type": "attribute"}, {"api_name": "google.auth.transport.requests", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery.discovery.build", "line_number": 58, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery", "line_number": 58, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery", "line_number": 58, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2.credentials.Credentials", "line_number": 87, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2", "line_number": 87, "usage_type": "attribute"}, {"api_name": "google.auth.transport.requests", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 88, "usage_type": "attribute"}, {"api_name": 
"googleapiclient.discovery.discovery.build", "line_number": 92, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery", "line_number": 92, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery", "line_number": 92, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 250, "usage_type": "call"}, {"api_name": "google_auth_oauthlib.flow.flow.Flow.from_client_secrets_file", "line_number": 260, "usage_type": "call"}, {"api_name": "google_auth_oauthlib.flow.flow", "line_number": 260, "usage_type": "attribute"}, {"api_name": "google_auth_oauthlib.flow", "line_number": 260, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 267, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 278, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 281, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 289, "usage_type": "attribute"}, {"api_name": "google_auth_oauthlib.flow.flow.Flow.from_client_secrets_file", "line_number": 293, "usage_type": "call"}, {"api_name": "google_auth_oauthlib.flow.flow", "line_number": 293, "usage_type": "attribute"}, {"api_name": "google_auth_oauthlib.flow", "line_number": 293, "usage_type": "name"}, {"api_name": "flask.url_for", "line_number": 295, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 299, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 301, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 316, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 319, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 319, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 326, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 327, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2.credentials.Credentials", "line_number": 330, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2", "line_number": 330, "usage_type": "attribute"}, {"api_name": "google.auth.transport.requests", "line_number": 330, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 331, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery.discovery.build", "line_number": 335, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery", "line_number": 335, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery", "line_number": 335, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 343, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 343, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 343, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 345, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 345, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 345, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 347, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 347, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 347, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 349, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 349, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 349, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 350, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 350, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 350, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 352, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 352, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 353, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 353, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 353, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 354, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 354, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 386, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 386, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 386, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 387, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 387, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 387, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 388, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 388, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 388, "usage_type": "name"}, {"api_name": "email.message", "line_number": 389, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 389, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 389, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 389, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 412, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 412, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 412, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 460, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 460, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 464, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 464, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 469, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 472, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 599, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 600, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 601, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 609, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 610, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2.credentials.Credentials", "line_number": 613, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2", "line_number": 613, "usage_type": "attribute"}, {"api_name": "google.auth.transport.requests", "line_number": 613, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 
614, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery.discovery.build", "line_number": 619, "usage_type": "call"}, {"api_name": "googleapiclient.discovery.discovery", "line_number": 619, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery", "line_number": 619, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 622, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 622, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 639, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 835, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 961, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 1017, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2.credentials.Credentials", "line_number": 1020, "usage_type": "call"}, {"api_name": "google.auth.transport.requests.oauth2", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "google.auth.transport.requests", "line_number": 1020, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 1021, "usage_type": "attribute"}, {"api_name": "googleapiclient.discovery.build", "line_number": 1026, "usage_type": "call"}, {"api_name": "email.message.EmailMessage", "line_number": 1030, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 1032, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1032, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1032, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1033, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1033, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1033, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 1034, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1034, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1034, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 1037, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 1041, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 1043, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1043, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1043, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 1044, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 1050, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 1050, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 1050, "usage_type": "name"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1058, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 1073, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64encode", "line_number": 1083, "usage_type": "call"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 1093, "usage_type": "name"}]} +{"seq_id": "42659124275", "text": "#!/usr/bin/env python3\nimport os\nfrom Bio import SeqIO\n\nos.chdir('/Users/kika/ownCloud/blasto_comparative/proteins/')\ngenome = SeqIO.parse('Oeli_companion.CDS_cdseq_OK.fna', 'fasta')\n\nwith open('Oeli_companion.CDS_cdseq_OK_l30.fna', 'w') as out:\n\tfor seq in genome:\n\t\tif len(seq) >= 
90:\n\t\t\tout.write('>{}\\n{}\\n'.format(seq.description, seq.seq))\n\t\telse:\n\t\t\tprint(seq.description)\n", "repo_name": "kikinocka/ngs", "sub_path": "py_scripts/len_filter.py", "file_name": "len_filter.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 5, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 6, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "19583959872", "text": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# --------------------------------------------------\n# Description: Matrix Factorization With Tensorflow\n# --------------------------------------------------\n# Author: Roy Lei \n# Created Date : April 31st 2020\n# --------------------------------------------------\nimport requests\nimport pandas as pd\nCOURSERA_PATH = \"https://lua9b20g37-2.algolianet.com/1/indexes/*/queries?x-algolia-agent=Algolia%20for%20vanilla%20JavaScript%20(lite)%203.30.0%3Breact-instantsearch%205.2.3%3BJS%20Helper%202.26.1&x-algolia-application-id=LUA9B20G37&x-algolia-api-key=dcc55281ffd7ba6f24c3a9b18288499b\"\n\nclass Scraper(object):\n def __init__(self, path=\"\"):\n self.path = path\n self.responses = {}\n\n def _get_headers(self):\n return {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\"\n }\n\n def _get_payloads(self):\n return {\n \"requests\": [\n {\n \"indexName\": \"DO_NOT_DELETE_PLACEHOLDER_INDEX_NAME\",\n \"params\": \"query=&page=0&highlightPreTag=%3Cais-highlight-0000000000%3E&highlightPostTag=%3C%2Fais-highlight-0000000000%3E&optionalFilters=query%3Aapi&facets=%5B%5D&tagFilters=\"\n },\n {\n \"indexName\": \"prod_all_products_term_optimization\",\n \"params\": \"query=&hitsPerPage=100&page=0&highlightPreTag=%3Cais-highlight-0000000000%3E&highlightPostTag=%3C%2Fais-highlight-0000000000%3E&optionalFilters=query%3Aapi&ruleContexts=%5B%22en%22%5D&facets=%5B%5D&tagFilters=\"\n },\n {\n \"indexName\": \"test_suggestions\",\n \"params\": \"query=&hitsPerPage=7&page=0&highlightPreTag=%3Cais-highlight-0000000000%3E&highlightPostTag=%3C%2Fais-highlight-0000000000%3E&optionalFilters=query%3Aapi&facets=%5B%5D&tagFilters=\"\n }\n ]\n }\n\n def _fliter(self, items):\n dataframe = self._create_pandas()\n\n for item in items:\n # print(item)\n row = dict()\n for column in self._get_columns():\n if column == \"description\":\n row[column] = item[\"_snippetResult\"][\"description\"][\"value\"]\n else:\n row[column] = item[column]\n \n dataframe = dataframe.append(row, ignore_index=True)\n return dataframe\n\n def _get_columns(self):\n return [\n \"name\",\n \"productDifficultyLevel\",\n \"enrollments\",\n \"language\",\n \"numProductRatings\",\n \"avgProductRating\",\n \"skills\",\n \"imageUrl\",\n \"description\"\n ]\n \n def _create_pandas(self):\n return pd.DataFrame(columns=self._get_columns())\n\n def call(self):\n self.responses = requests.post(\n self.path, json=self._get_payloads(), headers=self._get_headers())\n return self.responses.json()\n\n def result(self):\n return self._fliter(self.responses.json()[\"results\"][1][\"hits\"])\n\ndef main():\n scraper = Scraper(COURSERA_PATH)\n scraper.call()\n scraper.result().to_csv(\"./dataset/courses.csv\", encoding='utf-8', index=False)\n\nif __name__ == \"__main__\":\n main()", "repo_name": "1997roylee/Final-Year-Project", "sub_path": "src/algorithm/CourseraScraper.py", 
"file_name": "CourseraScraper.py", "file_ext": "py", "file_size_in_byte": 3316, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "1381757365", "text": "import sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))\nfrom common import open_file\n\ncan_drop_to = lambda x, y: grid[y][x] == '.'\n\ndef fill_lines(this_x, this_y, next_x, next_y):\n if this_x > next_x:\n for x in range(int(this_x), int(next_x)-1, -1): grid[this_y][x] = '#'\n elif this_x < next_x:\n for x in range(int(this_x), int(next_x)+1): grid[this_y][x] = '#'\n elif this_y > next_y:\n for y in range(int(this_y), int(next_y)-1, -1): grid[y][this_x] = '#'\n else:\n for y in range(int(this_y), int(next_y)+1): grid[y][this_x] = '#'\n\ndef print_grid():\n for y in range(200):\n for x in range(400, 720): print(grid[y][x], end='')\n print()\n\ndef determine_abyss_row():\n for y in range(len(grid)-1, 0, -1):\n if '#' in grid[y]: return y\n\ndef drop_sand():\n sand_location = (500, 0)\n while sand_location[1] < abyss_row:\n if can_drop_to(sand_location[0], sand_location[1]+1):\n sand_location = (sand_location[0], sand_location[1]+1)\n elif can_drop_to(sand_location[0]-1, sand_location[1]+1):\n sand_location = (sand_location[0]-1, sand_location[1]+1)\n elif can_drop_to(sand_location[0]+1, sand_location[1]+1):\n sand_location = (sand_location[0]+1, sand_location[1]+1)\n else:\n grid[sand_location[1]][sand_location[0]] = 'o'\n return True\n if sand_location[1] >= abyss_row:\n print(\"IN THE ABYSS\")\n return False\n return True\n\ndef flush_sand():\n for y in range(len(grid)):\n for x in range(len(grid[y])):\n if grid[y][x] == 'o': grid[y][x] = '.'\n\ngrid = [['.' 
for x in range(1000)] for y in range(1000)]\nfor line in open_file(\"input.txt\"):\n    coords = line.strip().split(' -> ')\n    for index, coord in enumerate(coords):\n        if index < len(coords)-1:\n            this_x, this_y = coord.split(',')\n            next_x, next_y = coords[index+1].split(',')\n            fill_lines(int(this_x), int(this_y), int(next_x), int(next_y))\n\nabyss_row = determine_abyss_row()\nsand_dropped = 0\nwhile True:\n    if not drop_sand():\n        print(f\"Part 1: Successfully dropped {sand_dropped} pieces of sand\") # 578\n        break\n    sand_dropped += 1\n\n# PART 2: Re-initialise grid (remove sand, and draw floor)\nflush_sand()\nabyss_row += 2\ngrid[abyss_row] = ['#' for x in range(1000)]\nsand_dropped = 0\nwhile grid[0][500] != \"o\":\n    sand_dropped += 1\n    drop_sand()\nprint(f\"Part 2: Successfully dropped {sand_dropped} pieces of sand\") # 24377", "repo_name": "neilstudd/advent-of-code", "sub_path": "2022/day14/day14.py", "file_name": "day14.py", "file_ext": "py", "file_size_in_byte": 2539, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 2, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 2, "usage_type": "call"}, {"api_name": "os.path", "line_number": 2, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 2, "usage_type": "call"}, {"api_name": "common.open_file", "line_number": 49, "usage_type": "call"}]}
+{"seq_id": "4374460454", "text": "#!/usr/bin/env python\nimport rospy\nimport sys\nimport pickle\nimport socket\nimport numpy as np\nfrom visualization_msgs.msg import Marker\nfrom geometry_msgs.msg import PoseWithCovarianceStamped, Point, PointStamped\nimport tf\n\n\n# This node will periodically send its position to the CC.\n# It will also send a list of the messes that it has discovered to the CC.\nclass ClientNode():\n    def __init__(self):\n        rospy.init_node('location_node')\n        rospy.Subscriber('/visualization_marker', Marker, self.marker_callback, queue_size=1)\n        rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, self.amcl_callback)\n\n        host = \"134.126.125.125\" # ip of server\n        port = 13000\n        self.addr = (host, port)\n        self.messes = []\n        self.tf_listener = tf.TransformListener()\n        self.position = None\n\n        try:\n            self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        except Exception:\n            rospy.loginfo(\"FAILED TO CREATE SOCKET\")\n            sys.exit()\n\n        self.ROBOT_ID = 0\n        self.pos = [0,0]\n        self.mess = [None,None]\n        rate = rospy.Rate(5)\n\n        rospy.loginfo(\"LOCATION_CLIENT SETUP COMPLETE\")\n        self.old_marker = Point()\n        self.old_marker.x = 100000\n        self.old_marker.y = 100000\n        while not rospy.is_shutdown():\n            mess_x = self.mess[0]\n            mess_y = self.mess[1]\n\n            try:\n                self.s.sendto(pickle.dumps([0,self.ROBOT_ID, self.pos[0], self.pos[1]]), self.addr)\n            except Exception as e:\n                self.s.close()\n                rospy.loginfo(\"LOCATION_CLIENT ERROR. 
CLOSING SOCKET\")\n rospy.loginfo(e)\n sys.exit()\n\n rate.sleep()\n\n def is_new_mess(self, xone, yone, xtwo, ytwo):\n return np.sqrt((xone - xtwo)**2 + (yone - ytwo)**2) < .2\n\n # Callback for 'amcl_pose' topic\n def amcl_callback(self, amcl_msg):\n self.position = amcl_msg.pose.pose\n self.pos[0] = amcl_msg.pose.pose.position.x\n self.pos[1] = amcl_msg.pose.pose.position.y\n\n def marker_callback(self, marker):\n \"\"\" The marker callback to see the ar_markers\"\"\"\n marker_point = PointStamped()\n marker_point.header.frame_id = 'camera_rgb_optical_frame'\n marker_point.header.stamp = rospy.get_rostime()\n marker_point.point.x = marker.pose.position.x\n marker_point.point.y = marker.pose.position.y\n marker_point.point.z = marker.pose.position.z\n\n try: # transform marker position into the map frame\n marker_point.header.stamp = rospy.get_rostime()\n self.tf_listener.waitForTransform('camera_rgb_optical_frame',\n 'map',\n marker_point.header.stamp,\n rospy.Duration(1.0))\n # get the point transform\n marker_point = self.tf_listener.transformPoint('map', marker_point) \n except tf.Exception as e:\n print(e)\n print(\"ERROR in marker_callback\")\n\n new = True\n if len(self.messes) != 0:\n for mess in self.messes:\n if (self.close_enough(mess.x, mess.y, marker_point.point.x, marker_point.point.y, .15)):\n new = False\n else:\n if self.close_enough(self.position.position.x, self.position.position.y, marker_point.point.x, marker_point.point.y, 1):\n new = False\n rospy.loginfo(\"In second one\")\n self.messes.append(marker_point.point)\n self.s.sendto(pickle.dumps([1, self.ROBOT_ID, marker_point.point.x, marker_point.point.y]), self.addr)\n\n if self.close_enough(self.position.position.x, self.position.position.y, marker_point.point.x, marker_point.point.y, 1) and new:\n rospy.loginfo(\"In third one\")\n self.messes.append(marker_point.point)\n self.s.sendto(pickle.dumps([1, self.ROBOT_ID, marker_point.point.x, marker_point.point.y]), self.addr)\n\n \n \n\n def close_enough(self, x_one, y_one, x_two, y_two, threshold):\n \"\"\" Checks to see if its close enough to a goal\"\"\"\n return (np.sqrt((x_one - x_two)**2 + (y_one - y_two)**2) < threshold)\n\n \nif __name__ == \"__main__\":\n ClientNode()\n", "repo_name": "JoeyNeidigh/robo_cleanup", "sub_path": "scripts/location_client.py", "file_name": "location_client.py", "file_ext": "py", "file_size_in_byte": 4351, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rospy.init_node", "line_number": 16, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 17, "usage_type": "call"}, {"api_name": "visualization_msgs.msg.Marker", "line_number": 17, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 18, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PoseWithCovarianceStamped", "line_number": 18, "usage_type": "argument"}, {"api_name": "tf.TransformListener", "line_number": 24, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 28, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 28, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 31, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 36, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 38, "usage_type": "call"}, {"api_name": 
"geometry_msgs.msg.Point", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 42, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 50, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 57, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.PointStamped", "line_number": 67, "usage_type": "call"}, {"api_name": "rospy.get_rostime", "line_number": 69, "usage_type": "call"}, {"api_name": "rospy.get_rostime", "line_number": 75, "usage_type": "call"}, {"api_name": "rospy.Duration", "line_number": 79, "usage_type": "call"}, {"api_name": "tf.Exception", "line_number": 82, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 94, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 96, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 99, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "9311527661", "text": "import re\nimport os\nimport time\nimport poplib\nfrom django.core import mail\nfrom selenium.webdriver.common.keys import Keys\n\nfrom .base import FunctionalTest\n\nSUBJECT = 'Your login link for Superlists'\n\nclass LoginTest(FunctionalTest):\n\n def wait_for_mail(self, test_email, subject):\n\n if not self.staging_server:\n\n email = mail.outbox[0]\n self.assertIn(test_email, email.to)\n self.assertEqual(email.subject, subject)\n return email.body\n \n email_id = None\n start = time.time()\n inbox = poplib.POP3_SSL('pop.mail.yahoo.com')\n\n try:\n inbox.user(test_email)\n inbox.pass_(os.environ.get('YAHOO_PASSWORD'))\n\n while (time.time() - start) < 60:\n # get 10 newest messages\n count, _ = inbox.stat()\n for i in reversed(range(max(1, count - 10), count + 1)):\n print('getting message', i)\n _, lines, _ = inbox.retr(i)\n lines = [l.decode('utf-8') for l in lines]\n \n if f'Subject: {subject}' in lines:\n email_id = 1\n body = '\\n'.join(lines)\n return body\n time.sleep(5)\n \n finally:\n if email_id:\n inbox.dele(email_id)\n inbox.quit()\n\n def test_can_get_email_link_to_log_in(self):\n # Bob goes to the website and finds there's a new feature to log in.\n # He is prompted to enter the email and so does he do.\n if self.staging_server:\n test_email = 'testinggoatisthebest@yahoo.com'\n else:\n test_email = 'bob@example.com'\n self.browser.get(self.live_server_url)\n self.browser.find_element_by_name('email').send_keys(test_email)\n self.browser.find_element_by_name('email').send_keys(Keys.ENTER)\n\n # A message appears that a email has been sent to him with the log in\n # URL\n self.wait_for(lambda: self.assertIn(\n 'Check your email',\n self.browser.find_element_by_tag_name('body').text\n ))\n\n # She checks her mail and finds a message\n body = self.wait_for_mail(test_email, SUBJECT)\n\n # It has a a URL link in it\n self.assertIn('Use this link to log in', body)\n url_search = re.search(r'http://.+/.+$', body)\n\n if not url_search:\n self.fail(f'Could not find url in email body:\\n{body}')\n \n url = url_search.group(0)\n self.assertIn(self.live_server_url, url)\n\n # He clicks it\n self.browser.get(url)\n\n # He is logged in!\n self.wait_to_be_logged_in(test_email) \n\n # He now logs out of it\n 
self.browser.find_element_by_link_text('Log out').click()\n\n        # He is logged out\n        self.wait_to_be_logged_out(test_email)", "repo_name": "adityaprakash-bobby/py-tdd", "sub_path": "functional_tests/test_login.py", "file_name": "test_login.py", "file_ext": "py", "file_size_in_byte": 2927, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base.FunctionalTest", "line_number": 12, "usage_type": "name"}, {"api_name": "django.core.mail.outbox", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 18, "usage_type": "name"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "poplib.POP3_SSL", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 29, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 59, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 59, "usage_type": "name"}, {"api_name": "re.search", "line_number": 73, "usage_type": "call"}]}
+{"seq_id": "1844978227", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 17:27:50 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nimport requests\r\nimport lxml.html as lh\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl='http://s.cafef.vn/bao-cao-tai-chinh/ABC/BSheet/2018/0/0/1/0/bao-cao-tai-chinh-cong-ty-co-phan-truyen-thong-vmg.chn'\r\n\r\n#Create a handle, page, to handle the contents of the website\r\nimport time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver = webdriver.Chrome()\r\n\r\ndriver.get(url)\r\n\r\n# the scrape is kicked off at the bottom of the file, after loadcafe and find_table are defined\r\n\r\ndef loadcafe(cks, driver, outcd=None, outhd=None, outlctt=None):\r\n    for ck in cks:\r\n        ppTrucT = False\r\n        # type in the ticker symbol\r\n        ma_ck = driver.find_element_by_xpath(\"//input[@name='txtKeyword']\")\r\n        ma_ck.clear()\r\n        ma_ck.send_keys(ck)\r\n        # search for the ticker\r\n        driver.find_element_by_xpath(\"//img[@id='btSearch']\").click()\r\n        # select the yearly period\r\n        driver.find_element_by_xpath(\"//input[@id='rdo0']\").click()\r\n        # truoc = driver.find_element_by_xpath(\"//img[@alt='Xem dữ liệu trước']\") \r\n        # sau = driver.find_element_by_xpath(\"//img[@alt='Xem dữ liệu tiếp']\")\r\n        # report sections\r\n        \r\n        #----------INCOME STATEMENT\r\n        driver.find_element_by_xpath(\"//a[@id='aNhom2']\").click()\r\n        kqkd_0 = find_table(driver)\r\n        \r\n        #----------CASH FLOW STATEMENT (INDIRECT METHOD)\r\n        driver.find_element_by_xpath(\"//a[@id='aNhom3']\").click()\r\n        lctt_gt_0 = find_table(driver)\r\n        \r\n        if np.isnan(lctt_gt_0[lctt_gt_0.columns[4]][1]):\r\n            ppTrucT = True\r\n        \r\n        #----------CASH FLOW STATEMENT (DIRECT METHOD)\r\n        if ppTrucT :\r\n            driver.find_element_by_xpath(\"//a[@id='aNhom4']\").click()\r\n            lctt_tt_0 = find_table(driver)\r\n        \r\n        #----------BALANCE SHEET\r\n        driver.find_element_by_xpath(\"//a[@id='aNhom1']\").click() # open the balance sheet first\r\n        driver.find_element_by_link_text('Mở rộng').click()\r\n        cdkt_0 = find_table(driver)\r\n        
#---------------------------------------------------------------\r\n        lable_cdkt = cdkt_0.ix[:, 0:1] # labels for the line items\r\n        re_cdkt = cdkt_0.ix[:, 1:5] # yearly figures\r\n        \r\n        lable_kqkd = kqkd_0.ix[:, 0:1] # labels for the line items\r\n        re_kqkd = kqkd_0.ix[:, 1:5] # yearly figures\r\n        \r\n        lable_lctt_gt = lctt_gt_0.ix[:, 0:1] # labels for the line items\r\n        re_lctt_gt = lctt_gt_0.ix[:, 1:5] # yearly figures\r\n        if ppTrucT :\r\n            lable_lctt_tt = lctt_tt_0.ix[:, 0:1] # labels for the line items\r\n            re_lctt_tt = lctt_tt_0.ix[:, 1:5] # yearly figures\r\n        \r\n        \r\n        conti = True\r\n        count = 0\r\n        while conti :\r\n            for i in range(4):\r\n                driver.find_element_by_xpath(\"//img[@alt='Xem dữ liệu trước']\").click()\r\n            count +=4\r\n            # cdkt\r\n            driver.find_element_by_xpath(\"//a[@id='aNhom1']\").click()\r\n            new_cdkt = find_table(driver)\r\n            new_cdkt = new_cdkt.ix[:, 1:5]\r\n            re_cdkt = pd.concat([ new_cdkt,re_cdkt], axis=1, sort=False)\r\n            # kqkd\r\n            driver.find_element_by_xpath(\"//a[@id='aNhom2']\").click()\r\n            new_khkd = find_table(driver)\r\n            new_khkd= new_khkd.ix[:, 1:5]\r\n            re_kqkd = pd.concat([ new_khkd,re_kqkd], axis=1, sort=False)\r\n            # lctt_gt\r\n            driver.find_element_by_xpath(\"//a[@id='aNhom3']\").click()\r\n            new_lctt_gt = find_table(driver)\r\n            new_lctt_gt= new_lctt_gt.ix[:, 1:5]\r\n            re_lctt_gt = pd.concat([ new_lctt_gt,re_lctt_gt], axis=1, sort=False)\r\n            \r\n            # lctt_tt\r\n            if ppTrucT :\r\n                driver.find_element_by_xpath(\"//a[@id='aNhom4']\").click()\r\n                new_lctt_tt = find_table(driver)\r\n                new_lctt_tt = new_lctt_tt.ix[:, 1:5]\r\n                re_lctt_tt = pd.concat([ new_lctt_tt,re_lctt_tt], axis=1, sort=False)\r\n            \r\n            if np.isnan(new_cdkt[new_cdkt.columns[1]][0]):\r\n                conti = False\r\n        \r\n        re_cdkt = pd.concat([ lable_cdkt,re_cdkt], axis=1, sort=False) \r\n        re_kqkd = pd.concat([ lable_kqkd,re_kqkd], axis=1, sort=False)\r\n        re_lctt_gt = pd.concat([ lable_lctt_gt,re_lctt_gt], axis=1, sort=False)\r\n        if ppTrucT :\r\n            re_lctt_tt = pd.concat([ lable_lctt_tt,re_lctt_tt], axis=1, sort=False)\r\n        \r\n        \r\n        re_cdkt.to_csv(ck +\"cdkt.csv\",index = False)\r\n        re_kqkd.to_csv(ck +\"kqkd.csv\",index = False) \r\n        re_lctt_gt.to_csv(ck +\"lctt_gt.csv\",index = False)\r\n        if ppTrucT :\r\n            re_lctt_tt.to_csv(ck +\"lctt_tt.csv\",index = False) \r\n        for cl in range(count):\r\n            driver.find_element_by_xpath(\"//img[@alt='Xem dữ liệu tiếp']\").click()\r\n        \r\n#------------------------------------------------------------------------------------ \r\n\r\n\r\n    \r\ndef find_table(driver): \r\n    table = driver.find_element_by_id(\"tableContent\").get_attribute('outerHTML')\r\n    col_year = [\"tiêu chí\"]\r\n    years = driver.find_elements_by_class_name(\"h_t\")\r\n    for y in years:\r\n        col_year.append((y.text))\r\n    df = pd.read_html(str(table))\r\n    tb1 = df[0]\r\n    tb1 = tb1.dropna(subset =[ 0])\r\n    tb1= tb1.ix[:, 0:4]\r\n    tb1.columns = col_year\r\n    tb1 =tb1.reset_index(drop=True)\r\n    return tb1\r\n\r\n# run\r\nstart_time = time.time()\r\ncks = ['hng', 'hsg']\r\nloadcafe(cks, driver)\r\nend_time = round((time.time() - start_time)/60,2)\r\nprint(\"TOTAL_TIME--- %s Minute ---\" %end_time,'all complete')\r\n", "repo_name": "mis-drug-dealer/anythings_py", "sub_path": "autoCafef.py", "file_name": "autoCafef.py", "file_ext": "py", "file_size_in_byte": 5682, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 57, "usage_type": "call"}, 
{"api_name": "pandas.concat", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 119, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "9636794392", "text": "import secrets\r\nimport random\r\nimport time\r\nimport numpy as np\r\n\r\nmutationRate = 0.4\r\nbinList = []\r\nt1 = time.time()\r\nfor i in range (0,100):\r\n threshold = secrets.SystemRandom().uniform(0.000000000,1.0000000)\r\n if threshold >= 0.5:\r\n if threshold <= mutationRate:\r\n binList.append(-1)\r\n binList.append(1)\r\n elif threshold < 0.5:\r\n if threshold <= mutationRate:\r\n binList.append(-1)\r\n binList.append(0)\r\nt2 = time.time()\r\ntotalTime = t2 - t1\r\nprint(binList)\r\nprint(\"The time for that was: \", totalTime, \"s\")\r\n\r\n", "repo_name": "MichaelNorberto/NTRUPythonImplementation", "sub_path": "Random Polynomial Generation.py", "file_name": "Random Polynomial Generation.py", "file_ext": "py", "file_size_in_byte": 570, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 8, "usage_type": "call"}, {"api_name": "secrets.SystemRandom", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "74900913444", "text": "import os\n\nfrom selenium import webdriver\n\nfrom core.singlenton.app_path import AppPath\nfrom core.singlenton.logger import Logger\n\n\nclass WebDriver:\n class __WebDriver:\n def __init__(self):\n self.options = webdriver.ChromeOptions()\n self.options.add_argument(\"--start-maximized\")\n # self.options.add_argument(\"--headless\")\n # self.options.headless = True\n self.driver_path = os.path.abspath(AppPath.path + '//driver//chromedriver.exe')\n self.driver = webdriver.Chrome(executable_path=self.driver_path, chrome_options=self.options)\n self.driver.get('https://www.kickstarter.com/')\n\n def close_webdriver(self):\n print('close chrome')\n self.driver.quit()\n\n driver = None\n\n def __new__(cls):\n if not WebDriver.driver:\n try:\n WebDriver.driver = WebDriver.__WebDriver().driver\n except Exception as e:\n Logger().error('ERROR Starting webdriver')\n Logger().error(str(e))\n\n return WebDriver.driver\n", "repo_name": "jdieguezbean/kickstarterScraping", "sub_path": "core/singlenton/webdriver.py", "file_name": "webdriver.py", "file_ext": "py", "file_size_in_byte": 1080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 12, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "core.singlenton.app_path.AppPath.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": 
"core.singlenton.app_path.AppPath", "line_number": 16, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "core.singlenton.logger.Logger", "line_number": 31, "usage_type": "call"}, {"api_name": "core.singlenton.logger.Logger", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "72248278245", "text": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"Pipe Joints\",\n \"author\": \"Buerbaum Martin (Pontiac)\",\n \"version\": (0, 10, 7),\n \"blender\": (2, 5, 9),\n \"api\": 39685,\n \"location\": \"View3D > Add > Mesh > Pipe Joints\",\n \"description\": \"Add different types of pipe joints\",\n \"warning\": \"\",\n \"wiki_url\": \"http://wiki.blender.org/index.php/Extensions:2.5/Py/\"\\\n \"Scripts/Add_Mesh/Add_Pipe_Joints\",\n \"tracker_url\": \"https://projects.blender.org/tracker/index.php?\"\\\n \"func=detail&aid=21443\",\n \"category\": \"Add Mesh\"}\n\nimport bpy\nfrom math import *\nfrom bpy.props import *\n\n\n# Create a new mesh (object) from verts/edges/faces.\n# verts/edges/faces ... List of vertices/edges/faces for the\n# new mesh (as used in from_pydata).\n# name ... Name of the new mesh (& object).\ndef create_mesh_object(context, verts, edges, faces, name):\n # Create new mesh\n mesh = bpy.data.meshes.new(name)\n\n # Make a mesh from a list of verts/edges/faces.\n mesh.from_pydata(verts, edges, faces)\n\n # Update mesh geometry after adding stuff.\n mesh.update()\n\n from bpy_extras import object_utils\n return object_utils.object_data_add(context, mesh, operator=None)\n\n# A very simple \"bridge\" tool.\n# Connects two equally long vertex rows with faces.\n# Returns a list of the new faces (list of lists)\n#\n# vertIdx1 ... First vertex list (list of vertex indices).\n# vertIdx2 ... Second vertex list (list of vertex indices).\n# closed ... Creates a loop (first & last are closed).\n# flipped ... 
Invert the normal of the face(s).\n#\n# Note: You can set vertIdx1 to a single vertex index to create\n# a fan/star of faces.\n# Note: If both vertex idx list are the same length they have\n# to have at least 2 vertices.\ndef createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):\n faces = []\n\n if not vertIdx1 or not vertIdx2:\n return None\n\n if len(vertIdx1) < 2 and len(vertIdx2) < 2:\n return None\n\n fan = False\n if (len(vertIdx1) != len(vertIdx2)):\n if (len(vertIdx1) == 1 and len(vertIdx2) > 1):\n fan = True\n else:\n return None\n\n total = len(vertIdx2)\n\n if closed:\n # Bridge the start with the end.\n if flipped:\n face = [\n vertIdx1[0],\n vertIdx2[0],\n vertIdx2[total - 1]]\n if not fan:\n face.append(vertIdx1[total - 1])\n faces.append(face)\n\n else:\n face = [vertIdx2[0], vertIdx1[0]]\n if not fan:\n face.append(vertIdx1[total - 1])\n face.append(vertIdx2[total - 1])\n faces.append(face)\n\n # Bridge the rest of the faces.\n for num in range(total - 1):\n if flipped:\n if fan:\n face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]\n else:\n face = [vertIdx2[num], vertIdx1[num],\n vertIdx1[num + 1], vertIdx2[num + 1]]\n faces.append(face)\n else:\n if fan:\n face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]\n else:\n face = [vertIdx1[num], vertIdx2[num],\n vertIdx2[num + 1], vertIdx1[num + 1]]\n faces.append(face)\n\n return faces\n\n\nclass AddElbowJoint(bpy.types.Operator):\n # Create the vertices and polygons for a simple elbow (bent pipe).\n '''Add an Elbow pipe mesh'''\n bl_idname = \"mesh.primitive_elbow_joint_add\"\n bl_label = \"Add Pipe Elbow\"\n bl_options = {'REGISTER', 'UNDO'}\n\n radius = FloatProperty(name=\"Radius\",\n description=\"The radius of the pipe\",\n default=1.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n div = IntProperty(name=\"Divisions\",\n description=\"Number of vertices (divisions)\",\n default=32, min=3, max=256)\n\n angle = FloatProperty(name=\"Angle\",\n description=\"The angle of the branching pipe (i.e. 
the 'arm' - \" \\\n \"Measured from the center line of the main pipe\",\n default=radians(45.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n\n startLength = FloatProperty(name=\"Length Start\",\n description=\"Length of the beginning of the pipe\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n endLength = FloatProperty(name=\"End Length\",\n description=\"Length of the end of the pipe\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n\n def execute(self, context):\n\n radius = self.radius\n div = self.div\n\n angle = self.angle\n\n startLength = self.startLength\n endLength = self.endLength\n\n verts = []\n faces = []\n\n loop1 = [] # The starting circle\n loop2 = [] # The elbow circle\n loop3 = [] # The end circle\n\n # Create start circle\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = -startLength\n loop1.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n # Create deformed joint circle\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = locX * tan(angle / 2.0)\n loop2.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n # Create end circle\n baseEndLocX = -endLength * sin(angle)\n baseEndLocZ = endLength * cos(angle)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 - angle)\n locX = locX * sin(pi / 2.0 - angle)\n\n loop3.append(len(verts))\n # Translate and add circle vertices to the list.\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create faces\n faces.extend(createFaces(loop1, loop2, closed=True))\n faces.extend(createFaces(loop2, loop3, closed=True))\n\n base = create_mesh_object(context, verts, [], faces, \"Elbow Joint\")\n\n return {'FINISHED'}\n\n\nclass AddTeeJoint(bpy.types.Operator):\n # Create the vertices and polygons for a simple tee (T) joint.\n # The base arm of the T can be positioned in an angle if needed though.\n '''Add a Tee-Joint mesh'''\n bl_idname = \"mesh.primitive_tee_joint_add\"\n bl_label = \"Add Pipe Tee-Joint\"\n bl_options = {'REGISTER', 'UNDO'}\n\n radius = FloatProperty(name=\"Radius\",\n description=\"The radius of the pipe\",\n default=1.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n div = IntProperty(name=\"Divisions\",\n description=\"Number of vertices (divisions)\",\n default=32,\n min=4,\n max=256)\n\n angle = FloatProperty(name=\"Angle\",\n description=\"The angle of the branching pipe (i.e. 
the 'arm' - \" \\\n \"Measured from the center line of the main pipe\",\n default=radians(90.0),\n min=radians(0.1),\n max=radians(179.9),\n unit=\"ROTATION\")\n\n startLength = FloatProperty(name=\"Length Start\",\n description=\"Length of the beginning of the\" \\\n \" main pipe (the straight one)\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n endLength = FloatProperty(name=\"End Length\",\n description=\"Length of the end of the\" \\\n \" main pipe (the straight one)\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branchLength = FloatProperty(name=\"Arm Length\",\n description=\"Length of the arm pipe (the bent one)\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n\n def execute(self, context):\n\n radius = self.radius\n div = self.div\n\n angle = self.angle\n\n startLength = self.startLength\n endLength = self.endLength\n branchLength = self.branchLength\n\n if (div % 2):\n # Odd vertice number not supported (yet).\n return {'CANCELLED'}\n\n verts = []\n faces = []\n\n # List of vert indices of each cross section\n loopMainStart = [] # Vert indices for the\n # beginning of the main pipe.\n loopJoint1 = [] # Vert indices for joint that is used\n # to connect the joint & loopMainStart.\n loopJoint2 = [] # Vert indices for joint that is used\n # to connect the joint & loopArm.\n loopJoint3 = [] # Vert index for joint that is used\n # to connect the joint & loopMainEnd.\n loopArm = [] # Vert indices for the end of the arm.\n loopMainEnd = [] # Vert indices for the\n # end of the main pipe.\n\n # Create start circle (main pipe)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = -startLength\n loopMainStart.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n # Create deformed joint circle\n vertTemp1 = None\n vertTemp2 = None\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n\n if vertIdx == 0:\n vertTemp1 = len(verts)\n if vertIdx == div / 2:\n # @todo: This will possibly break if we\n # ever support odd divisions.\n vertTemp2 = len(verts)\n\n loopJoint1.append(len(verts))\n if (vertIdx < div / 2):\n # Straight side of main pipe.\n locZ = 0\n loopJoint3.append(len(verts))\n else:\n # Branching side\n locZ = locX * tan(angle / 2.0)\n loopJoint2.append(len(verts))\n\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n # Create 2. deformed joint (half-)circle\n loopTemp = []\n for vertIdx in range(div):\n if (vertIdx > div / 2):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = -cos(curVertAngle)\n locZ = -(radius * locX * tan((pi - angle) / 2.0))\n loopTemp.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n loopTemp2 = loopTemp[:]\n\n # Finalise 2. loop\n loopTemp.reverse()\n loopTemp.append(vertTemp1)\n loopJoint2.reverse()\n loopJoint2.extend(loopTemp)\n loopJoint2.reverse()\n\n # Finalise 3. 
loop\n loopTemp2.append(vertTemp2)\n loopTemp2.reverse()\n loopJoint3.extend(loopTemp2)\n\n # Create end circle (branching pipe)\n baseEndLocX = -branchLength * sin(angle)\n baseEndLocZ = branchLength * cos(angle)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 - angle)\n locX = locX * sin(pi / 2.0 - angle)\n\n loopArm.append(len(verts))\n\n # Add translated circle.\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create end circle (main pipe)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = endLength\n loopMainEnd.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n # Create faces\n faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))\n faces.extend(createFaces(loopJoint2, loopArm, closed=True))\n faces.extend(createFaces(loopJoint3, loopMainEnd, closed=True))\n\n base = create_mesh_object(context, verts, [], faces, \"Tee Joint\")\n\n return {'FINISHED'}\n\n\nclass AddWyeJoint(bpy.types.Operator):\n '''Add a Wye-Joint mesh'''\n bl_idname = \"mesh.primitive_wye_joint_add\"\n bl_label = \"Add Pipe Wye-Joint\"\n bl_options = {'REGISTER', 'UNDO'}\n\n radius = FloatProperty(name=\"Radius\",\n description=\"The radius of the pipe\",\n default=1.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n div = IntProperty(name=\"Divisions\",\n description=\"Number of vertices (divisions)\",\n default=32,\n min=4,\n max=256)\n\n angle1 = FloatProperty(name=\"Angle 1\",\n description=\"The angle of the 1. branching pipe \" \\\n \"(measured from the center line of the main pipe)\",\n default=radians(45.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n angle2 = FloatProperty(name=\"Angle 2\",\n description=\"The angle of the 2. branching pipe \" \\\n \"(measured from the center line of the main pipe) \",\n default=radians(45.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n\n startLength = FloatProperty(name=\"Length Start\",\n description=\"Length of the beginning of the\" \\\n \" main pipe (the straight one)\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branch1Length = FloatProperty(name=\"Length Arm 1\",\n description=\"Length of the 1. arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branch2Length = FloatProperty(name=\"Length Arm 2\",\n description=\"Length of the 2. arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n\n def execute(self, context):\n\n radius = self.radius\n div = self.div\n\n angle1 = self.angle1\n angle2 = self.angle2\n\n startLength = self.startLength\n branch1Length = self.branch1Length\n branch2Length = self.branch2Length\n\n if (div % 2):\n # Odd vertice number not supported (yet).\n return {'CANCELLED'}\n\n verts = []\n faces = []\n\n # List of vert indices of each cross section\n loopMainStart = [] # Vert indices for\n # the beginning of the main pipe.\n loopJoint1 = [] # Vert index for joint that is used\n # to connect the joint & loopMainStart.\n loopJoint2 = [] # Vert index for joint that\n # is used to connect the joint & loopArm1.\n loopJoint3 = [] # Vert index for joint that is\n # used to connect the joint & loopArm2.\n loopArm1 = [] # Vert idxs for end of the 1. arm.\n loopArm2 = [] # Vert idxs for end of the 2. 
arm.\n\n # Create start circle\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = -startLength\n loopMainStart.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n # Create deformed joint circle\n vertTemp1 = None\n vertTemp2 = None\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n\n if vertIdx == 0:\n vertTemp2 = len(verts)\n if vertIdx == div / 2:\n # @todo: This will possibly break if we\n # ever support odd divisions.\n vertTemp1 = len(verts)\n\n loopJoint1.append(len(verts))\n if (vertIdx > div / 2):\n locZ = locX * tan(angle1 / 2.0)\n loopJoint2.append(len(verts))\n else:\n locZ = locX * tan(-angle2 / 2.0)\n loopJoint3.append(len(verts))\n\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n # Create 2. deformed joint (half-)circle\n loopTemp = []\n angleJoint = (angle2 - angle1) / 2.0\n for vertIdx in range(div):\n if (vertIdx > div / 2):\n curVertAngle = vertIdx * (2.0 * pi / div)\n\n locX = (-sin(curVertAngle) * sin(angleJoint)\n / sin(angle2 - angleJoint))\n locY = -cos(curVertAngle)\n locZ = (-(sin(curVertAngle) * cos(angleJoint)\n / sin(angle2 - angleJoint)))\n\n loopTemp.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n loopTemp2 = loopTemp[:]\n\n # Finalise 2. loop\n loopTemp.append(vertTemp1)\n loopTemp.reverse()\n loopTemp.append(vertTemp2)\n loopJoint2.reverse()\n loopJoint2.extend(loopTemp)\n loopJoint2.reverse()\n\n # Finalise 3. loop\n loopTemp2.reverse()\n loopJoint3.extend(loopTemp2)\n\n # Create end circle (1. branching pipe)\n baseEndLocX = -branch1Length * sin(angle1)\n baseEndLocZ = branch1Length * cos(angle1)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 - angle1)\n locX = locX * sin(pi / 2.0 - angle1)\n\n loopArm1.append(len(verts))\n # Add translated circle.\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create end circle (2. 
branching pipe)\n baseEndLocX = branch2Length * sin(angle2)\n baseEndLocZ = branch2Length * cos(angle2)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 + angle2)\n locX = locX * sin(pi / 2.0 + angle2)\n\n loopArm2.append(len(verts))\n # Add translated circle\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create faces\n faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))\n faces.extend(createFaces(loopJoint2, loopArm1, closed=True))\n faces.extend(createFaces(loopJoint3, loopArm2, closed=True))\n\n base = create_mesh_object(context, verts, [], faces, \"Wye Joint\")\n\n return {'FINISHED'}\n\n\nclass AddCrossJoint(bpy.types.Operator):\n '''Add a Cross-Joint mesh'''\n # Create the vertices and polygons for a cross (+ or X) pipe joint.\n bl_idname = \"mesh.primitive_cross_joint_add\"\n bl_label = \"Add Pipe Cross-Joint\"\n bl_options = {'REGISTER', 'UNDO'}\n\n radius = FloatProperty(name=\"Radius\",\n description=\"The radius of the pipe\",\n default=1.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n div = IntProperty(name=\"Divisions\",\n description=\"Number of vertices (divisions)\",\n default=32,\n min=4,\n max=256)\n\n angle1 = FloatProperty(name=\"Angle 1\",\n description=\"The angle of the 1. arm (from the main axis)\",\n default=radians(90.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n angle2 = FloatProperty(name=\"Angle 2\",\n description=\"The angle of the 2. arm (from the main axis)\",\n default=radians(90.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n angle3 = FloatProperty(name=\"Angle 3 (center)\",\n description=\"The angle of the center arm (from the main axis)\",\n default=radians(0.0),\n min=radians(-179.9),\n max=radians(179.9),\n unit=\"ROTATION\")\n\n startLength = FloatProperty(name=\"Length Start\",\n description=\"Length of the beginning of the \" \\\n \"main pipe (the straight one)\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branch1Length = FloatProperty(name=\"Length Arm 1\",\n description=\"Length of the 1. arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branch2Length = FloatProperty(name=\"Length Arm 2\",\n description=\"Length of the 2. 
arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n branch3Length = FloatProperty(name=\"Length Arm 3 (center)\",\n description=\"Length of the center arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n\n def execute(self, context):\n\n radius = self.radius\n div = self.div\n\n angle1 = self.angle1\n angle2 = self.angle2\n angle3 = self.angle3\n\n startLength = self.startLength\n branch1Length = self.branch1Length\n branch2Length = self.branch2Length\n branch3Length = self.branch3Length\n if (div % 2):\n # Odd vertice number not supported (yet).\n return {'CANCELLED'}\n\n verts = []\n faces = []\n\n # List of vert indices of each cross section\n loopMainStart = [] # Vert indices for the\n # beginning of the main pipe.\n loopJoint1 = [] # Vert index for joint that is used\n # to connect the joint & loopMainStart.\n loopJoint2 = [] # Vert index for joint that is used\n # to connect the joint & loopArm1.\n loopJoint3 = [] # Vert index for joint that is used\n # to connect the joint & loopArm2.\n loopJoint4 = [] # Vert index for joint that is used\n # to connect the joint & loopArm3.\n loopArm1 = [] # Vert idxs for the end of the 1. arm.\n loopArm2 = [] # Vert idxs for the end of the 2. arm.\n loopArm3 = [] # Vert idxs for the center arm end.\n\n # Create start circle\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n locZ = -startLength\n loopMainStart.append(len(verts))\n verts.append([locX * radius, locY * radius, locZ])\n\n # Create 1. deformed joint circle\n vertTemp1 = None\n vertTemp2 = None\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n\n if vertIdx == 0:\n vertTemp2 = len(verts)\n if vertIdx == div / 2:\n # @todo: This will possibly break if we\n # ever support odd divisions.\n vertTemp1 = len(verts)\n\n loopJoint1.append(len(verts))\n if (vertIdx > div / 2):\n locZ = locX * tan(angle1 / 2.0)\n loopJoint2.append(len(verts))\n else:\n locZ = locX * tan(-angle2 / 2.0)\n loopJoint3.append(len(verts))\n\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n # loopTemp2 = loopJoint2[:] # UNUSED\n\n # Create 2. deformed joint circle\n loopTempA = []\n loopTempB = []\n angleJoint1 = (angle1 - angle3) / 2.0\n angleJoint2 = (angle2 + angle3) / 2.0\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n\n # Skip pole vertices\n # @todo: This will possibly break if\n # we ever support odd divisions.\n if not (vertIdx == 0) and not (vertIdx == div / 2):\n\n if (vertIdx > div / 2):\n angleJoint = angleJoint1\n angle = angle1\n Z = -1.0\n loopTempA.append(len(verts))\n\n else:\n angleJoint = angleJoint2\n angle = angle2\n Z = 1.0\n loopTempB.append(len(verts))\n\n locX = (sin(curVertAngle) * sin(angleJoint)\n / sin(angle - angleJoint))\n locY = -cos(curVertAngle)\n locZ = (Z * (sin(curVertAngle) * cos(angleJoint)\n / sin(angle - angleJoint)))\n\n verts.append([locX * radius, locY * radius, locZ * radius])\n\n loopTempA2 = loopTempA[:]\n loopTempB2 = loopTempB[:]\n loopTempB3 = loopTempB[:]\n\n # Finalise 2. loop\n loopTempA.append(vertTemp1)\n loopTempA.reverse()\n loopTempA.append(vertTemp2)\n loopJoint2.reverse()\n loopJoint2.extend(loopTempA)\n loopJoint2.reverse()\n\n # Finalise 3. loop\n loopJoint3.extend(loopTempB3)\n\n # Finalise 4. 
loop\n loopTempA2.append(vertTemp1)\n loopTempA2.reverse()\n loopTempB2.append(vertTemp2)\n loopJoint4.extend(reversed(loopTempB2))\n loopJoint4.extend(loopTempA2)\n\n # Create end circle (1. branching pipe)\n baseEndLocX = -branch1Length * sin(angle1)\n baseEndLocZ = branch1Length * cos(angle1)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 - angle1)\n locX = locX * sin(pi / 2.0 - angle1)\n\n loopArm1.append(len(verts))\n # Add translated circle.\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create end circle (2. branching pipe)\n baseEndLocX = branch2Length * sin(angle2)\n baseEndLocZ = branch2Length * cos(angle2)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 + angle2)\n locX = locX * sin(pi / 2.0 + angle2)\n\n loopArm2.append(len(verts))\n # Add translated circle\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create end circle (center pipe)\n baseEndLocX = branch3Length * sin(angle3)\n baseEndLocZ = branch3Length * cos(angle3)\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 + angle3)\n locX = locX * sin(pi / 2.0 + angle3)\n\n loopArm3.append(len(verts))\n # Add translated circle\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n # Create faces\n faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))\n faces.extend(createFaces(loopJoint2, loopArm1, closed=True))\n faces.extend(createFaces(loopJoint3, loopArm2, closed=True))\n faces.extend(createFaces(loopJoint4, loopArm3, closed=True))\n\n base = create_mesh_object(context, verts, [], faces, \"Cross Joint\")\n\n return {'FINISHED'}\n\n\nclass AddNJoint(bpy.types.Operator):\n '''Add a N-Joint mesh'''\n # Create the vertices and polygons for a regular n-joint.\n bl_idname = \"mesh.primitive_n_joint_add\"\n bl_label = \"Add Pipe N-Joint\"\n bl_options = {'REGISTER', 'UNDO'}\n\n radius = FloatProperty(name=\"Radius\",\n description=\"The radius of the pipe\",\n default=1.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n div = IntProperty(name=\"Divisions\",\n description=\"Number of vertices (divisions)\",\n default=32,\n min=4,\n max=256)\n number = IntProperty(name=\"Arms/Joints\",\n description=\"Number of joints/arms\",\n default=5,\n min=2,\n max=99999)\n length = FloatProperty(name=\"Length\",\n description=\"Length of each joint/arm\",\n default=3.0,\n min=0.01,\n max=100.0,\n unit=\"LENGTH\")\n\n def execute(self, context):\n radius = self.radius\n div = self.div\n number = self.number\n length = self.length\n\n if (div % 2):\n # Odd vertice number not supported (yet).\n return {'CANCELLED'}\n\n if (number < 2):\n return {'CANCELLED'}\n\n verts = []\n faces = []\n\n loopsEndCircles = []\n loopsJointsTemp = []\n loopsJoints = []\n\n vertTemp1 = None\n vertTemp2 = None\n\n angleDiv = (2.0 * pi / number)\n\n # Create vertices for the end circles.\n for num in range(number):\n circle = []\n # Create start circle\n angle = num * angleDiv\n\n baseEndLocX = length * sin(angle)\n baseEndLocZ = length * cos(angle)\n for vertIdx in range(div):\n 
curVertAngle = vertIdx * (2.0 * pi / div)\n # Create circle\n locX = sin(curVertAngle) * radius\n locY = cos(curVertAngle) * radius\n locZ = 0.0\n\n # Rotate circle\n locZ = locX * cos(pi / 2.0 + angle)\n locX = locX * sin(pi / 2.0 + angle)\n\n circle.append(len(verts))\n # Add translated circle\n verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])\n\n loopsEndCircles.append(circle)\n\n # Create vertices for the joint circles.\n loopJoint = []\n for vertIdx in range(div):\n curVertAngle = vertIdx * (2.0 * pi / div)\n locX = sin(curVertAngle)\n locY = cos(curVertAngle)\n\n skipVert = False\n # Store pole vertices\n if vertIdx == 0:\n if (num == 0):\n vertTemp2 = len(verts)\n else:\n skipVert = True\n elif vertIdx == div / 2:\n # @todo: This will possibly break if we\n # ever support odd divisions.\n if (num == 0):\n vertTemp1 = len(verts)\n else:\n skipVert = True\n\n if not skipVert:\n if (vertIdx > div / 2):\n locZ = -locX * tan((pi - angleDiv) / 2.0)\n loopJoint.append(len(verts))\n\n # Rotate the vert\n cosAng = cos(-angle)\n sinAng = sin(-angle)\n LocXnew = locX * cosAng - locZ * sinAng\n LocZnew = locZ * cosAng + locX * sinAng\n locZ = LocZnew\n locX = LocXnew\n\n verts.append([\n locX * radius,\n locY * radius,\n locZ * radius])\n else:\n # These two vertices will only be\n # added the very first time.\n if vertIdx == 0 or vertIdx == div / 2:\n verts.append([locX * radius, locY * radius, locZ])\n\n loopsJointsTemp.append(loopJoint)\n\n # Create complete loops (loopsJoints) out of the\n # double number of half loops in loopsJointsTemp.\n for halfLoopIdx in range(len(loopsJointsTemp)):\n if (halfLoopIdx == len(loopsJointsTemp) - 1):\n idx1 = halfLoopIdx\n idx2 = 0\n else:\n idx1 = halfLoopIdx\n idx2 = halfLoopIdx + 1\n\n loopJoint = []\n loopJoint.append(vertTemp2)\n loopJoint.extend(reversed(loopsJointsTemp[idx2]))\n loopJoint.append(vertTemp1)\n loopJoint.extend(loopsJointsTemp[idx1])\n\n loopsJoints.append(loopJoint)\n\n # Create faces from the two\n # loop arrays (loopsJoints -> loopsEndCircles).\n for loopIdx in range(len(loopsEndCircles)):\n faces.extend(\n createFaces(loopsJoints[loopIdx],\n loopsEndCircles[loopIdx], closed=True))\n\n base = create_mesh_object(context, verts, [], faces, \"N Joint\")\n\n return {'FINISHED'}\n\n\nclass INFO_MT_mesh_pipe_joints_add(bpy.types.Menu):\n # Define the \"Pipe Joints\" menu\n bl_idname = \"INFO_MT_mesh_pipe_joints_add\"\n bl_label = \"Pipe Joints\"\n\n def draw(self, context):\n layout = self.layout\n layout.operator_context = 'INVOKE_REGION_WIN'\n layout.operator(\"mesh.primitive_elbow_joint_add\",\n text=\"Pipe Elbow\")\n layout.operator(\"mesh.primitive_tee_joint_add\",\n text=\"Pipe T-Joint\")\n layout.operator(\"mesh.primitive_wye_joint_add\",\n text=\"Pipe Y-Joint\")\n layout.operator(\"mesh.primitive_cross_joint_add\",\n text=\"Pipe Cross-Joint\")\n layout.operator(\"mesh.primitive_n_joint_add\",\n text=\"Pipe N-Joint\")\n\n################################\n\n\n# Define \"Pipe Joints\" menu\ndef menu_func(self, context):\n self.layout.menu(\"INFO_MT_mesh_pipe_joints_add\", icon=\"PLUGIN\")\n\n\ndef register():\n bpy.utils.register_module(__name__)\n\n # Add \"Pipe Joints\" menu to the \"Add Mesh\" menu\n bpy.types.INFO_MT_mesh_add.append(menu_func)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\n # Remove \"Pipe Joints\" menu from the \"Add Mesh\" menu.\n bpy.types.INFO_MT_mesh_add.remove(menu_func)\n\n\nif __name__ == \"__main__\":\n register()\n", "repo_name": "damiles/blendocv", "sub_path": 
"release/scripts/addons/add_mesh_pipe_joint.py", "file_name": "add_mesh_pipe_joint.py", "file_ext": "py", "file_size_in_byte": 35356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bpy.data.meshes.new", "line_number": 45, "usage_type": "call"}, {"api_name": "bpy.data", "line_number": 45, "usage_type": "attribute"}, {"api_name": "bpy_extras.object_utils.object_data_add", "line_number": 54, "usage_type": "call"}, {"api_name": "bpy_extras.object_utils", "line_number": 54, "usage_type": "name"}, {"api_name": "bpy.types", "line_number": 125, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 225, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 404, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 598, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 853, "usage_type": "attribute"}, {"api_name": "bpy.types", "line_number": 1010, "usage_type": "attribute"}, {"api_name": "bpy.utils.register_module", "line_number": 1038, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 1038, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_mesh_add.append", "line_number": 1041, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 1041, "usage_type": "attribute"}, {"api_name": "bpy.utils.unregister_module", "line_number": 1045, "usage_type": "call"}, {"api_name": "bpy.utils", "line_number": 1045, "usage_type": "attribute"}, {"api_name": "bpy.types.INFO_MT_mesh_add.remove", "line_number": 1048, "usage_type": "call"}, {"api_name": "bpy.types", "line_number": 1048, "usage_type": "attribute"}]} +{"seq_id": "32605948504", "text": "from __future__ import (\n absolute_import,\n division,\n print_function,\n)\n\nimport base64\nimport io\nimport json\nimport os\n\ntry:\n # python 2\n from pipes import quote as shell_quote\nexcept ImportError:\n # python 3\n from shlex import quote as shell_quote\nimport re\nimport signal\nimport subprocess\nimport sys\ntry:\n # python2\n from urllib import urlencode as urllib_urlencode\nexcept ImportError:\n # python3\n from urllib.parse import urlencode as urllib_urlencode\n\nfrom pcs import settings\nfrom pcs.common import pcs_pycurl as pycurl\nfrom pcs.common.tools import (\n join_multilines,\n simple_cache,\n)\nfrom pcs.lib import reports\nfrom pcs.lib.errors import LibraryError, ReportItemSeverity\n\n\n\n_chkconfig = settings.chkconfig_binary\n_service = settings.service_binary\n_systemctl = settings.systemctl_binary\n\nclass ManageServiceError(Exception):\n #pylint: disable=super-init-not-called\n def __init__(self, service, message=None, instance=None):\n self.service = service\n self.message = message\n self.instance = instance\n\nclass DisableServiceError(ManageServiceError):\n pass\n\nclass EnableServiceError(ManageServiceError):\n pass\n\nclass StartServiceError(ManageServiceError):\n pass\n\nclass StopServiceError(ManageServiceError):\n pass\n\nclass KillServicesError(ManageServiceError):\n pass\n\n\ndef is_dir_nonempty(path):\n if not os.path.exists(path):\n return False\n if not os.path.isdir(path):\n return True\n return len(os.listdir(path)) > 0\n\n\ndef _get_service_name(service, instance=None):\n return \"{0}{1}.service\".format(\n service, \"\" if instance is None else \"@{0}\".format(instance)\n )\n\ndef ensure_is_systemd():\n \"\"\"\n Ensure if current system is systemd system. 
Raise LibraryError if not.\n \"\"\"\n if not is_systemctl():\n raise LibraryError(\n reports.unsupported_operation_on_non_systemd_systems()\n )\n\n\n\n@simple_cache\ndef is_systemctl():\n \"\"\"\n Check whether the local system is running on systemd.\n Returns True if the current system is systemctl compatible, False otherwise.\n \"\"\"\n systemd_paths = [\n '/run/systemd/system',\n '/var/run/systemd/system',\n ]\n for path in systemd_paths:\n if os.path.isdir(path):\n return True\n return False\n\n\ndef disable_service(runner, service, instance=None):\n \"\"\"\n Disable specified service in local system.\n Raise DisableServiceError or LibraryError on failure.\n\n runner -- CommandRunner\n service -- name of service\n instance -- instance name, it has no effect on non-systemd systems.\n If None no instance name will be used.\n \"\"\"\n if not is_service_installed(runner, service, instance):\n return\n if is_systemctl():\n stdout, stderr, retval = runner.run([\n _systemctl, \"disable\", _get_service_name(service, instance)\n ])\n else:\n stdout, stderr, retval = runner.run([_chkconfig, service, \"off\"])\n if retval != 0:\n raise DisableServiceError(\n service,\n join_multilines([stderr, stdout]),\n instance\n )\n\n\ndef enable_service(runner, service, instance=None):\n \"\"\"\n Enable specified service in local system.\n Raise EnableServiceError or LibraryError on failure.\n\n runner -- CommandRunner\n service -- name of service\n instance -- instance name, it has no effect on non-systemd systems.\n If None no instance name will be used.\n \"\"\"\n if is_systemctl():\n stdout, stderr, retval = runner.run([\n _systemctl, \"enable\", _get_service_name(service, instance)\n ])\n else:\n stdout, stderr, retval = runner.run([_chkconfig, service, \"on\"])\n if retval != 0:\n raise EnableServiceError(\n service,\n join_multilines([stderr, stdout]),\n instance\n )\n\n\ndef start_service(runner, service, instance=None):\n \"\"\"\n Start specified service in local system\n CommandRunner runner\n string service service name\n string instance instance name, it has no effect on non-systemd systems.\n If None no instance name will be used.\n \"\"\"\n if is_systemctl():\n stdout, stderr, retval = runner.run([\n _systemctl, \"start\", _get_service_name(service, instance)\n ])\n else:\n stdout, stderr, retval = runner.run([_service, service, \"start\"])\n if retval != 0:\n raise StartServiceError(\n service,\n join_multilines([stderr, stdout]),\n instance\n )\n\n\ndef stop_service(runner, service, instance=None):\n \"\"\"\n Stop specified service in local system\n CommandRunner runner\n string service service name\n string instance instance name, it has no effect on non-systemd systems.\n If None no instance name will be used.\n \"\"\"\n if is_systemctl():\n stdout, stderr, retval = runner.run([\n _systemctl, \"stop\", _get_service_name(service, instance)\n ])\n else:\n stdout, stderr, retval = runner.run([_service, service, \"stop\"])\n if retval != 0:\n raise StopServiceError(\n service,\n join_multilines([stderr, stdout]),\n instance\n )\n\n\ndef kill_services(runner, services):\n \"\"\"\n Kill specified services in local system\n CommandRunner runner\n iterable services service names\n \"\"\"\n # make killall not report that a process is not running\n stdout, stderr, retval = runner.run(\n [\"killall\", \"--quiet\", \"--signal\", \"9\", \"--\"] + list(services)\n )\n # If a process isn't running, killall will still return 1 even with --quiet.\n # We don't consider that an error, so we check for output string as 
well.\n # If it's empty, no actual error happened.\n if retval != 0:\n message = join_multilines([stderr, stdout])\n if message:\n raise KillServicesError(list(services), message)\n\n\ndef is_service_enabled(runner, service, instance=None):\n \"\"\"\n Check if specified service is enabled in local system.\n\n runner -- CommandRunner\n service -- name of service\n \"\"\"\n if is_systemctl():\n dummy_stdout, dummy_stderr, retval = runner.run(\n [_systemctl, \"is-enabled\", _get_service_name(service, instance)]\n )\n else:\n dummy_stdout, dummy_stderr, retval = runner.run([_chkconfig, service])\n\n return retval == 0\n\n\ndef is_service_running(runner, service, instance=None):\n \"\"\"\n Check if specified service is currently running on local system.\n\n runner -- CommandRunner\n service -- name of service\n \"\"\"\n if is_systemctl():\n dummy_stdout, dummy_stderr, retval = runner.run([\n _systemctl,\n \"is-active\",\n _get_service_name(service, instance)\n ])\n else:\n dummy_stdout, dummy_stderr, retval = runner.run(\n [_service, service, \"status\"]\n )\n\n return retval == 0\n\n\ndef is_service_installed(runner, service, instance=None):\n \"\"\"\n Check if specified service is installed on local system.\n\n runner -- CommandRunner\n service -- name of service\n instance -- systemd service instance\n \"\"\"\n if not is_systemctl():\n return service in get_non_systemd_services(runner)\n service_name = \"{0}{1}\".format(service, \"\" if instance is None else \"@\")\n return service_name in get_systemd_services(runner)\n\n\ndef get_non_systemd_services(runner):\n \"\"\"\n Returns list of all installed services on a non-systemd system.\n\n runner -- CommandRunner\n \"\"\"\n if is_systemctl():\n return []\n\n stdout, dummy_stderr, return_code = runner.run([_chkconfig])\n if return_code != 0:\n return []\n\n service_list = []\n for service in stdout.splitlines():\n service = service.split(\" \", 1)[0]\n if service:\n service_list.append(service)\n return service_list\n\n\ndef get_systemd_services(runner):\n \"\"\"\n Returns list of all systemd services installed on local system.\n\n runner -- CommandRunner\n \"\"\"\n if not is_systemctl():\n return []\n\n stdout, dummy_stderr, return_code = runner.run([\n _systemctl, \"list-unit-files\", \"--full\"\n ])\n if return_code != 0:\n return []\n\n service_list = []\n for service in stdout.splitlines():\n match = re.search(r'^([\\S]*)\\.service', service)\n if match:\n service_list.append(match.group(1))\n return service_list\n\n\ndef is_cman_cluster(runner):\n \"\"\"\n Detect if the underlying locally installed cluster is CMAN based\n \"\"\"\n # Checking corosync version works in most cases and supports non-rhel\n # distributions as well as running (manually compiled) corosync2 on rhel6.\n # - corosync2 does not support cman at all\n # - corosync1 runs with cman on rhel6\n # - corosync1 can be used without cman, but we don't support it anyways\n # - corosync2 is the default result if errors occur\n stdout, dummy_stderr, retval = runner.run([\n os.path.join(settings.corosync_binaries, \"corosync\"),\n \"-v\"\n ])\n if retval != 0:\n return False\n match = re.search(r\"version\\D+(\\d+)\", stdout)\n return match is not None and match.group(1) == \"1\"\n\n\ndef is_proxy_set(env_dict):\n \"\"\"\n Returns True whenever any of the proxy environment variables (https_proxy,\n HTTPS_PROXY, all_proxy, ALL_PROXY) are set in env_dict. 
False otherwise.\n\n env_dict -- environment variables in dict\n \"\"\"\n proxy_list = [\"https_proxy\", \"all_proxy\"]\n for var in proxy_list + [v.upper() for v in proxy_list]:\n if env_dict.get(var, \"\") != \"\":\n return True\n return False\n\n\nclass CommandRunner(object):\n def __init__(self, logger, reporter, env_vars=None):\n self._logger = logger\n self._reporter = reporter\n # Reset environment variables by empty dict is desired here. We need\n # to get rid of defaults - we do not know the context and environment\n # where the library runs. We also get rid of PATH settings, so all\n # executables must be specified with full path unless the PATH variable\n # is set from outside.\n self._env_vars = env_vars if env_vars else dict()\n self._python2 = sys.version[0] == \"2\"\n\n @property\n def env_vars(self):\n return self._env_vars.copy()\n\n def run(\n self, args, stdin_string=None, env_extend=None, binary_output=False\n ):\n # Allow overriding default settings. If a piece of code really wants to\n # set own PATH or CIB_file, we must allow it. I.e. it wants to run\n # a pacemaker tool on a CIB in a file but cannot afford the risk of\n # changing the CIB in the file specified by the user.\n env_vars = self._env_vars.copy()\n env_vars.update(\n dict(env_extend) if env_extend else dict()\n )\n\n log_args = \" \".join([shell_quote(x) for x in args])\n self._logger.debug(\n \"Running: {args}\\nEnvironment:{env_vars}{stdin_string}\".format(\n args=log_args,\n stdin_string=(\"\" if not stdin_string else (\n \"\\n--Debug Input Start--\\n{0}\\n--Debug Input End--\"\n .format(stdin_string)\n )),\n env_vars=(\"\" if not env_vars else (\n \"\\n\" + \"\\n\".join([\n \" {0}={1}\".format(key, val)\n for key, val in sorted(env_vars.items())\n ])\n ))\n )\n )\n self._reporter.process(\n reports.run_external_process_started(\n log_args, stdin_string, env_vars\n )\n )\n\n try:\n process = subprocess.Popen(\n args,\n # Some commands react differently if they get anything via stdin\n stdin=(subprocess.PIPE if stdin_string is not None else None),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n preexec_fn=(\n lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)\n ),\n close_fds=True,\n shell=False,\n env=env_vars,\n # decodes newlines and in python3 also converts bytes to str\n universal_newlines=(not self._python2 and not binary_output)\n )\n out_std, out_err = process.communicate(stdin_string)\n retval = process.returncode\n except OSError as e:\n raise LibraryError(\n reports.run_external_process_error(log_args, e.strerror)\n )\n\n self._logger.debug(\n (\n \"Finished running: {args}\\nReturn value: {retval}\"\n + \"\\n--Debug Stdout Start--\\n{out_std}\\n--Debug Stdout End--\"\n + \"\\n--Debug Stderr Start--\\n{out_err}\\n--Debug Stderr End--\"\n ).format(\n args=log_args,\n retval=retval,\n out_std=out_std,\n out_err=out_err\n )\n )\n self._reporter.process(reports.run_external_process_finished(\n log_args, retval, out_std, out_err\n ))\n return out_std, out_err, retval\n\n\n# deprecated\nclass NodeCommunicationException(Exception):\n # pylint: disable=super-init-not-called\n def __init__(self, node, command, reason):\n self.node = node\n self.command = command\n self.reason = reason\n\n\n# deprecated\nclass NodeConnectionException(NodeCommunicationException):\n pass\n\n\n# deprecated\nclass NodeAuthenticationException(NodeCommunicationException):\n pass\n\n\n# deprecated\nclass NodePermissionDeniedException(NodeCommunicationException):\n pass\n\n# deprecated\nclass 
NodeCommandUnsuccessfulException(NodeCommunicationException):\n pass\n\n# deprecated\nclass NodeUnsupportedCommandException(NodeCommunicationException):\n pass\n\n\n# deprecated\nclass NodeConnectionTimedOutException(NodeCommunicationException):\n pass\n\n\n# deprecated\ndef node_communicator_exception_to_report_item(\n e, severity=ReportItemSeverity.ERROR, forceable=None\n):\n \"\"\"\n Transform NodeCommunicationException to ReportItem\n \"\"\"\n if isinstance(e, NodeCommandUnsuccessfulException):\n return reports.node_communication_command_unsuccessful(\n e.node,\n e.command,\n e.reason\n )\n exception_to_report = {\n NodeAuthenticationException:\n reports.node_communication_error_not_authorized,\n NodePermissionDeniedException:\n reports.node_communication_error_permission_denied,\n NodeUnsupportedCommandException:\n reports.node_communication_error_unsupported_command,\n NodeCommunicationException:\n reports.node_communication_error_other_error,\n NodeConnectionException:\n reports.node_communication_error_unable_to_connect,\n NodeConnectionTimedOutException:\n reports.node_communication_error_timed_out,\n }\n if e.__class__ in exception_to_report:\n return exception_to_report[e.__class__](\n e.node,\n e.command,\n e.reason,\n severity,\n forceable\n )\n raise e\n\n# deprecated, use pcs.common.node_communicator.Communicator\nclass NodeCommunicator(object):\n \"\"\"\n Sends requests to nodes\n \"\"\"\n\n @classmethod\n def format_data_dict(cls, data):\n \"\"\"\n Encode data for transport (only plain dict is supported)\n \"\"\"\n return urllib_urlencode(data)\n\n @classmethod\n def format_data_json(cls, data):\n \"\"\"\n Encode data for transport (more complex data than in format_data_dict)\n \"\"\"\n return json.dumps(data)\n\n def __init__(\n self, logger, reporter, auth_tokens, user=None, groups=None,\n request_timeout=None\n ):\n \"\"\"\n auth_tokens authorization tokens for nodes: {node: token}\n user username\n groups groups the user is member of\n request_timeout -- positive integer, time for one reqest in seconds\n \"\"\"\n self._logger = logger\n self._reporter = reporter\n self._auth_tokens = auth_tokens\n self._user = user\n self._groups = groups\n self._request_timeout = request_timeout\n\n @property\n def request_timeout(self):\n return (\n settings.default_request_timeout\n if self._request_timeout is None\n else self._request_timeout\n )\n\n def call_node(self, node_addr, request, data, request_timeout=None):\n \"\"\"\n Send a request to a node\n node_addr destination node, instance of NodeAddresses\n request command to be run on the node\n data command parameters, encoded by format_data_* method\n \"\"\"\n return self.call_host(node_addr.ring0, request, data, request_timeout)\n\n def call_host(self, host, request, data, request_timeout=None):\n \"\"\"\n Send a request to a host\n host host address\n request command to be run on the host\n data command parameters, encoded by format_data_* method\n request timeout float timeout for request, if not set object property\n will be used\n \"\"\"\n def __debug_callback(data_type, debug_data):\n prefixes = {\n pycurl.DEBUG_TEXT: b\"* \",\n pycurl.DEBUG_HEADER_IN: b\"< \",\n pycurl.DEBUG_HEADER_OUT: b\"> \",\n pycurl.DEBUG_DATA_IN: b\"<< \",\n pycurl.DEBUG_DATA_OUT: b\">> \",\n }\n if data_type in prefixes:\n debug_output.write(prefixes[data_type])\n debug_output.write(debug_data)\n if not debug_data.endswith(b\"\\n\"):\n debug_output.write(b\"\\n\")\n\n output = io.BytesIO()\n debug_output = io.BytesIO()\n cookies = 
self.__prepare_cookies(host)\n timeout = (\n request_timeout\n if request_timeout is not None\n else self.request_timeout\n )\n url = \"https://{host}:2224/{request}\".format(\n host=(\"[{0}]\".format(host) if \":\" in host else host),\n request=request\n )\n\n handler = pycurl.Curl()\n handler.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTPS)\n handler.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000))\n handler.setopt(pycurl.URL, url.encode(\"utf-8\"))\n handler.setopt(pycurl.WRITEFUNCTION, output.write)\n handler.setopt(pycurl.VERBOSE, 1)\n handler.setopt(pycurl.DEBUGFUNCTION, __debug_callback)\n handler.setopt(pycurl.SSL_VERIFYHOST, 0)\n handler.setopt(pycurl.SSL_VERIFYPEER, 0)\n handler.setopt(pycurl.NOSIGNAL, 1) # required for multi-threading\n if cookies:\n handler.setopt(pycurl.COOKIE, \";\".join(cookies).encode(\"utf-8\"))\n if data:\n handler.setopt(pycurl.COPYPOSTFIELDS, data.encode(\"utf-8\"))\n\n msg = \"Sending HTTP Request to: {url}\"\n if data:\n msg += \"\\n--Debug Input Start--\\n{data}\\n--Debug Input End--\"\n self._logger.debug(msg.format(url=url, data=data))\n self._reporter.process(\n reports.node_communication_started(url, data)\n )\n result_msg = (\n \"Finished calling: {url}\\nResponse Code: {code}\"\n + \"\\n--Debug Response Start--\\n{response}\\n--Debug Response End--\"\n )\n\n try:\n handler.perform()\n response_data = output.getvalue().decode(\"utf-8\")\n response_code = handler.getinfo(pycurl.RESPONSE_CODE)\n self._logger.debug(result_msg.format(\n url=url,\n code=response_code,\n response=response_data\n ))\n self._reporter.process(reports.node_communication_finished(\n url, response_code, response_data\n ))\n if response_code == 400:\n # old pcsd protocol: error messages are commonly passed in plain\n # text in response body with HTTP code 400\n # we need to be backward compatible with that\n raise NodeCommandUnsuccessfulException(\n host, request, response_data.rstrip()\n )\n elif response_code == 401:\n raise NodeAuthenticationException(\n host, request, \"HTTP error: {0}\".format(response_code)\n )\n elif response_code == 403:\n raise NodePermissionDeniedException(\n host, request, \"HTTP error: {0}\".format(response_code)\n )\n elif response_code == 404:\n raise NodeUnsupportedCommandException(\n host, request, \"HTTP error: {0}\".format(response_code)\n )\n elif response_code >= 400:\n raise NodeCommunicationException(\n host, request, \"HTTP error: {0}\".format(response_code)\n )\n return response_data\n except pycurl.error as e:\n # In pycurl versions lower then 7.19.3 it is not possible to set\n # NOPROXY option. 
Therefore for the proper support of proxy settings\n # we have to use environment variables.\n if is_proxy_set(os.environ):\n self._logger.warning(\"Proxy is set\")\n self._reporter.process(\n reports.node_communication_proxy_is_set()\n )\n errno, reason = e.args\n msg = \"Unable to connect to {node} ({reason})\"\n self._logger.debug(msg.format(node=host, reason=reason))\n self._reporter.process(\n reports.node_communication_not_connected(host, reason)\n )\n if errno == pycurl.E_OPERATION_TIMEDOUT:\n raise NodeConnectionTimedOutException(host, request, reason)\n else:\n raise NodeConnectionException(host, request, reason)\n finally:\n debug_data = debug_output.getvalue().decode(\"utf-8\", \"ignore\")\n self._logger.debug(\n (\n \"Communication debug info for calling: {url}\\n\"\n \"--Debug Communication Info Start--\\n\"\n \"{data}\\n\"\n \"--Debug Communication Info End--\"\n ).format(url=url, data=debug_data)\n )\n self._reporter.process(\n reports.node_communication_debug_info(url, debug_data)\n )\n\n def __prepare_cookies(self, host):\n # Let's be safe about characters in variables (they can come from env)\n # and do base64. We cannot do it for CIB_user however to be backward\n # compatible so we at least remove disallowed characters.\n cookies = []\n if host in self._auth_tokens:\n cookies.append(\"token={0}\".format(self._auth_tokens[host]))\n if self._user:\n cookies.append(\"CIB_user={0}\".format(\n re.sub(r\"[^!-~]\", \"\", self._user).replace(\";\", \"\")\n ))\n if self._groups:\n cookies.append(\"CIB_user_groups={0}\".format(\n # python3 requires the value to be bytes not str\n base64.b64encode(\n \" \".join(self._groups).encode(\"utf-8\")\n ).decode(\"utf-8\")\n ))\n return cookies\n", "repo_name": "wangww631/pcs", "sub_path": "pcs/lib/external.py", "file_name": "external.py", "file_ext": "py", "file_size_in_byte": 23397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "pcs.settings.chkconfig_binary", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pcs.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "pcs.settings.service_binary", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pcs.settings", "line_number": 41, "usage_type": "name"}, {"api_name": "pcs.settings.systemctl_binary", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pcs.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 72, "usage_type": "call"}, {"api_name": "pcs.lib.errors.LibraryError", "line_number": 85, "usage_type": "call"}, {"api_name": "pcs.lib.reports.unsupported_operation_on_non_systemd_systems", "line_number": 86, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 86, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pcs.common.tools.simple_cache", "line_number": 91, "usage_type": "name"}, {"api_name": "pcs.common.tools.join_multilines", "line_number": 128, "usage_type": "call"}, {"api_name": "pcs.common.tools.join_multilines", "line_number": 152, "usage_type": "call"}, {"api_name": 
"pcs.common.tools.join_multilines", "line_number": 174, "usage_type": "call"}, {"api_name": "pcs.common.tools.join_multilines", "line_number": 196, "usage_type": "call"}, {"api_name": "pcs.common.tools.join_multilines", "line_number": 215, "usage_type": "call"}, {"api_name": "re.search", "line_number": 310, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 327, "usage_type": "call"}, {"api_name": "os.path", "line_number": 327, "usage_type": "attribute"}, {"api_name": "pcs.settings.corosync_binaries", "line_number": 327, "usage_type": "attribute"}, {"api_name": "pcs.settings", "line_number": 327, "usage_type": "name"}, {"api_name": "re.search", "line_number": 332, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 360, "usage_type": "attribute"}, {"api_name": "shlex.quote", "line_number": 378, "usage_type": "call"}, {"api_name": "pcs.lib.reports.run_external_process_started", "line_number": 395, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 395, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 401, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 404, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 405, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 406, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 408, "usage_type": "call"}, {"api_name": "signal.SIGPIPE", "line_number": 408, "usage_type": "attribute"}, {"api_name": "signal.SIG_DFL", "line_number": 408, "usage_type": "attribute"}, {"api_name": "pcs.lib.errors.LibraryError", "line_number": 419, "usage_type": "call"}, {"api_name": "pcs.lib.reports.run_external_process_error", "line_number": 420, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 420, "usage_type": "name"}, {"api_name": "pcs.lib.reports.run_external_process_finished", "line_number": 435, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 435, "usage_type": "name"}, {"api_name": "pcs.lib.errors.ReportItemSeverity.ERROR", "line_number": 480, "usage_type": "attribute"}, {"api_name": "pcs.lib.errors.ReportItemSeverity", "line_number": 480, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_command_unsuccessful", "line_number": 486, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 486, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_not_authorized", "line_number": 493, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 493, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_permission_denied", "line_number": 495, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 495, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_unsupported_command", "line_number": 497, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 497, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_other_error", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 499, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_unable_to_connect", "line_number": 501, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 501, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_error_timed_out", "line_number": 503, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", 
"line_number": 503, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 526, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 533, "usage_type": "call"}, {"api_name": "pcs.settings.default_request_timeout", "line_number": 555, "usage_type": "attribute"}, {"api_name": "pcs.settings", "line_number": 555, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUG_TEXT", "line_number": 580, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 580, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUG_HEADER_IN", "line_number": 581, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 581, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUG_HEADER_OUT", "line_number": 582, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 582, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUG_DATA_IN", "line_number": 583, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 583, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUG_DATA_OUT", "line_number": 584, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 584, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 592, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 593, "usage_type": "call"}, {"api_name": "pcs.common.pcs_pycurl.Curl", "line_number": 605, "usage_type": "call"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 605, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.PROTOCOLS", "line_number": 606, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 606, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.PROTO_HTTPS", "line_number": 606, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl.TIMEOUT_MS", "line_number": 607, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 607, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.URL", "line_number": 608, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 608, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.WRITEFUNCTION", "line_number": 609, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 609, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.VERBOSE", "line_number": 610, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 610, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.DEBUGFUNCTION", "line_number": 611, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 611, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.SSL_VERIFYHOST", "line_number": 612, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 612, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.SSL_VERIFYPEER", "line_number": 613, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 613, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.NOSIGNAL", "line_number": 614, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 614, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.COOKIE", "line_number": 616, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 616, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.COPYPOSTFIELDS", "line_number": 618, "usage_type": 
"attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 618, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_started", "line_number": 625, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 625, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.RESPONSE_CODE", "line_number": 635, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 635, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_finished", "line_number": 641, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 641, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.error", "line_number": 668, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 668, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 672, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports.node_communication_proxy_is_set", "line_number": 675, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 675, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_not_connected", "line_number": 681, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 681, "usage_type": "name"}, {"api_name": "pcs.common.pcs_pycurl.E_OPERATION_TIMEDOUT", "line_number": 683, "usage_type": "attribute"}, {"api_name": "pcs.common.pcs_pycurl", "line_number": 683, "usage_type": "name"}, {"api_name": "pcs.lib.reports.node_communication_debug_info", "line_number": 698, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 698, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 710, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 715, "usage_type": "call"}]} +{"seq_id": "3002356018", "text": "import uuid\nfrom http import HTTPStatus\nfrom typing import List, Optional\n\nfrom api.v1.films import FilmList\nfrom fastapi import APIRouter, Depends, HTTPException, Query\nfrom pydantic import BaseModel\nfrom services.film import FilmService, get_film_service\nfrom services.persons import PersonService, get_person_service\n\n# Объект router, в котором регистрируем обработчики\nrouter = APIRouter()\n\n\n# Модель ответа API\nclass Person(BaseModel):\n uuid: uuid.UUID\n full_name: str\n role: Optional[List[str]]\n film_ids: Optional[List]\n\n\n@router.get(\n \"/search\",\n response_model=List[Person],\n summary=\"Поиск персон\",\n description=\"Полнотекстовый поиск по персонам\",\n response_description=\"Полное имя, роль и id фильмов\",\n)\nasync def person_search(\n query: str = \"\",\n number: int = Query(default=1, alias=\"page[number]\"),\n size: int = Query(default=20, alias=\"page[size]\"),\n person_service: PersonService = Depends(get_person_service),\n) -> List[Person]:\n persons = await person_service.search(query, number, size)\n\n if not persons:\n # Если фильм не найден, отдаём 404 статус\n raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=\"person not found\")\n\n return [\n Person(\n uuid=uuid.UUID(person.uuid),\n full_name=person.full_name,\n role=person.role,\n film_ids=person.film_ids,\n )\n for person in persons\n ]\n\n\n@router.get(\n \"/{person_id}/film\",\n response_model=List[FilmList],\n summary=\"Выдача фильмов персоны по ID\",\n description=\"Выдача фильмов персоны по ID\",\n response_description=\"Название и рейтинг фильмов\",\n)\nasync def person_films(\n person_id: str,\n person_service: PersonService = Depends(get_person_service),\n film_service: FilmService = Depends(get_film_service),\n) -> 
List[FilmList]:\n person = await person_service.get_by_id(person_id)\n if not person:\n raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=\"person not found\")\n films = await film_service.get_by_ids(person.film_ids)\n if not films:\n raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=\"films not found\")\n return [\n FilmList(\n uuid=uuid.UUID(film.uuid),\n title=film.title,\n imdb_rating=film.imdb_rating,\n )\n for film in films\n ]\n\n\n@router.get(\n \"/{person_id}\",\n response_model=Person,\n summary=\"Get a person by ID\",\n description=\"Get a person by ID\",\n response_description=\"Full name, role and film IDs\",\n)\nasync def person_details(\n person_id: str,\n person_service: PersonService = Depends(get_person_service),\n) -> Person:\n person = await person_service.get_by_id(person_id)\n if not person:\n raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=\"person not found\")\n return Person(\n uuid=uuid.UUID(person.uuid),\n full_name=person.full_name,\n role=person.role,\n film_ids=person.film_ids,\n )\n", "repo_name": "LiubovAnt/backend_example", "sub_path": "movie_api/app/api/v1/persons.py", "file_name": "persons.py", "file_ext": "py", "file_size_in_byte": 3258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 16, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 17, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "services.persons.PersonService", "line_number": 34, "usage_type": "name"}, {"api_name": "fastapi.Query", "line_number": 32, "usage_type": "call"}, {"api_name": "fastapi.Query", "line_number": 33, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 34, "usage_type": "call"}, {"api_name": "services.persons.get_person_service", "line_number": 34, "usage_type": "argument"}, {"api_name": "fastapi.HTTPException", "line_number": 40, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 40, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 40, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "services.persons.PersonService", "line_number": 62, "usage_type": "name"}, {"api_name": "services.film.FilmService", "line_number": 63, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 62, "usage_type": "call"}, {"api_name": "services.persons.get_person_service", "line_number": 62, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 63, "usage_type": "call"}, {"api_name": "services.film.get_film_service", "line_number": 63, "usage_type": "argument"}, {"api_name": "fastapi.HTTPException", "line_number": 67, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 67, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 67, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 70, "usage_type": "call"}, 
{"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 70, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 70, "usage_type": "name"}, {"api_name": "api.v1.films.FilmList", "line_number": 72, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 73, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 55, "usage_type": "name"}, {"api_name": "api.v1.films.FilmList", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "api.v1.films.FilmList", "line_number": 64, "usage_type": "name"}, {"api_name": "services.persons.PersonService", "line_number": 90, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 90, "usage_type": "call"}, {"api_name": "services.persons.get_person_service", "line_number": 90, "usage_type": "argument"}, {"api_name": "fastapi.HTTPException", "line_number": 94, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 94, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 94, "usage_type": "name"}, {"api_name": "uuid.UUID", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "20287139230", "text": "\nimport os\nfrom flask import Flask, render_template, jsonify, request, send_file, send_from_directory, Response, abort\nfrom werkzeug.utils import secure_filename\nfrom flask import Flask\nfrom flask_restful import Api\nimport base64\nfrom edge_detection import edge_detection\nimport cv2\n\napp = Flask(__name__)\n\nINPUT_DIR_NAME = 'input/'\nOUTPUT_DIR_NAME = 'output/'\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n# static 파일 전달에 필요한 라우팅 \n@app.route('/static/')\ndef getResource(filename):\n if os.path.isfile('static/{}'.format(filename)):\n return send_file('static/{}'.format(filename))\n else:\n return abort(404, description=\"Resource not found\")\n\n\n\n\n#이미지 업로드에 사용되는\n@app.route('/uploadImage', methods = ['POST'])\ndef uploadImage():\n\n #파일 저장 이후,\n f = request.files['file']\n f.save(INPUT_DIR_NAME + f.filename)\n\n\n processed_filename = \"edge_{}.jpg\".format(f.filename)\n #엣지 디텍션 한다.\n print(\"이미지 읽는중..\")\n color_img = cv2.imread(INPUT_DIR_NAME + f.filename, cv2.IMREAD_COLOR) # 이미지 읽기\n print(\"엣지 디텍션중..\")\n edge = edge_detection(color_img) # 엣지 검출\n print(\"엣지 디텍션된 이미지 쓰는중중..\")\n cv2.imwrite(OUTPUT_DIR_NAME + processed_filename, edge) # 검출된 엣지 이미지 저장\n\n\n return send_file(OUTPUT_DIR_NAME + processed_filename, as_attachment=True)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=20000, debug=True)\n", "repo_name": "nacl1119/SportRecogProject", "sub_path": "edge_detection/start_edge_extractor_svr.py", "file_name": "start_edge_extractor_svr.py", "file_ext": "py", "file_size_in_byte": 1561, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.send_file", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 43, 
"usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 43, "usage_type": "attribute"}, {"api_name": "edge_detection.edge_detection", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "19342844499", "text": "import os\nfrom json import load\n\n__all__ = ['Planet']\n\nplanets_path = os.path.join(os.path.dirname(__file__), 'data', 'planets.json')\n\n\nclass Planet(object):\n \"\"\"\n Transiting planet parameters.\n\n This is meant to be a duck-type drop-in for the ``batman`` package's\n transiting exoplanet parameters ``TransitParams`` object.\n \"\"\"\n def __init__(self, per=None, t0=None, inc=None, rp=None, ecc=None, w=None,\n a=None, u=None, fp=None, t_secondary=None,\n limb_dark='quadratic'):\n self.per = per\n self.t0 = t0\n self.inc = inc\n self.rp = rp\n self.ecc = ecc\n self.w = w\n self.a = a\n self.u = u\n self.limb_dark = limb_dark\n self.fp = fp\n self.t_secondary = t_secondary\n\n @classmethod\n def from_name(cls, name):\n \"\"\"\n Initialize a Planet instance from the target name.\n\n There's a small (but growing?) database of planets pre-defined in the\n ``linea/data/planets.json`` file. If your favorite planet is missing,\n pull requests are welcome!\n\n Parameters\n ----------\n name : str (i.e.: \"55 Cnc e\" or \"WASP-189 b\")\n Name of the planet\n \"\"\"\n with open(planets_path, 'r') as f:\n planets = load(f)\n\n return cls(**planets[name])\n", "repo_name": "bmorris3/linea", "sub_path": "linea/planets.py", "file_name": "planets.py", "file_ext": "py", "file_size_in_byte": 1345, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "json.load", "line_number": 46, "usage_type": "call"}]} +{"seq_id": "9664654900", "text": "import typing\n\nimport discord\nfrom discord.ext import commands\n\nfrom jishaku.flags import Flags\nfrom jishaku.hljs import get_language, guess_file_traits\nfrom jishaku.shim.paginator_base import EmojiSettings\nfrom jishaku.types import ContextA\n\n# Version detection\nif discord.version_info >= (2, 0, 0):\n from jishaku.shim.paginator_200 import PaginatorEmbedInterface, PaginatorInterface\nelse:\n from jishaku.shim.paginator_170 import PaginatorEmbedInterface, PaginatorInterface\n\n__all__ = ('EmojiSettings', 'PaginatorInterface', 'PaginatorEmbedInterface',\n 'WrappedPaginator', 'FilePaginator', 'use_file_check')\n\n\nclass WrappedPaginator(commands.Paginator):\n \"\"\"\n A paginator that allows automatic wrapping of lines should they not fit.\n\n This is useful when paginating unpredictable output,\n as it allows for line splitting on big chunks of data.\n\n Delimiters are prioritized in the order of their tuple.\n\n Parameters\n -----------\n wrap_on: tuple\n A tuple of wrapping delimiters.\n include_wrapped: bool\n Whether to include the delimiter at the end of a wrapped line.\n force_wrap: bool\n If this is True, lines will be split at their maximum points should trimming not be possible\n with any provided delimiter.\n \"\"\"\n\n def __init__(\n self,\n *args: typing.Any,\n wrap_on: typing.Tuple[str, ...] 
= ('\\n', ' '),\n include_wrapped: bool = True,\n force_wrap: bool = False,\n **kwargs: typing.Any\n ):\n super().__init__(*args, **kwargs)\n self.wrap_on = wrap_on\n self.include_wrapped = include_wrapped\n self.force_wrap = force_wrap\n\n def add_line(self, line: str = '', *, empty: bool = False):\n true_max_size = self.max_size - self._prefix_len - self._suffix_len - 2 * self._linesep_len\n start = 0\n needle = 0\n last_delimiter = -1\n last_space = -1\n\n while needle < len(line):\n if needle - start >= true_max_size:\n if last_delimiter != -1:\n if self.include_wrapped and line[last_delimiter] != '\\n':\n super().add_line(line[start:last_delimiter + 1])\n needle = last_delimiter + 1\n start = last_delimiter + 1\n else:\n super().add_line(line[start:last_delimiter])\n needle = last_delimiter + 1\n start = last_delimiter + 1\n elif last_space != -1:\n super().add_line(line[start:last_space])\n needle = last_space + 1\n start = last_space\n else:\n super().add_line(line[start:needle])\n start = needle\n\n last_delimiter = -1\n last_space = -1\n\n if line[needle] in self.wrap_on:\n last_delimiter = needle\n elif line[needle] == ' ':\n last_space = needle\n\n needle += 1\n\n last_line = line[start:needle]\n if last_line:\n super().add_line(last_line)\n\n if empty:\n self._current_page.append('')\n self._count += self._linesep_len\n\n\nclass FilePaginator(commands.Paginator):\n \"\"\"\n A paginator of syntax-highlighted codeblocks, read from a file-like.\n\n Parameters\n -----------\n fp\n A file-like (implements ``fp.read``) to read the data for this paginator from.\n line_span: Optional[Tuple[int, int]]\n A linespan to read from the file. If None, reads the whole file.\n language_hints: Tuple[str, ...]\n A tuple of strings that may hint to the language of this file.\n This could include filenames, MIME types, or shebangs.\n A shebang present in the actual file will always be prioritized over this.\n \"\"\"\n\n def __init__(\n self,\n fp: typing.BinaryIO,\n line_span: typing.Optional[typing.Tuple[int, int]] = None,\n language_hints: typing.Tuple[str, ...] 
= (),\n **kwargs: typing.Any\n ):\n language = ''\n\n for hint in language_hints:\n language = get_language(hint)\n\n if language:\n break\n\n if not language:\n try:\n language = get_language(fp.name)\n except AttributeError:\n pass\n\n content, _, file_language = guess_file_traits(fp.read())\n\n language = file_language or language\n lines = content.split('\\n')\n\n super().__init__(prefix=f'```{language}', suffix='```', **kwargs)\n\n if line_span:\n if line_span[1] < line_span[0]:\n line_span = (line_span[1], line_span[0])\n\n if line_span[0] < 1 or line_span[1] > len(lines):\n raise ValueError(\"Linespan goes out of bounds.\")\n\n lines = lines[line_span[0] - 1:line_span[1]]\n\n for line in lines:\n self.add_line(line)\n\n\nclass WrappedFilePaginator(FilePaginator, WrappedPaginator):\n \"\"\"\n Combination of FilePaginator and WrappedPaginator.\n In other words, a FilePaginator that supports line wrapping.\n \"\"\"\n\n\ndef use_file_check(\n ctx: ContextA,\n size: int\n) -> bool:\n \"\"\"\n A check to determine if uploading a file and relying on Discord's file preview is acceptable over a PaginatorInterface.\n \"\"\"\n\n return all([\n size < 50_000, # Check the text is below the Discord cutoff point;\n not Flags.FORCE_PAGINATOR, # Check the user hasn't explicitly disabled this;\n (\n # Ensure the user isn't on mobile\n not ctx.author.is_on_mobile()\n if ctx.guild and ctx.bot.intents.presences and isinstance(ctx.author, discord.Member)\n else True\n )\n ])\n", "repo_name": "hifthot/skidcity", "sub_path": "blood/.local/lib/python3.10/site-packages/jishaku/paginators.py", "file_name": "paginators.py", "file_ext": "py", "file_size_in_byte": 5811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "discord.version_info", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Paginator", "line_number": 21, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 43, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 44, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 47, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Paginator", "line_number": 99, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.BinaryIO", "line_number": 117, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 118, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 118, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 119, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 120, "usage_type": "attribute"}, {"api_name": "jishaku.hljs.get_language", "line_number": 125, "usage_type": "call"}, {"api_name": "jishaku.hljs.get_language", "line_number": 132, "usage_type": "call"}, {"api_name": "jishaku.hljs.guess_file_traits", "line_number": 136, "usage_type": "call"}, {"api_name": "jishaku.types.ContextA", "line_number": 164, "usage_type": "name"}, {"api_name": "jishaku.flags.Flags.FORCE_PAGINATOR", "line_number": 173, "usage_type": "attribute"}, {"api_name": "jishaku.flags.Flags", "line_number": 173, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 177, "usage_type": "attribute"}]} +{"seq_id": "70362227044", "text": "from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index),\n path('frame', views.frame),\n path('sign_in', views.sign_in),\n path('sign_up', views.sign_up),\n path('sign_out', views.sign_out),\n]\n", "repo_name": "eldarian-1/remote_cam", "sub_path": "server/main/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}]}
+{"seq_id": "17272316348", "text": "# boot.py -- run on boot-up\nimport config\nconfig = config.config()\n\nfrom machine import WDT, deepsleep, pin_sleep_wakeup\nwdt = WDT(timeout=config.WATCHDOG_TIMEOUT_MS)\nfrom utime import ticks_ms, ticks_diff, sleep_ms\nstart_ticks = ticks_ms()\nfrom utility import log, releasePinHold, setPinHold, changePin, setPin\nfrom utility import blinkLED\nfrom pycom import heartbeat\nheartbeat(False)\n\n# variables\nled_time = 250\nbutton_push_period = 0\nforced_reset = False\nblinkLED(\"blue\",led_time,1)\nfrom os import dupterm\nfrom machine import UART, Pin\nfrom sys import print_exception\n\nprimaryUART = UART(0, 115200)\ndupterm(primaryUART)\n\nlog(\"Boot: Starting Bee-IoT-Scale-App-v1.0 (2022-03-14)...\")\n\n# import libraries\nfrom machine import reset_cause, wake_reason\nfrom utility import setToNVRAM, getFromNVRAM, getBootCountFromNVRAM, setBootCountToNVRAM\nimport machine\n\ntry:\n bootCount = getBootCountFromNVRAM() + 1\n reset_cause = reset_cause()\n wake_reason = wake_reason()\n\n log(\"Main: Reset cause: %s\" % (reset_cause))\n log(\"Main: Wake up reason: %s %s\" % (wake_reason[0],wake_reason[1]))\n\n if(reset_cause == machine.PWRON_RESET):\n log(\"Main: Forced system reset..(PWRON_RESET).\")\n forced_reset = True\n bootCount = 1\n elif(reset_cause == machine.HARD_RESET):\n log(\"Main: Forced system reset (HARD_RESET)....\")\n forced_reset = True\n bootCount = 1\n elif(reset_cause == machine.WDT_RESET):\n log(\"Main: Forced system reset (WDT_RESET)...\")\n forced_reset = True\n elif(reset_cause == machine.DEEPSLEEP_RESET):\n log(\"Main: Deepsleep reset, this is expected behaviour\")\n forced_reset = False\n elif(reset_cause == machine.SOFT_RESET):\n log(\"Main: SOFT Reset...\")\n elif(reset_cause == machine.BROWN_OUT_RESET):\n log(\"Main: Brown out reset...\")\n forced_reset = True\n\n if wake_reason[0] == machine.PWRON_WAKE:\n log(\"Main: Woke up by reset button\")\n elif wake_reason[0] == machine.PIN_WAKE:\n log(\"Main: Woke up by external pin (external interrupt)\")\n read_sensors = True\n elif wake_reason[0] == machine.RTC_WAKE:\n log(\"Main: Woke up by RTC (timer ran out)\")\n elif wake_reason[0] == machine.ULP_WAKE:\n log(\"Main: Woke up by ULP (capacitive touch)\")\nexcept Exception as e:\n print_exception(e)\n\nlog(\"Main: bootCount: %d\" % (bootCount))\nsetBootCountToNVRAM(bootCount)\nsleep_time = config.MEASUREMENT_INTERVAL\n", "repo_name": "Lapland-UAS-Tequ/tequ-bee-nest-scale", "sub_path": "boot.py", "file_name": "boot.py", "file_ext": "py", "file_size_in_byte": 2450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.config",
"line_number": 3, "usage_type": "call"}, {"api_name": "machine.WDT", "line_number": 6, "usage_type": "call"}, {"api_name": "config.WATCHDOG_TIMEOUT_MS", "line_number": 6, "usage_type": "attribute"}, {"api_name": "utime.ticks_ms", "line_number": 8, "usage_type": "name"}, {"api_name": "pycom.heartbeat", "line_number": 12, "usage_type": "call"}, {"api_name": "utility.blinkLED", "line_number": 18, "usage_type": "call"}, {"api_name": "machine.UART", "line_number": 23, "usage_type": "call"}, {"api_name": "os.dupterm", "line_number": 24, "usage_type": "call"}, {"api_name": "utility.log", "line_number": 26, "usage_type": "call"}, {"api_name": "utility.getBootCountFromNVRAM", "line_number": 34, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 35, "usage_type": "name"}, {"api_name": "machine.wake_reason", "line_number": 36, "usage_type": "name"}, {"api_name": "utility.log", "line_number": 38, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 38, "usage_type": "name"}, {"api_name": "utility.log", "line_number": 39, "usage_type": "call"}, {"api_name": "machine.wake_reason", "line_number": 39, "usage_type": "name"}, {"api_name": "machine.reset_cause", "line_number": 41, "usage_type": "name"}, {"api_name": "machine.PWRON_RESET", "line_number": 41, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 42, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 45, "usage_type": "name"}, {"api_name": "machine.HARD_RESET", "line_number": 45, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 46, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 49, "usage_type": "name"}, {"api_name": "machine.WDT_RESET", "line_number": 49, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 50, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 52, "usage_type": "name"}, {"api_name": "machine.DEEPSLEEP_RESET", "line_number": 52, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 53, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 55, "usage_type": "name"}, {"api_name": "machine.SOFT_RESET", "line_number": 55, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 56, "usage_type": "call"}, {"api_name": "machine.reset_cause", "line_number": 57, "usage_type": "name"}, {"api_name": "machine.BROWN_OUT_RESET", "line_number": 57, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 58, "usage_type": "call"}, {"api_name": "machine.wake_reason", "line_number": 61, "usage_type": "name"}, {"api_name": "machine.PWRON_WAKE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 62, "usage_type": "call"}, {"api_name": "machine.wake_reason", "line_number": 63, "usage_type": "name"}, {"api_name": "machine.PIN_WAKE", "line_number": 63, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 64, "usage_type": "call"}, {"api_name": "machine.wake_reason", "line_number": 66, "usage_type": "name"}, {"api_name": "machine.RTC_WAKE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 67, "usage_type": "call"}, {"api_name": "machine.wake_reason", "line_number": 68, "usage_type": "name"}, {"api_name": "machine.ULP_WAKE", "line_number": 68, "usage_type": "attribute"}, {"api_name": "utility.log", "line_number": 69, "usage_type": "call"}, {"api_name": "sys.print_exception", "line_number": 71, "usage_type": "call"}, 
{"api_name": "utility.log", "line_number": 73, "usage_type": "call"}, {"api_name": "utility.setBootCountToNVRAM", "line_number": 74, "usage_type": "call"}, {"api_name": "config.MEASUREMENT_INTERVAL", "line_number": 75, "usage_type": "attribute"}]} +{"seq_id": "69883619685", "text": "import potential_portfolios\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nclass PortfolioGraph:\n def __init__(self):\n self.client = potential_portfolios.client\n\n\ndata = pd.read_csv(\"Visualization.csv\", index_col=False)\ndata[\"period\"] = data[\"YEAR\"].astype(str) + data[\"QUARTER\"]\nuser_portfolio = [\"BF\", \"AF\", \"BE\", \"AI\", \"M\"]\npotential_portfolio = [\"BF\", \"AF\", \"BJ\", \"AL\", \"AY\"]\n\n\n# Original Portfolio\nop_data = data.TICKER.isin(user_portfolio)\nop = data[op_data]\n# Points by portfolio\nop_agg1 = op.groupby('period').agg({'MONEY': 'mean'}).reset_index()\nop_agg2 = op.groupby('period').agg({'CARBON_FOOTPRINT': 'mean'}).reset_index()\n\n# Potential Portfolio\npp_data = data.TICKER.isin(potential_portfolio)\npp = data[pp_data]\npp_agg1 = pp.groupby('period').agg({'MONEY': 'mean'}).reset_index()\npp_agg2 = pp.groupby('period').agg({'CARBON_FOOTPRINT': 'mean'}).reset_index()\n\n# create figure and axis objects with subplots\nfig, ax = plt.subplots()\nax2 = ax.twinx()\n\n# Original Plot\nplt.plot(op_agg2.period, op_agg2.CARBON_FOOTPRINT, marker=\"o\", label=\"Current Carbon Footprint\")\nplt.plot(op_agg1.period, op_agg1.MONEY, marker=\"o\", label=\"Current Returns\")\n\n# Potential Portfolio Plot\nplt.plot(pp_agg2.period, pp_agg2.CARBON_FOOTPRINT, linestyle='dashed', marker=\"o\", label=\"Potential Carbon Footprint\")\nplt.plot(pp_agg1.period, pp_agg1.MONEY, linestyle='dashed', marker=\"o\", label=\"Potential Returns\")\n\n# Labelling\nax.set_xlabel(\"Time Periods\", fontsize=14)\nax.set_ylabel(\"Carbon Footprint(tCO\\u2082e/bnUSD)\", color=\"red\", fontsize=14)\nax2.set_ylabel(\"Returns\", color=\"blue\", fontsize=14)\n\nax.legend()\nplt.legend()\nplt.show()\n", "repo_name": "bpk1bpk1/JUMP-2022", "sub_path": "visualization.py", "file_name": "visualization.py", "file_ext": "py", "file_size_in_byte": 1638, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "potential_portfolios.client", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}]} 
+{"seq_id": "39468418349", "text": "from fastapi.testclient import TestClient\nimport sys\nfrom pathlib import Path\n\nsys.path.append(str(Path(__file__).resolve().parents[1]))\nfrom main import app\n\nclient = TestClient(app)\n\ndef test_read_root():\n response = client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\"Hello\": \"World\"}\n\ndef test_read_main():\n\tresponse = client.get(\"/hello\")\n\tassert response.status_code == 200\n\tassert response.json() == {\"pease\": \"hello\"}", "repo_name": "HermitBroccoli/Applicant365_server", "sub_path": "test/test_one.py", "file_name": "test_one.py", "file_ext": "py", "file_size_in_byte": 463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 5, "usage_type": "call"}, {"api_name": "fastapi.testclient.TestClient", "line_number": 8, "usage_type": "call"}, {"api_name": "main.app", "line_number": 8, "usage_type": "argument"}]} +{"seq_id": "39764536153", "text": "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n \n self.conv1 = nn.Conv2d(3, 10, 5)\n self.conv2 = nn.Conv2d(10, 20, 5)\n self.conv3 = nn.Conv2d(20, 30, 5)\n \n def forward(self, i):\n x = i.view(-1, i.shape[2], i.shape[3], i.shape[4])\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n x = nn.AvgPool2d(4)(x)\n x = x.view(i.shape[0], i.shape[1], -1)\n return x\n \nclass CNNLSTM(nn.Module):\n def __init__(self, seq_length, num_classes):\n super(CNNLSTM, self).__init__()\n self.net_cnn = CNN()\n self.lstm = nn.LSTM(750, 100)\n self.fc = nn.Linear(100*seq_length, num_classes)\n \n def forward(self, x):\n x = self.net_cnn(x)\n x, _ = self.lstm(x)\n x = x.view(x.shape[0], -1)\n x = self.fc(x)\n return x ", "repo_name": "medengessia/KTH-Action-Recognition", "sub_path": "models/cnn_lstm.py", "file_name": "cnn_lstm.py", "file_ext": "py", "file_size_in_byte": 972, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 4, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 4, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 21, 
"usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "11908211563", "text": "from typing import List\nfrom collections import deque\n\ndef get_neighbors(node: List[int], image: List[List[int]]):\n neighbors = []\n delta_col = [-1, 0, 1, 0]\n delta_row = [0, -1, 0, 1]\n for i in range(4):\n new_row = node[0] + delta_row[i] \n new_col = node[1] + delta_col[i]\n if new_row < 0 or new_row >= len(image): continue\n if new_col < 0 or new_col >= len(image[0]): continue\n neighbors.append([new_row, new_col])\n return neighbors\n\ndef flood_fill(r: int, c: int, replacement: int, image: List[List[int]]) -> List[List[int]]:\n queue = deque([[r, c]])\n original = image[r][c]\n image[r][c] = replacement\n while len(queue) > 0:\n node = queue.popleft()\n for neighbor in get_neighbors(node, image):\n if image[neighbor[0]][neighbor[1]] == original:\n queue.append(neighbor)\n image[neighbor[0]][neighbor[1]] = replacement\n return image\n\nif __name__ == '__main__':\n r = int(input())\n c = int(input())\n replacement = int(input())\n image = [[int(x) for x in input().split()] for _ in range(int(input()))]\n res = flood_fill(r, c, replacement, image)\n for row in res:\n print(' '.join(map(str, row)))\n\n", "repo_name": "yashanand1910/solutions", "sub_path": "algomonster/graph/flood_fill.py", "file_name": "flood_fill.py", "file_ext": "py", "file_size_in_byte": 1236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "30401404734", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport json\nimport requests\n\nfrom ..filter import RsaRuleFilter, AnonymizationFilter\nfrom ..inspector import CognitiveInspector, TopologyInspector, SchemaInspector\n\nfrom chariot_base.utilities import Traceable\nfrom chariot_base.utilities.iotlwrap import IoTLWrapper\n\nfrom influxdb import InfluxDBClient\n\nfrom datetime import datetime, timedelta\n\nclass Engine(Traceable):\n def __init__(self, options={}):\n self.tracer = None\n self.southbound = None\n self.northbound = None\n self.schema = []\n self.db = self.init_db(**options['cognitive']['database'])\n\n self.iotl = None\n self.session = requests.Session()\n self.session.trust_env = False\n self.options = options\n self.last_sync_datetime = None\n\n self.inspectors = [\n CognitiveInspector(self),\n TopologyInspector(self),\n SchemaInspector(self)\n ]\n self.filters = [\n AnonymizationFilter(self),\n RsaRuleFilter(self)\n ]\n\n self.prepare_global_mute_options()\n\n def inject(self, southbound, northbound):\n self.southbound = southbound\n self.northbound = northbound\n\n def start(self):\n self.subscribe_to_southbound()\n self.subscribe_to_northbound()\n\n def inject_iotl(self, iotl):\n self.iotl = iotl\n\n def set_up_iotl_url(self, iotl_url):\n self.iotl_url = iotl_url\n\n def subscribe_to_southbound(self):\n self.southbound.subscribe_to_topics()\n\n def subscribe_to_northbound(self):\n pass\n\n def apply(self, message, child_span):\n span = 
self.start_span('apply', child_span)\n self.sync_iotl(span)\n self.filter(message, span)\n self.inspect(message, span)\n self.close_span(span)\n return 0\n\n def inspect(self, message, child_span):\n for _inspector in self.inspectors:\n if self.is_not_muted(_inspector.human_name):\n if self.is_not_muted_for_sensor(_inspector.human_name, message):\n span = self.start_span(f'filter_{_inspector.human_name}', child_span)\n _inspector.check(message, span)\n self.close_span(span)\n else:\n logging.debug(f'Rule {_inspector.human_name} is muted for sensor \"{message.sensor_id}\"')\n else:\n logging.debug(f'Rule {_inspector.human_name} is muted')\n\n def filter(self, message, child_span):\n messages = [message]\n for _filter in self.filters:\n if self.is_not_muted(_filter.human_name):\n if self.is_not_muted_for_sensor(_filter.human_name, message):\n span = self.start_span(f'filter_{_filter.human_name}', child_span)\n messages = _filter.do(messages, span)\n self.close_span(span)\n else:\n logging.debug(f'Rule {_filter.human_name} is muted for sensor \"{message.sensor_id}\"')\n else:\n logging.debug(f'Rule {_filter.human_name} is muted')\n\n for filtered_message in messages:\n self.publish(filtered_message, child_span)
\n\n def publish(self, message, span):\n m = self.inject_to_message(span, message.dict())\n self.southbound.publish('northbound', json.dumps(m))\n logging.debug(f'Publish message from \"{message.sensor_id}\" to \"{message.destination}\"')\n\n def raise_alert(self, alert, span):\n m = json.dumps(self.inject_to_message(span, alert.dict()))\n logging.debug(m)\n self.northbound.publish('alerts', m)\n\n def get_acl(self, span, message):\n return self.iotl.acl(message.sensor_id)\n\n def get_params(self, span, destination):\n return self.iotl.params(destination)\n\n def is_sensitive(self, span, message):\n return self.iotl.isSensitive(message.sensor_id)\n\n def is_match(self, span, schema, message):\n return self.iotl.is_match(schema, message.value)\n\n def execute(self, payload):\n span = self.start_span('execute_command')\n msg = payload.decode('utf-8')\n command = json.loads(msg)\n\n name = command['name']\n logging.debug(f'executing command {name}')\n if name == 'refresh_iotl':\n self.execute_sync_iotl(span)\n else:\n logging.debug('Unknown command')\n self.close_span(span)\n\n def execute_sync_iotl(self, span):\n if self.iotl_url is None:\n return\n\n logging.debug('Sync topology')\n url = self.iotl_url\n headers = self.inject_to_request_header(span, url)\n self.set_tag(span, 'url', url)\n result = self.session.get(url, headers=headers)\n current_iotl = result.json()\n self.iotl.load(current_iotl['code'])\n self.schema = self.iotl.schema(True)\n\n self.last_sync_datetime = datetime.now()\n\n def sync_iotl(self, span):\n if self.iotl_url is None:\n return\n\n if self.should_sync_iotl():\n self.execute_sync_iotl(span)\n\n def should_sync_iotl(self):\n if self.last_sync_datetime is None:\n return True\n return datetime.now() - self.last_sync_datetime >= timedelta(seconds=self.options.get('iotl_sync_delay', 60))\n\n def is_not_muted(self, id):\n return not self.muted_options[id]\n\n def is_not_muted_for_sensor(self, id, message):\n muted = self.iotl.params(message.sensor_id).get('mute', {})\n return muted.get(id, 0) == 0\n\n def prepare_global_mute_options(self):\n self.muted_options = self.options.get('mute', {})\n for _inspector in self.inspectors:\n if _inspector.human_name not in self.muted_options:\n self.muted_options[_inspector.human_name] = False\n for _filters in self.filters:\n if _filters.human_name not in self.muted_options:\n self.muted_options[_filters.human_name] = False\n\n def init_db(self, host, port, username, password, database, path, duration='4w'):\n logging.debug(f'{host}/{path}:{port} <{username}> ({database})')\n db = InfluxDBClient(host=host, port=port, username=username, password=password, database=database, path=path)\n db.create_database(database)\n db.create_retention_policy('awesome_policy', duration, 3, default=True)\n return db\n\n def save_instance(self, span, message):\n is_sensitive = self.is_sensitive(span, message)\n sensor_id = message.sensor_id\n timestamp = message.timestamp\n values = json.loads(message.value)\n\n points = []\n\n for k in values.keys():\n v = values[k]\n points.append({\n 'measurement': 'instances',\n 'tags': {\n 'sensor_id': sensor_id,\n 'value_name': k,\n },\n 'time': timestamp,\n 'fields': {\n 'sensor_id': sensor_id,\n 'value_name': k,\n 'value': str(v),\n 'is_sensitive': is_sensitive\n }\n })\n\n return self.db.write_points(points, protocol='json', retention_policy='awesome_policy')", "repo_name": "charIoT-h2020/chariot-privacy-engine", "sub_path": "chariot_privacy_engine/engine/engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 7250, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chariot_base.utilities.Traceable", "line_number": 16, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 25, "usage_type": "call"}, {"api_name": "inspector.CognitiveInspector", "line_number": 31, "usage_type": "call"}, {"api_name": "inspector.TopologyInspector", "line_number": 32, "usage_type": "call"}, {"api_name": "inspector.SchemaInspector", "line_number": 33, "usage_type": "call"}, {"api_name": "filter.AnonymizationFilter", "line_number": 36, "usage_type": "call"}, {"api_name": "filter.RsaRuleFilter", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 91, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 93, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 105, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 126, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 130, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 146, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 146, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 158, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 177, "usage_type": "call"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 178, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 187, "usage_type": "call"}]}
+{"seq_id": "53222852", "text": "from __future__ import print_function\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding\nfrom keras.layers import LSTM\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.models import load_model\nimport pandas as pa\nimport numpy as np\nfrom numpy import argmax\n\nword_max_features = 1116\nmaxlen = 29 # cut texts after this number of words (among top max_features most common words)\nbatch_size = 20\ntrain_data_df= pa.read_csv('sequenceData/trainSentenceSequence.txt',header=None)\ntrain_label_df=pa.read_csv('sequenceData/trainLabelSequence.txt',header=None)\ntest_data_df= pa.read_csv('sequenceData/testSentenceSequence.txt',header=None)\ntest_label_df=pa.read_csv('sequenceData/testLabelSequence.txt',header=None)\n\nx_train = np.array(train_data_df.iloc[:,0:29])\ny_train = np.array(train_label_df.iloc[:,0:8])\nx_test = np.array(test_data_df.iloc[:,0:29])\ny_test = np.array(test_label_df.iloc[:,0:8])\n\"\"\"\ntrain_test_df= pa.read_csv('test_sequence.csv',header=None)\nlabel_test_df=pa.read_csv('test_label.csv',header=None)\nprint('Loading data...')\n\n\n\n\nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=maxlen)\nx_test = sequence.pad_sequences(x_test, maxlen=8)\n\"\"\"\nprint('x_train shape:', x_train.shape)\n\nprint('y_train shape:', y_train.shape)\nprint('x_test shape:', x_test.shape)\n\nprint('y_test shape:', y_test.shape)\nprint('Build model...')\nmodel = Sequential()\nmodel.add(Embedding(word_max_features, 128))\nmodel.add(LSTM(256,return_sequences=True, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(32, activation='sigmoid'))\nmodel.add(LSTM(256, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(8, activation='softmax'))\n\n# try using different optimizers and different optimizer configs\n\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nprint('Train...')\n\nmodel.fit(x_train, y_train,batch_size=batch_size,validation_split=0.1,epochs=40)\n\n\n#model.save('model/test.mod')\n\n#model=load_model('model/test.mod')\n\nscore, acc = model.evaluate(x_test, y_test)\nprint('Test score:', score)\nprint('Test accuracy:', acc)\n\nfor i,data in enumerate(x_test):\n\tres = model.predict(np.expand_dims(data, 0)) # predict expects a batch dimension\n\tprint(argmax(res))\n\t#print(res.shape)", "repo_name": "wmmks/SemanticParser", "sub_path": "intent/IntentClassification.py", "file_name": "IntentClassification.py", "file_ext": "py", "file_size_in_byte": 2288, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Embedding", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 47, "usage_type": "call"}, {"api_name": 
"keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "33262346247", "text": "\"\"\"empty message\n\nRevision ID: 7086759e272f\nRevises: e017ed70b3f8\nCreate Date: 2016-10-15 09:31:31.262126\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '7086759e272f'\ndown_revision = 'e017ed70b3f8'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('login_type', sa.String(length=12), nullable=True))\n op.drop_column('users', 'login_method')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('login_method', postgresql.ENUM('local', 'oauth', name='login_method'), autoincrement=False, nullable=True))\n op.drop_column('users', 'login_type')\n ### end Alembic commands ###\n", "repo_name": "mosegontar/chessquick", "sub_path": "migrations/versions/7086759e272f_.py", "file_name": "7086759e272f_.py", "file_ext": "py", "file_size_in_byte": 851, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 26, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 26, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.ENUM", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 26, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "27917410400", "text": "from datetime import datetime\n\nimport app.utils.date_formater\nfrom app.calculator.day_work import DayWork\n\n\ndef test_day_work_create_class(work_day1_fixture):\n expected_day = 'MO'\n expected_start_hour = datetime.strptime(\"10:00\", '%H:%M')\n expected_finish_hour = datetime.strptime(\"12:00\", '%H:%M')\n assert work_day1_fixture.day == expected_day\n assert work_day1_fixture.start_time == expected_start_hour\n assert work_day1_fixture.finish_time == expected_finish_hour\n\n\ndef test_get_day(work_day1_fixture: DayWork) -> None:\n expected_day = 'MO'\n day = work_day1_fixture.get_day(\"MO10:00-12:00\")\n assert day == expected_day\n\n\ndef test_get_start_hour(mocker: any, work_day1_fixture: DayWork) -> None:\n expected_start_hour = datetime.strptime(\"10:00\", '%H:%M')\n mocker.patch(\"app.utils.date_formater.to_date\", return_value=datetime.strptime(\"10:00\", '%H:%M'))\n start_hour = work_day1_fixture.get_start_hour(\"MO10:00-12:00\")\n assert start_hour == expected_start_hour\n\n\ndef test_finish_hour(mocker: any, work_day1_fixture: DayWork) -> None:\n expected_finish_hour = datetime.strptime(\"12:00\", '%H:%M')\n 
mocker.patch(\"app.utils.date_formater.to_date\", return_value=datetime.strptime(\"12:00\", '%H:%M'))\n finish_hour = work_day1_fixture.get_finish_hour(\"MO12:00-12:00\")\n assert finish_hour == expected_finish_hour\n", "repo_name": "diegocanepa/acme-exercise", "sub_path": "tests/app/calculator/test_day_work.py", "file_name": "test_day_work.py", "file_ext": "py", "file_size_in_byte": 1355, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "app.calculator.day_work.DayWork", "line_number": 16, "usage_type": "name"}, {"api_name": "app.calculator.day_work.DayWork", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "app.calculator.day_work.DayWork", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 31, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "14500021264", "text": "from tkinter import Tk, filedialog, END\nfrom p_view import PdfView\nfrom p_model import Pdf\nfrom PyPDF2 import PdfFileReader, PdfFileMerger\nfrom contextlib import ExitStack\nimport re\n\nclass PdfController():\n #Initializes the GUI\n def __init__(self):\n self.root = Tk()\n self.view = PdfView(self.root)\n self.pdfs = []\n self.view.add_btn['command'] = self.add_file\n self.view.delete_btn['command'] = self.delete\n self.view.path_list_view.bind(\"<>\", self.get_info)\n self.view.merge_btn['command'] = self.merge\n self.view.up_btn['command'] = self.moveup\n self.view.down_btn['command'] = self.movedown\n \n # Starts the Gui for the entire program\n def run(self):\n self.root.title(\"PMerge - PDF file Merger\")\n self.root.geometry('700x700')\n self.root.mainloop()\n\n #opens the Filename open dialog box and sets the import_view to the selected file\n #TODO: Don't forget to change the initialdir to C:/ for production\n def add_file(self):\n file_count = 1\n filepath = filedialog.askopenfilenames(title=\"Select PDF\",\n initialdir=\"C:\\\\\",\n filetypes=[('pdf files', '.pdf')])\n for file in filepath:\n pdf_obj = Pdf()\n with open(file, 'rb') as file_binary:\n pdf = PdfFileReader(file_binary)\n pdf_obj.info = pdf.getDocumentInfo()\n pdf_obj.page_num = pdf.getNumPages()\n pdf_obj.filepath = file\n self.view.path_list_view.insert(END, file)\n self.pdfs.append(pdf_obj)\n file_binary.close()\n\n #build the filetree of files to be manipulated\n #TODO: Add Filetree version to do manipulations like split and merge page by page\n def build_filetree(self, filename='', num_of_pages=1):\n file_count = len(self.view.filetree.get_children())\n folder = self.view.filetree.insert(\"\", file_count,\n \"\", text=filename)\n \n\n\n #deletes the selected 
files in path_list_view\n #TODO Delete multiple files at once\n def delete(self):\n files = self.view.path_list_view.curselection()\n for file in files:\n filepath = self.view.path_list_view.get(file)\n pdf = self.pdfs[file] # using the indices to grab the filepath from the listbox\n self.pdfs.remove(pdf) # removing the filepath from the pdfs list\n self.view.path_list_view.delete(file) # delete filepath from listbox\n self.clear_feedback()\n\n # Merge Files\n #TODO\n def merge(self):\n with ExitStack() as stack:\n new_filepath = filedialog.asksaveasfilename(title=\"Select PDF\",\n initialdir=\"C:\\\\\",\n filetypes=[('pdf files', '.pdf'),('all files', '*.*')])\n if not re.search(\".pdf$\",new_filepath):\n new_filepath = new_filepath + \".pdf\"\n \n pdfMerger = PdfFileMerger()\n files = [stack.enter_context(open(pdf.filepath, 'rb')) for pdf in self.pdfs]\n #open a read binary of all the filepaths currently loaded into PMerger and merge them into the pdfMerger object\n for pdf in files:\n pdfMerger.append(pdf)\n \n #open the new filepath to as a write binary file and write all of the files that were previously loaded into pdfMerger\n with open(new_filepath, 'wb') as final_filepath:\n pdfMerger.write(final_filepath)\n self.clear_feedback()\n self.append_feedback(\"Files have been merged\")\n \n #adds text to the feedback view\n def append_feedback(self, text):\n output_text = self.view.feedback_text.get() + text\n self.view.feedback_text.set(output_text)\n\n # Clears the Feedback view of all text\n def clear_feedback(self):\n self.view.feedback_text.set(\"\")\n\n # Displays PDF obj Items in the Object list in the feedback view\n def get_info(self, event):\n for index in self.view.path_list_view.curselection():\n pdf = self.pdfs[index]\n self.clear_feedback()\n self.append_feedback(pdf.filepath)\n self.append_feedback(\"\\nNumber of Pages: {}\".format(pdf.page_num))\n self.append_feedback(\"\\nTitle: {}\".format(pdf.info.title))\n self.append_feedback(\"\\nAuthor: {}\".format(pdf.info.author))\n self.append_feedback(\"\\nCreator: {}\".format(pdf.info.creator))\n self.append_feedback(\"\\nProducer: {}\".format(pdf.info.producer))\n self.append_feedback(\"\\nSubject: {}\".format(pdf.info.subject))\n\n #move file one up the list on the file order\n def moveup(self):\n if self.view.path_list_view.curselection():\n file_index = self.view.path_list_view.curselection()[0]\n else:\n self.clear_feedback()\n self.append_feedback(\"\\nNo file has been selected\")\n return\n if file_index == 0:\n self.clear_feedback()\n self.append_feedback(\"\\nFile already at the top of the list\")\n else:\n self.pdfs.insert(file_index-1, self.pdfs.pop(file_index))\n file = self.view.path_list_view.get(file_index)\n self.view.path_list_view.delete(file_index)\n self.view.path_list_view.insert(file_index-1, file)\n self.view.path_list_view.select_set(file_index-1)\n\n #move file one down the list on the file order\n def movedown(self):\n if self.view.path_list_view.curselection():\n file_index = self.view.path_list_view.curselection()[0]\n list_len = self.view.path_list_view.size()\n else:\n self.clear_feedback()\n self.append_feedback(\"\\nNo file has been selected\")\n return\n if file_index == list_len - 1:\n self.clear_feedback()\n self.append_feedback(\"\\nFile already at the bottom of the merge file stack\")\n else:\n self.pdfs.insert(file_index+1, self.pdfs.pop(file_index))\n file = self.view.path_list_view.get(file_index)\n self.view.path_list_view.delete(file_index)\n 
self.view.path_list_view.insert(file_index+1, file)\n self.view.path_list_view.select_set(file_index+1)\n \n\nif __name__ == \"__main__\":\n print(\"Please run the p_merge.py file as the main program\")", "repo_name": "cortezcodes/p_merge", "sub_path": "p_controller.py", "file_name": "p_controller.py", "file_ext": "py", "file_size_in_byte": 6532, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tkinter.Tk", "line_number": 11, "usage_type": "call"}, {"api_name": "p_view.PdfView", "line_number": 12, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilenames", "line_number": 31, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 31, "usage_type": "name"}, {"api_name": "p_model.Pdf", "line_number": 35, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 37, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 41, "usage_type": "argument"}, {"api_name": "contextlib.ExitStack", "line_number": 68, "usage_type": "call"}, {"api_name": "tkinter.filedialog.asksaveasfilename", "line_number": 69, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 69, "usage_type": "name"}, {"api_name": "re.search", "line_number": 72, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileMerger", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "4124919830", "text": "import matplotlib.pyplot as plt\r\nimport pyautogui as py\r\nimport csv\r\nimport time\r\n\r\ncount=10\r\n\r\nmouse_co=[['X','Y','TIME']]\r\n\r\nx=[]\r\ny=[]\r\nztime=[]\r\nc=0\r\nfor i in range(10000):\r\n\ta=time.time()\r\n\ttime.sleep(0.001)\r\n\ttemp=[]\r\n\tmouse=py.position()\r\n\tb=time.time()\r\n\tc+=b-a\r\n\tztime.append(c)\r\n\ttemp.append(mouse.x)\r\n\tx.append(mouse.x)\r\n\ttemp.append(mouse.y)\r\n\ty.append(mouse.y)\r\n\ttemp.append(c)\r\n\tprint(f\"{i} : {temp}\")\r\n\tmouse_co.append(temp)\r\n\r\nprint(\"Out of the loop...\")\r\n\r\nfilename = f\"Mouse XY Data {count}.csv\"\r\n\t\r\nwith open(filename, 'w') as csvfile:\r\n\tcsvwriter = csv.writer(csvfile)\r\n\tcsvwriter.writerows(mouse_co)\r\n\r\nprint(\"Done Writing...\")\r\nax = plt.figure().add_subplot(projection='3d')\r\n\r\nax.plot(x, y, ztime)\r\nax.set_xlabel('X')\r\nax.set_ylabel('Y')\r\nax.set_zlabel('Time')\r\n# ax.savefig(f'Mouse XY Graph {count}.png')\r\nplt.show()\r\n", "repo_name": "joabeliot/JERRY", "sub_path": "Module/Mouse Movements/MouseMovementDataCollector.py", "file_name": "MouseMovementDataCollector.py", "file_ext": "py", "file_size_in_byte": 842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "pyautogui.position", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "6604833604", "text": "import cv2\r\nimport numpy as np\r\n\r\n# 0 means black\r\n# 1 means white\r\n\r\nimg_black = np.zeros((640,480)) # this is a grayscale img 
since it has only 2 dimensions\r\nprint(img_black.shape)\r\n\r\nimg_black = np.zeros((640,480,3),np.uint8) # this is a not a grayscale img since it has only 3 dimensions\r\n# here 640 is the height and 480 is the width\r\nprint(img_black.shape)\r\n# cv2.imshow('Black img',img_black)\r\n\r\nimg = np.zeros((640,480,3),np.uint8)\r\nimg[:] = 255,0,0 # : means that we want the entire img to be of that color\r\n# we can also color a particular part of the img by specifying the height and width range\r\ncv2.imshow('image',img)\r\n\r\ncv2.line(img_black,(0,0),(300,300),(0,255,0),3)\r\n# this means that a green line of thickness 3 is to be drawn from (0,0) to (300,300)\r\n\r\ncv2.line(img,(0,0),(img.shape[1],img.shape[0]),(0,255,0),3)\r\n# here since we are passing coordinates we have to follow the format (width,height)\r\n\r\ncv2.rectangle(img,(0,0),(300,400),(0,0,255),2)\r\n\r\nimg_filled = np.zeros((640,480,3),np.uint8)\r\ncv2.rectangle(img_filled,(0,0),(300,400),(0,0,255),cv2.FILLED)\r\ncv2.imshow('Filled image',img_filled)\r\n\r\ncv2.circle(img,(400,50),30,(255,0,255),5)\r\n\r\ncv2.putText(img,'OpenCV shapes & text',(50,100),cv2.FONT_HERSHEY_COMPLEX,1,(0,200,0),2)\r\n\r\ncv2.imshow('Black img',img_black)\r\ncv2.imshow('image',img)\r\ncv2.waitKey(0)\r\n", "repo_name": "arshde3p/OpenCV", "sub_path": "OpenCV/6_shapes_&_text.py", "file_name": "6_shapes_&_text.py", "file_ext": "py", "file_size_in_byte": 1332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 34, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "5700859197", "text": "from pyspark.sql import SparkSession\nimport yaml\nimport os.path\nimport src.utils.aws_utils as ut\n\nif __name__ == '__main__':\n\n os.environ[\"PYSPARK_SUBMIT_ARGS\"] = (\n '--jars \"https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/1.2.36.1060/RedshiftJDBC42-no-awssdk-1.2.36.1060.jar\"\\\n --packages \"org.apache.spark:spark-avro_2.11:2.4.2,io.github.spark-redshift-community:spark-redshift_2.11:4.0.1,org.apache.hadoop:hadoop-aws:2.7.4\" pyspark-shell'\n )\n\n # Create the SparkSession\n sparkSession = SparkSession \\\n .builder 
\\\n .appName(\"Read from enterprise applications\") \\\n .master('local[*]') \\\n .getOrCreate()\n\n current_dir = os.path.abspath(os.path.dirname(__file__))\n appConfigFilePath = os.path.abspath(current_dir + \"/../../../../\"+\"application.yml\")\n\n with open(appConfigFilePath) as conf:\n doc = yaml.load(conf,Loader=yaml.FullLoader)\n\n # Setup spark to use s3\n hadoop_conf = sparkSession.sparkContext._jsc.hadoopConfiguration()\n hadoop_conf.set(\"fs.s3.impl\", \"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n hadoop_conf.set(\"fs.s3a.access.key\", doc[\"s3_conf\"][\"access_key\"])\n hadoop_conf.set(\"fs.s3a.secret.key\", doc[\"s3_conf\"][\"secret_access_key\"])\n hadoop_conf.set(\"fs.s3a.endpoint\", \"s3-eu-west-1.amazonaws.com\")\n\n print(\"\\nCreating Dataframe from txn_fact dataset,\")\n txnDf = sparkSession.read\\\n .option(\"header\",\"true\")\\\n .option(\"delimiter\", \"|\")\\\n .csv(\"s3a://\"+doc[\"s3_conf\"][\"s3_bucket\"]+\"/txn_fct.csv\")\n\n txnDf.show(5,False)\n\n print(\"Writing txn_fact dataframe to AWS Redshift Table >>>>>>>\")\n\n jdbcUrl = ut.getRedshiftJdbcUrl(doc)\n print(jdbcUrl)\n\n txnDf.write\\\n .format(\"io.github.spark_redshift_community.spark.redshift\") \\\n .option(\"url\", jdbcUrl) \\\n .option(\"tempdir\", \"s3a://\"+doc[\"s3_conf\"][\"s3_bucket\"]+\"/temp\") \\\n .option(\"forward_spark_s3_credentials\", \"true\") \\\n .option(\"dbtable\", \"PUBLIC.TXN_FCT\") \\\n .mode(\"overwrite\")\\\n .save()\n\n print(\"Completed <<<<<<<<<\")\n\n", "repo_name": "roshith-mambatta/dataframe-pyexamples", "sub_path": "src/dataframe/from/other/systems/Write2Redshift.py", "file_name": "Write2Redshift.py", "file_ext": "py", "file_size_in_byte": 2076, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 14, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "yaml.load", "line_number": 24, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 24, "usage_type": "attribute"}, {"api_name": "src.utils.aws_utils.getRedshiftJdbcUrl", "line_number": 43, "usage_type": "call"}, {"api_name": "src.utils.aws_utils", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "19359694485", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom datetime import datetime\n\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import Rule\n\nfrom news_all.tools.others import to_list\n\nfrom news_all.spider_models import NewsRCSpider, NewsSpider, NewsCrawlSpider\n\n\n\n\nclass ZhwSpider(NewsRCSpider):\n \"\"\"中华网\"\"\"\n name = 'zhw'\n mystart_urls = {\n 'http://military.china.com/news/index.html': 18962, # 
中华网 - Military - China military news\n 'http://military.china.com/jswh/': 18965, # 中华网 - Military - Military culture news - crawl the left-side list\n 'http://military.china.com/important/': 18961, # 中华网 - Military - Top military news\n # 'http://military.china.com/news/index.html': 18963, # 中华网 - Military - International military news #duplicate\n 'http://ent.china.com/tv/index.html': 18959, # 中华网 - Entertainment - Film & TV\n 'https://ent.china.com/star/news/': 18957, # 中华网 - Entertainment - Celebrities\n 'http://ent.china.com/movie/index.html': 18958, # 中华网 - Entertainment - Movies\n 'http://money.china.com/': 1200899, # 中华网 - Investment - crawl area still to be confirmed\n 'http://culture.china.com/expo/': 18992, # 中华网 - Culture - Expo - crawl the left-side list\n 'http://culture.china.com/art/': 18991, # 中华网 - Culture - Arts - crawl the left-side list\n 'http://auto.china.com/zhuanzai/newcar/': 1200734, # 中华网 - New cars\n 'http://auto.china.com/15yuanchuang/': 18966, # 中华网 - Auto - Originals - crawl the left-side list\n # 'http://auto.china.com/zhuanzai/newcar/': 18970, # 中华网 - Auto - New cars (duplicate)\n 'http://auto.china.com/zhuanzai/hangye/': 1200735, # 中华网 - Auto news - four sections: originals, industry, new cars, buying guides\n 'http://economy.china.com/industrial/': 18974, # 中华网 - Economy - Industry & trade\n 'http://economy.china.com/domestic/': 18972, # 中华网 - Economy - Domestic macro\n 'http://economy.china.com/consume/': 18975, # 中华网 - Economy - Fashion & consumption - crawl the left-side list\n 'http://economy.china.com/global/': 18973, # 中华网 - Economy - Overseas economy - cannot be crawled\n # 'http://military.china.com/news/index.html': 1200319, # 中华网 Military - China military news (duplicate)\n 'http://military.china.com/photo/1': 1200292, # 中华网 Military - Military photos - left-side list\n # 'http://military.china.com/jswh/': 1200293, # 中华网 Military - self-media - crawl everything #duplicate\n\n }\n rules = (\n # https://military.china.com/news/568/20190609/36357095.html\n #https://military.china.com/jswh/figure/11163298/20190608/36354564.html\n #https://military.china.com/important/11132797/20190612/36381445.html\n #https://ent.china.com/movie/tv/11015529/20190612/36382362.html\n #https://ent.china.com/star/news/11052670/20190612/36383637.html\n #https://ent.china.com/movie/news/205/20190612/36382020.html\n #https://money.china.com/toutiao/2019/0612/13329.html\n #https://culture.china.com/expo/11171063/20190611/36373370.html\n #https://culture.china.com/art/11159887/20190612/36381396.html\n #https://auto.china.com/new/11294.html\n #https://economy.china.com/industrial/11173306/20190612/36386194.html\n #https://economy.china.com/domestic/11173294/20190612/36385763.html\n #https://economy.china.com/consume/11173302/20190611/36371915.html\n #https://economy.china.com/global/11173292/20190611/36374785.html\n #https://military.china.com/jctp/tuku/11172988/20190531/36308184.html -- image gallery, not handled yet\n\n\n\n\n # todo\n Rule(LinkExtractor(allow=(r'china.com.*?/\\d+.html'),\n ), callback='parse_item',\n follow=False),\n )\n\n # todo\n #https://military.china.com/jswh/figure/11163298/20190608/36354564.html\n def parse_item(self, response):\n xp = response.xpath\n try:\n title = xp(\"//h1[@class='article-main-title']/text()\").extract_first()\n # source = xp(\"//div[@class='time-source']\")[0]\n content_div = xp(\"//div[@id='chan_newsDetail']\")[0]\n\n pubtime = xp(\"//div[@class='time-source']/span[@class='time']\").re(r'\\d{2,4}-\\d{1,2}-\\d{1,2}')[0]\n print(\"=========\"+pubtime)\n # pubtime = xp(\"//div[@class='Remark']/span/text()\").extract_first().split('|')[0]\n # \n # return\n \n \n origin_name = xp('//span[@class=\"source\"]/a/text()').extract_first('')\n except:\n\n return self.parse_item_2(response)\n\n # Filter out videos\n # if self.video_filter(content_div) or self.page_turn_filter(content_div):\n # return\n\n content, media, _, _ = self.content_clean(content_div)\n\n return self.produce_item(\n response=response,\n title=title,\n # self.get_page_title(response).split('_')[0]\n pubtime=pubtime,\n origin_name=origin_name,\n \n content=content,\n media=media\n )\n\n 
#https://ent.china.com/movie/tv/11015529/20190612/36382362.html\n def parse_item_2(self, response):\n \n xp = response.xpath\n try:\n title = xp(\"//h1[@id='chan_newsTitle']/text()\").extract_first()\n # source = xp(\"//div[@class='chan_newsInfo_source']\")[0]\n content_div = xp(\"//div[@id='chan_newsDetail']\")[0]\n\n pubtime = xp(\"//div[@class='chan_newsInfo_source']/span[@class='time']\").re(r'\\d{2,4}-\\d{1,2}-\\d{1,2}')[0]\n print(\"==========\"+pubtime)\n # pubtime = xp(\"//div[@class='Remark']/span/text()\").extract_first().split('|')[0]\n # \n # return\n \n \n origin_name = xp('//span[@class=\"source\"]/a/text()').extract_first('')\n except:\n\n return self.parse_item_3(response)\n\n # Filter out videos\n # if self.video_filter(content_div) or self.page_turn_filter(content_div):\n # return\n\n content, media, _, _ = self.content_clean(content_div)\n\n return self.produce_item(\n response=response,\n title=title,\n # self.get_page_title(response).split('_')[0]\n pubtime=pubtime,\n origin_name=origin_name,\n \n content=content,\n media=media\n )\n\n #https://money.china.com/toutiao/2019/0612/13329.html\n def parse_item_3(self, response):\n \n xp = response.xpath\n try:\n title = xp(\"//h1[@id='chan_newsTitle']/text()\").extract_first()\n # source = xp(\"//div[@id='chan_newsInfo']\")[0]\n content_div = xp(\"//div[@id='chan_newsDetail']\")[0]\n\n pubtime = xp(\"//div[@id='chan_newsInfo']\").re(r'\\d{2,4}-\\d{1,2}-\\d{1,2}')[0]\n # pubtime = xp(\"//div[@class='Remark']/span/text()\").extract_first().split('|')[0]\n # \n # return\n \n \n origin_name = xp('//div[@id=\"chan_newsInfo\"]/a/text()').extract_first('')\n except:\n\n return self.parse_item_4(response)\n\n # Filter out videos\n # if self.video_filter(content_div) or self.page_turn_filter(content_div):\n # return\n\n content, media, _, _ = self.content_clean(content_div)\n\n return self.produce_item(\n response=response,\n title=title,\n # self.get_page_title(response).split('_')[0]\n pubtime=pubtime,\n origin_name=origin_name,\n \n content=content,\n media=media\n )\n\n #https://auto.china.com/new/11294.html\n def parse_item_4(self, response):\n \n xp = response.xpath\n try:\n title = xp(\"//h1[@id='arti-title']/text()\").extract_first()\n # source = xp(\"//div[@id='chan_newsInfo']\")[0]\n content_div = xp(\"//div[@id='js-arti-detail']\")[0]\n\n pubtime = xp(\"//div[@class='arti-info']/span[@class='time']/text()\").extract_first()\n # pubtime = xp(\"//div[@class='Remark']/span/text()\").extract_first().split('|')[0]\n # \n # return\n \n \n origin_name = xp('//span[@class=\"source\"]/text()').extract_first('')\n except:\n return self.produce_debugitem(response, \"xpath error\")\n # return self.parse_item_4(response)\n\n # Filter out videos\n # if self.video_filter(content_div) or self.page_turn_filter(content_div):\n # return\n\n content, media, _, _ = self.content_clean(content_div)\n\n return self.produce_item(\n response=response,\n title=title,\n # self.get_page_title(response).split('_')[0]\n pubtime=pubtime,\n origin_name=origin_name,\n \n content=content,\n media=media\n )\n\n", "repo_name": "Pintrue/news_all", "sub_path": "news_all/spiders_old/zhw_all.py", "file_name": "zhw_all.py", "file_ext": "py", "file_size_in_byte": 8785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "news_all.spider_models.NewsRCSpider", "line_number": 15, "usage_type": "name"}, {"api_name": "scrapy.spiders.Rule", "line_number": 63, "usage_type": "call"}, {"api_name": 
"scrapy.linkextractors.LinkExtractor", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "17964881150", "text": "\"\"\"Music Generation Functionality\"\"\"\n# pylint: disable=W0718\nfrom typing import Literal, Tuple, Callable, Optional\nimport gc\nimport warnings\nimport numpy as np\nfrom tqdm.auto import tqdm\nfrom audiocraft.models import MusicGen\nimport torch\nwarnings.filterwarnings('ignore')\n\nDEFAULT_MODEL = 'medium'\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\nMODELS = ['small','medium','large','melody']\n\nclass MusicGenerator:\n \"\"\"Class for Music Generation\"\"\"\n def __init__(\n self,\n pretrained:Literal['small','medium','large','melody'] = DEFAULT_MODEL,\n device:str = DEVICE\n ) -> None:\n self.pretrained = pretrained\n self.device = device\n self.model = None\n self.loaded = False\n self.load_error = None\n self.pbar = None\n\n def load_model(\n self,\n pretrained:Literal['small','medium','large','melody'] = None,\n device:str = None\n ) -> bool:\n \"\"\"Load a pre-trained model for music generation.\n\n Args:\n pretrained (Literal['small','medium','large','melody'], optional): The size of\n the pre-trained model to load. Defaults to None.\n device (str, optional): The device to use for model inference. Defaults to None.\n\n Returns:\n bool: True if the model is successfully loaded, False otherwise.\n \"\"\"\n if self.loaded is True:\n self.delete_model()\n\n if pretrained is not None:\n self.pretrained = pretrained\n if device is not None:\n self.device = device\n try:\n pt_fullname = f\"facebook/musicgen-{self.pretrained}\"\n self.model = MusicGen.get_pretrained(pt_fullname,\\\n device=self.device)\n self.loaded = True\n except Exception as err:\n self.load_error = err\n self.loaded = False\n\n return self.loaded\n\n def delete_model(\n self\n ) -> None:\n \"\"\"Deletes the model and frees up memory.\n\n This function deletes the model object, performs garbage collection, empties the\n CUDA cache, and resets the device, model, loaded flag, load error, and progress\n bar attributes.\n\n Returns:\n None\n \"\"\"\n del self.model\n gc.collect()\n torch.cuda.empty_cache()\n self.device = None\n self.model = None\n self.loaded = False\n self.load_error = None\n self.pbar = None\n\n def reset_progress_bar(\n self\n ) -> None:\n \"\"\"Resets the progress bar.\n\n This function resets the progress bar by setting the total value to 1 and updating the\n description to 'Generating'.\n\n Returns:\n None\n \"\"\"\n self.pbar = tqdm(total=1, dynamic_ncols=True,\\\n desc='Generating')\n\n def set_progress_bar_cb(\n self,\n callback:Callable = None\n ) -> None:\n \"\"\"Sets the progress bar callback function.\n\n Args:\n callback (Callable, optional): The callback function to be set. 
Defaults to None.\n\n Returns:\n None\n\n Example:\n set_progress_bar_cb(callback=my_callback_function)\n \"\"\"\n if callback is None:\n self.reset_progress_bar()\n callback = self.cb_progress_bar\n self.model.set_custom_progress_callback(callback)\n\n def cb_progress_bar(\n self,\n p:int,\n t:int\n ) -> None:\n \"\"\"Updates the progress bar with the given progress and total values.\n\n Args:\n p (int): The current progress value.\n t (int): The total value.\n\n Raises:\n ValueError: If the progress bar is not initialized.\n\n Returns:\n None\n \"\"\"\n if self.pbar is None:\n raise ValueError(\"Progress Bar must be initialized first\")\n if self.pbar.total != t:\n self.pbar.total = t\n self.pbar.reset(t)\n self.pbar.update(p)\n if self.pbar.total == p:\n self.pbar.close()\n else:\n self.pbar.refresh()\n\n def set_model_params(\n self,\n use_sample:bool=True,\n top_k:int = 250,\n top_p:float = 0.0,\n temp:float = 1,\n duration:int = 8,\n cfg_coef:int = 3\n ) -> None:\n \"\"\"Sets the parameters for generating music using the model.\n\n Args:\n use_sample (bool): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int): Top-k is a setting in text and music generation models that limits the\n number of choices (tokens) the model considers at each step. A smaller\n top-k leads to more predictable outputs, while a larger top-k allows\n for more variety. Defaults to 250.\n top_p (float): Top-p, also called nucleus sampling, is a method in text generation\n where the model chooses from a set of tokens based on their combined\n likelihood, ensuring a balance between diversity and coherence in\n the output. Unlike top-k, which picks a fixed number of tokens,\n top-p's selection varies based on their probabilities. Defaults to\n 0.0, when set to 0 top_k is used.\n temp (float): Softmax temperature parameter. In music generation, the temperature\n setting controls how predictable or varied the music is. A higher\n temperature results in more random and diverse music, while a lower\n temperature creates more consistent and less varied music. Defaults\n to 1.0.\n duration (int): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (int): Coefficient used for classifier free guidance. This technique in\n music generation uses an additional classifier network to guide\n the music creation process towards specific characteristics or\n styles. It provides more detailed control, allowing users to\n influence the style and features of the generated music. 
Defaults\n to 3.0.\n\n Raises:\n ValueError: If the model is not loaded.\n\n Returns:\n None\n \"\"\"\n if self.model is None:\n raise ValueError(\"Model must be loaded first\")\n self.model.set_generation_params(use_sample, top_k, top_p, temp,\\\n duration, cfg_coef)\n\n def text_cond_generate(\n self,\n prompt:str\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Text conditional music generation using the loaded model.\n\n Args:\n prompt (str): The prompt text for generating the waverform.\n\n Returns:\n Tuple[int, np.ndarray]: A tuple containing the sample rate and the generated\n waveform as a numpy array.\n\n Raises:\n ValueError: If the model is not loaded.\n \"\"\"\n if self.model is None:\n raise ValueError(\"Model must be loaded first\")\n\n # Generate music based on prompt\n wav = self.model.generate([prompt], progress=True)\n wav = wav.cpu().flatten().numpy()\n\n return self.model.sample_rate, wav\n\n @staticmethod\n def prepare_input_audio(\n input_audio:Tuple[int, np.ndarray] = None\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Prepares the input audio for further processing.\n\n Args:\n input_audio (Tuple[int, np.ndarray], optional): A tuple containing the sample\n rate and waveform of the input\n audio. Defaults to None.\n\n Returns:\n Tuple[int, np.ndarray]: A tuple containing the sample rate and processed\n waveform.\n\n Note:\n - If `input_audio` is not provided, the function returns a tuple with sample\n rate 0 and waveform None.\n - If `input_audio` is provided, the function converts the waveform to a torch\n tensor and performs additional processing if necessary.\n \"\"\"\n if input_audio is not None:\n sample_rate, wav = input_audio\n wav = torch.tensor(wav)\n if wav.dtype == torch.int16:\n wav = wav.float() / 32767.0\n if wav.dim() == 2 and wav.shape[1] == 2:\n wav = wav.mean(dim=1)\n else:\n sample_rate, wav = 0, None\n\n return sample_rate, wav\n\n def melody_cond_generate(\n self,\n input_audio:Tuple[int, np.ndarray],\n prompt:Optional[str] = ''\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Melody conditional music generation on an input audio with the loaded model.\n\n Args:\n input_audio (Tuple[int, np.ndarray]): The input audio as a tuple of sample\n rate and waveform.\n prompt (Optional[str]): The prompt for generating the waveform. 
Defaults to\n an empty string.\n\n Returns:\n Tuple[int, np.ndarray]: A tuple containing the sample rate and the generated\n melody waveform.\n\n Raises:\n ValueError: If the model is not loaded.\n\n Note:\n - If no prompt is provided, it will be set to None.\n - The input audio will be prepared using the MusicGenerator class.\n - The generated melody will be conditioned on the input audio using the model's\n generate_with_chroma method.\n - The generated waveform will be converted to a numpy array.\n \"\"\"\n if self.model is None:\n raise ValueError(\"Model must be loaded first\")\n\n if not prompt:\n prompt = None\n sample_rate, wav = MusicGenerator.prepare_input_audio(input_audio)\n wav = wav[None].expand(1, -1, -1)\n # Generate music based on melody of existing clip (`wav`) and prompt\n wav = self.model.generate_with_chroma([prompt], wav,\\\n sample_rate, progress=True)\n wav = wav.cpu().flatten().numpy()\n\n return self.model.sample_rate, wav\n\n def cont_generate(\n self,\n input_audio:Tuple[int, np.ndarray],\n prompt:Optional[str] = ''\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Generates a music continuation to an existing audio clip based on a prompt.\n\n Args:\n input_audio (Tuple[int, np.ndarray]): The input audio clip as a tuple of\n sample rate and waveform.\n prompt (Optional[str], optional): The prompt for generating the music\n continuation. Defaults to ''.\n\n Returns:\n Tuple[int, np.ndarray]: The sample rate and waveform of the generated\n music continuation.\n\n Raises:\n ValueError: If the model is not loaded.\n\n Note:\n - If no prompt is provided, the prompt will be set to None.\n - The input audio clip is prepared using the `prepare_input_audio` method of the\n `MusicGenerator` class.\n - The generated music continuation is obtained by calling the `generate_continuation`\n method of the model.\n - The generated waveform is converted to a numpy array.\n \"\"\"\n\n if self.model is None:\n raise ValueError(\"Model must be loaded first\")\n\n if not prompt:\n prompt = None\n sample_rate, wav = MusicGenerator.prepare_input_audio(input_audio)\n wav = wav[None].expand(1, -1, -1)\n # Generate music continuation to existing clip (`wav`) based on prompt\n wav = self.model.generate_continuation(wav, sample_rate,\\\n [prompt], progress=True)\n wav = wav.cpu().flatten().numpy()\n\n return self.model.sample_rate, wav\n\n def uncond_generate(\n self\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Generates music unconditionally.\n\n This function generates music unconditionally using a pre-loaded model. It returns the\n sample rate and the generated waveform.\n\n Returns:\n Tuple[int, np.ndarray]: A tuple containing the sample rate (int) and the generated\n waveform (np.ndarray).\n\n Raises:\n ValueError: If the model is not loaded.\n \"\"\"\n\n if self.model is None:\n raise ValueError(\"Model must be loaded first\")\n\n # Generate music unconditionally (based on nothing)\n wav = self.model.generate_unconditional(1, progress=True)\n wav = wav.cpu().flatten().numpy()\n\n return self.model.sample_rate, wav\n\n def generate(\n self,\n prompt:Optional[str] = '',\n input_audio:Optional[Tuple[int, np.ndarray]] = None,\n use_sample:Optional[bool] = True,\n top_k:Optional[int] = 250,\n top_p:Optional[float] = 0.0,\n temp:Optional[float] = 1,\n duration:Optional[int] = 8,\n cfg_coef:Optional[int] = 3,\n progress:Optional[Callable] = None\n ) -> Tuple[int, np.ndarray]:\n \"\"\"Generate audio using the model.\n\n Args:\n prompt (Optional[str]): The prompt text to generate audio from. 
Default is an empty\n string.\n input_audio (Optional[Tuple[int, np.ndarray]]): The input audio to condition the\n generation on. Default is None.\n use_sample (Optional[bool]): Whether to use sampling during generation. Default is\n True.\n top_k (Optional[int]): The number of top tokens to consider during sampling. Default\n is 250.\n top_p (Optional[float]): The cumulative probability threshold for top-k sampling.\n Default is 0.0.\n temp (Optional[float]): The temperature value for sampling. Default is 1.\n duration (Optional[int]): The duration of the generated audio in seconds. Default\n is 8.\n cfg_coef (Optional[int]): The coefficient for controlling the model configuration.\n Default is 3.\n progress (Optional[Callable]): A callback function to track the progress of\n generation. Default is None.\n\n Returns:\n Tuple[int, np.ndarray]: A tuple containing the sample rate and the generated\n audio waveform.\n \"\"\"\n self.set_model_params(use_sample, top_k, top_p, temp, duration, cfg_coef)\n\n if progress is not None:\n def progress_callback(p, t):\n progress((p, t), desc='Generating')\n else:\n progress_callback = None\n\n self.set_progress_bar_cb(progress_callback)\n\n input_audio_not_none = input_audio is not None\n\n if input_audio_not_none and (self.pretrained == 'melody'):\n sample_rate, wav = self.melody_cond_generate(input_audio, prompt)\n elif input_audio_not_none:\n self.set_model_params(use_sample, top_k, top_p, temp, duration, cfg_coef)\n sample_rate, wav = self.cont_generate(input_audio, prompt)\n elif not prompt:\n sample_rate, wav = self.uncond_generate()\n else:\n sample_rate, wav = self.text_cond_generate(prompt)\n\n return sample_rate, wav\n", "repo_name": "smasis001/Music-GenAI-App", "sub_path": "music_generation/music_gen.py", "file_name": "music_gen.py", "file_ext": "py", "file_size_in_byte": 16237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 13, "usage_type": "attribute"}, {"api_name": "typing.Literal", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Literal", "line_number": 32, "usage_type": "name"}, {"api_name": "audiocraft.models.MusicGen.get_pretrained", "line_number": 54, "usage_type": "call"}, {"api_name": "audiocraft.models.MusicGen", "line_number": 54, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.cuda.empty_cache", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 77, "usage_type": "attribute"}, {"api_name": "tqdm.auto.tqdm", "line_number": 95, "usage_type": "call"}, {"api_name": "typing.Callable", "line_number": 100, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 196, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 220, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 220, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.int16", "line_number": 242, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 221, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 221, "usage_type": "attribute"}, {"api_name": "typing.Tuple", 
"line_number": 253, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 253, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 254, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 255, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 255, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 294, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 294, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 296, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 337, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 337, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 362, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 363, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 363, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 363, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 364, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 365, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 366, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 367, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 368, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 369, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 370, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 370, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 371, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 371, "usage_type": "attribute"}]} +{"seq_id": "72178440164", "text": "from advanced_bot import AdvancedBot, Colors\nfrom pid import PID\nfrom ev3dev2.motor import OUTPUT_B, OUTPUT_C, OUTPUT_A\nfrom ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_4\n\n\nif __name__ == \"__main__\":\n regulator = PID(kp=1.2, ki=0, kd=0.2, integral_reset_count=5)\n bot = AdvancedBot(OUTPUT_C, OUTPUT_B, OUTPUT_A, INPUT_2, INPUT_1, INPUT_4, regulator, False, False)\n colors = [Colors.BLUE, Colors.GREEN]\n\n try:\n bot.run_loader_job(colors)\n except KeyboardInterrupt:\n bot.turn_off_all_motors()\n except Exception as e:\n print(e)\n\n bot.turn_off_all_motors()\n print(\"Stopped\")\n", "repo_name": "VisteK528/WR", "sub_path": "SemiAdvancedBot/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pid.PID", "line_number": 8, "usage_type": "call"}, {"api_name": "advanced_bot.AdvancedBot", "line_number": 9, "usage_type": "call"}, {"api_name": "ev3dev2.motor.OUTPUT_C", "line_number": 9, "usage_type": "argument"}, {"api_name": "ev3dev2.motor.OUTPUT_B", "line_number": 9, "usage_type": "argument"}, {"api_name": "ev3dev2.motor.OUTPUT_A", "line_number": 9, "usage_type": "argument"}, {"api_name": "ev3dev2.sensor.INPUT_2", "line_number": 9, "usage_type": "argument"}, {"api_name": "ev3dev2.sensor.INPUT_1", "line_number": 9, "usage_type": "argument"}, {"api_name": "ev3dev2.sensor.INPUT_4", "line_number": 9, "usage_type": "argument"}, {"api_name": "advanced_bot.Colors.BLUE", "line_number": 10, "usage_type": "attribute"}, 
{"api_name": "advanced_bot.Colors", "line_number": 10, "usage_type": "name"}, {"api_name": "advanced_bot.Colors.GREEN", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "19024356390", "text": "import cv2\nimport numpy as np\nimport math\nimport random\n\n\nclass ValueNoise:\n def __init__(self, height, width, lattice, seed):\n self.height = height\n self.width = width\n self.lattice = lattice\n random.seed(seed)\n ilen = int(self.height / self.lattice) + 1\n jlen = int(self.width / self.lattice) + 1\n self.suppleMat = [[0 for i in range(ilen)] for i in range(jlen)]\n for i in range(ilen):\n for j in range(jlen):\n self.suppleMat[i][j] = (int)(255 * random.random())\n\n def fade_old(self, t):\n return t * t * (3 - 2 * t)\n\n def fade(self, t):\n return t * t * t * (t * (t * 6 - 15) + 10)\n\n def value_noise(self, smooth=0):\n dst = np.zeros((self.height, self.width), np.uint8)\n for i in range(0, self.height):\n for j in range(0, self.width):\n x = i / self.lattice\n y = j / self.lattice\n\n sx = math.floor(x)\n sy = math.floor(y)\n fx = x - sx\n fy = y - sy\n if smooth == 0:\n cbufx = [math.floor((1.0 - fx) * 2048)]\n cbufx.append(2048 - cbufx[0])\n cbufy = [math.floor((1.0 - fy) * 2048)]\n cbufy.append(2048 - cbufy[0])\n elif smooth == 1:\n cbufx = [math.floor(self.fade_old(1.0 - fx) * 2048)]\n cbufx.append(2048 - cbufx[0])\n cbufy = [math.floor(self.fade_old(1.0 - fy) * 2048)]\n cbufy.append(2048 - cbufy[0])\n else:\n cbufx = [math.floor(self.fade(1.0 - fx) * 2048)]\n cbufx.append(2048 - cbufx[0])\n cbufy = [math.floor(self.fade(1.0 - fy) * 2048)]\n cbufy.append(2048 - cbufy[0])\n dst[i, j] = (self.suppleMat[sx][sy] * cbufx[0] * cbufy[0] +\n self.suppleMat[sx + 1][sy] * cbufx[1] * cbufy[0] +\n self.suppleMat[sx][sy + 1] * cbufx[0] * cbufy[1] +\n self.suppleMat[sx + 1][sy + 1] * cbufx[1] * cbufy[1]) >> 22\n return dst\n\n def show_pic(self):\n value_noise_no_fade = self.value_noise()\n value_noise_old_fade = self.value_noise(1)\n value_noise_new_fade = self.value_noise(2)\n cv2.imshow(\"value_noise_no_fade\", value_noise_no_fade)\n cv2.imshow(\"value_noise_old_fade\", value_noise_old_fade)\n cv2.imshow(\"value_noise_new_fade\", value_noise_new_fade)\n\n\nclass PerlinNoise:\n def __init__(self, height, width, lattice):\n self.height = height\n self.width = width\n self.lattice = lattice\n self.p = []\n self.premutation = []\n '''\n self.premutation = [151, 160, 137, 91, 90, 15,\n 131, 13, 201, 95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21,\n 10,\n 23,\n 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32, 57, 177,\n 33,\n 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27,\n 166,\n 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40,\n 244,\n 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200,\n 196,\n 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124,\n 123,\n 5, 202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189,\n 28, 42,\n 223, 183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155, 167, 43, 172, 9,\n 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97,\n 228,\n 251, 34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239,\n 107,\n 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127, 4, 150, 254,\n 138, 236, 205, 93, 222, 
114, 67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180]\n '''\n for i in range(0, 256):\n self.premutation.append(i)\n random.seed(0)\n random.shuffle(self.premutation)\n for i in range(0, 512):\n self.p.append(self.premutation[i % 256])\n\n def fade_old(self, t):\n return t * t * (3 - 2 * t)\n\n def fade(self, t):\n return t * t * t * (t * (t * 6 - 15) + 10)\n\n def lerp(self, t, a, b):\n return a + t * (b - a)\n\n def grad(self, has, x, y):\n h = has & 3\n if h == 0:\n return x\n elif h == 1:\n return -x\n elif h == 2:\n return y\n elif h == 3:\n return -y\n else:\n return 0\n\n def grad_old(self, has, x, y):\n h = has & 7\n if h == 0:\n return x\n elif h == 1:\n return -x\n elif h == 2:\n return y\n elif h == 3:\n return -y\n elif h == 4:\n return x + y\n elif h == 5:\n return x - y\n elif h == 6:\n return -x + y\n elif h == 7:\n return -x - y\n return 0\n\n def perlin_noise(self, x, y, smooth, dir):\n X = math.floor(x) & 255\n Y = math.floor(y) & 255\n x -= math.floor(x)\n y -= math.floor(y)\n if smooth == 0:\n u = self.fade(x)\n v = self.fade(y)\n else:\n u = self.fade_old(x)\n v = self.fade_old(y)\n\n AA = self.p[self.p[X] + Y]\n AB = self.p[self.p[X] + Y + 1]\n BA = self.p[self.p[X + 1] + Y]\n BB = self.p[self.p[X + 1] + Y + 1]\n if dir == 0:\n x1 = self.lerp(u, self.grad(AA, x, y), self.grad(BA, x - 1, y))\n x2 = self.lerp(u, self.grad(AB, x, y - 1), self.grad(BB, x - 1, y - 1))\n else:\n x1 = self.lerp(u, self.grad_old(AA, x, y), self.grad_old(BA, x - 1, y))\n x2 = self.lerp(u, self.grad_old(AB, x, y - 1), self.grad_old(BB, x - 1, y - 1))\n y = self.lerp(v, x1, x2)\n return (y + 1) / 2\n\n def octave_perlin_noise(self, x, y, octaves, persistence,smooth,dir):\n total = 0.0\n frequency = 4.0\n amplitude = 128.0\n maxValue = 0.0\n for i in range(0, octaves):\n total += self.perlin_noise(x * frequency, y * frequency,smooth,dir) * amplitude\n maxValue += amplitude\n amplitude *= persistence\n frequency *= 2\n return total / maxValue\n\n def perlin_noise_main(self, smooth=0, dir=0):\n dst = np.zeros((self.height, self.width), np.uint8)\n for i in range(0, self.height):\n for j in range(0, self.width):\n x = i / lattice\n y = j / lattice\n dst[i, j] = 255 * self.octave_perlin_noise(x, y, 6,0.5,smooth, dir)\n return dst\n\n def show_pic(self):\n # perlin_noise_old_fade = self.perlin_noise_main(1)\n # perlin_noise_old_dir = self.perlin_noise_main(0, 1)\n # perlin_noise_old = self.perlin_noise_main(1, 1)\n perlin_noise_new = self.perlin_noise_main()\n # cv2.imshow(\"perlin_noise_old_fade\", perlin_noise_old_fade)\n # cv2.imshow(\"perlin_noise_old_dir\", perlin_noise_old_dir)\n # cv2.imshow(\"perlin_noise_old\", perlin_noise_old)\n cv2.imshow(\"perlin_noise_new\", perlin_noise_new)\n\n\n# All_Used\ndstWidth = 600\ndstHeight = 600\nlattice = 300\nvn = ValueNoise(dstHeight, dstWidth, lattice, 0)\n# vn.show_pic()\n\npn = PerlinNoise(dstHeight, dstWidth, lattice)\npn.show_pic()\ncv2.waitKey(0)\n# for lattice in range(40,100):\n# pn = PerlinNoise(256,256,lattice)\n# pn.show_pic()\n# print(\"lattice:\",lattice)\n# cv2.waitKey(600)\n", "repo_name": "Alethu/Easy2DPerlinNoisePicGeneration", "sub_path": "PerlinNoise.py", "file_name": "PerlinNoise.py", "file_ext": "py", "file_size_in_byte": 8279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.seed", "line_number": 12, "usage_type": "call"}, {"api_name": "random.random", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", 
"line_number": 27, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 27, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 33, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 34, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 38, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 40, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 43, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 45, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 48, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 64, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 101, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 102, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 149, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 150, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 151, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 186, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 202, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 214, "usage_type": "call"}]} +{"seq_id": "29125465189", "text": "import uvicorn\nfrom fastapi import FastAPI\nfrom fastapi_sqlalchemy import DBSessionMiddleware, db\n\nfrom schemas.schema import Book as SchemaBook\nfrom schemas.schema import Author as SchemaAuthor\n\nfrom schemas.schema import Book\nfrom schemas.schema import Author\n\nfrom models.models import Book as ModelBook\nfrom models.models import Author as ModelAuthor\n\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv('.env')\n\n\napp = FastAPI()\n\n# to avoid csrftokenError\napp.add_middleware(DBSessionMiddleware, db_url='postgresql://postgres:postgres@localhost/FastAPIDB')\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"CICD Implementation\"}\n\n\n@app.post('/book/', response_model=SchemaBook)\nasync def book(book: SchemaBook):\n db_book = ModelBook(title=book.title, rating=book.rating, author_id = book.author_id)\n db.session.add(db_book)\n db.session.commit()\n return db_book\n\n@app.get('/book/')\nasync def book():\n book = db.session.query(ModelBook).all()\n return book\n\n\n \n@app.post('/author/', response_model=SchemaAuthor)\nasync def author(author:SchemaAuthor):\n db_author = ModelAuthor(name=author.name, age=author.age)\n db.session.add(db_author)\n db.session.commit()\n return db_author\n\n@app.get('/author/')\nasync def author():\n author = db.session.query(ModelAuthor).all()\n return author\n\n\n# To run locally\nif __name__ == '__main__':\n uvicorn.run(app, host='0.0.0.0', port=8000)\n", "repo_name": "jitendra-meena/FastAPI", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 17, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 20, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.DBSessionMiddleware", "line_number": 23, 
"usage_type": "argument"}, {"api_name": "schemas.schema.Book", "line_number": 31, "usage_type": "name"}, {"api_name": "models.models.Book", "line_number": 32, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session.add", "line_number": 33, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 33, "usage_type": "name"}, {"api_name": "fastapi_sqlalchemy.db.session.commit", "line_number": 34, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 34, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 34, "usage_type": "name"}, {"api_name": "schemas.schema.Book", "line_number": 30, "usage_type": "name"}, {"api_name": "fastapi_sqlalchemy.db.session.query", "line_number": 39, "usage_type": "call"}, {"api_name": "models.models.Book", "line_number": 39, "usage_type": "argument"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 39, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 39, "usage_type": "name"}, {"api_name": "schemas.schema.Author", "line_number": 45, "usage_type": "name"}, {"api_name": "models.models.Author", "line_number": 46, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session.add", "line_number": 47, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 47, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 47, "usage_type": "name"}, {"api_name": "fastapi_sqlalchemy.db.session.commit", "line_number": 48, "usage_type": "call"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 48, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 48, "usage_type": "name"}, {"api_name": "schemas.schema.Author", "line_number": 44, "usage_type": "name"}, {"api_name": "fastapi_sqlalchemy.db.session.query", "line_number": 53, "usage_type": "call"}, {"api_name": "models.models.Author", "line_number": 53, "usage_type": "argument"}, {"api_name": "fastapi_sqlalchemy.db.session", "line_number": 53, "usage_type": "attribute"}, {"api_name": "fastapi_sqlalchemy.db", "line_number": 53, "usage_type": "name"}, {"api_name": "uvicorn.run", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "4815313890", "text": "from difflib import get_close_matches\nimport json\ndata = json.load(open(\"data.json\"))\n\ndef search(word):\n if word in data:\n \n result= data[word]\n if len(result)>0:\n if len(result)==1:\n return result\n else:\n for i in range(0,len(result)-1):\n print (result[i], \"/n\")\n return (result[i+1])\n\n \n elif len(get_close_matches(word,data.keys())) > 0:\n print (\"Did you mean: \" , get_close_matches(word,data.keys())[0])\n decision = input(\"Enter Y if yes, N if no: \")\n if decision == \"Y\":\n result = data[get_close_matches(word,data.keys())[0]]\n if len(result)>0:\n if len(result)==1:\n return result\n else:\n for i in range(0,len(result)-1):\n print (result[i], \"/n\")\n return (result[i+1])\n elif decision == \"N\":\n return (\"Sorry, I'm unable to find the word that you were searchning for\")\n else: \n return (\"Sorry, I'm unable to find the word that you were searchning for\")\n\nword=input(\"Enter the word to search: \")\nword = str.lower(word)\nprint(search(word))", "repo_name": "vigneshethiraj/Dictionary", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 3, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 18, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 19, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "70469388005", "text": "\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\n\ndef getTargetedActivation(model,layerDict,layerName,inputTensor,Image):\n layerOutput = layerDict[layerName].output\n iterate = K.function([inputTensor], [layerOutput])\n return iterate([Image])\n\nfrom keras.layers import Input, Lambda\n\n \ndef getIterateFunctionStyle(model,layerDict, layerName, inputTensor,Image,a,b,c,styleIndex):\n \n layerOutput = layerDict[layerName].output[0,:,:,:]\n arrayTarget = np.squeeze(getTargetedActivation(model,layerDict,layerName,inputTensor,Image)[0])\n print(\"max target\",np.max(abs(arrayTarget)))\n layerTarget = tf.convert_to_tensor(arrayTarget)\n print(layerOutput)\n print(layerTarget)\n print(layerOutput.shape)\n print(layerTarget.shape)\n #subtract_layer = Lambda(lambda inputs: inputs[0] - inputs[1],output_shape=lambda shapes: shapes[0])\n #subtract = K.function([inputTensor],[subtract_layer([inputTensor,layerTarget])])\n #difference = subtract[layerOutput]\n difference=tf.math.subtract(layerOutput,layerTarget)\n #difference=K.bias_add(layerOutput[:,:,:,:],-1*getTargetedActivation(model,layerDict,layerName,inputTensor,Image))\n loss_content=K.sum(K.square(difference))/(128*259*64)\n \n loss_style= - tf.math.log(K.mean(model.output[:,styleIndex]))\n \n Regularization_term = K.mean(K.square(inputTensor))\n \n loss=a*loss_content+b*loss_style+c*Regularization_term\n #loss=loss_content\n # compute the gradient of the input picture wrt this loss\n grads = K.gradients(loss, inputTensor)[0]\n\n\n \n iterate = K.function([inputTensor], [loss, grads,loss_content,loss_style,Regularization_term])\n \n return iterate\n \ndef getIterateFunction(model,layerDict, layerName, inputTensor,Image):\n \n layerOutput = layerDict[layerName].output[0,:,:,:]\n arrayTarget = np.squeeze(getTargetedActivation(model,layerDict,layerName,inputTensor,Image)[0])\n layerTarget = tf.convert_to_tensor(arrayTarget)\n\n difference=tf.math.subtract(layerOutput,layerTarget)\n\n loss=K.sum(K.square(difference))/(128*259*64)\n\n grads = K.gradients(loss, inputTensor)[0]\n\n iterate = K.function([inputTensor], [loss, grads])\n \n return iterate\n\n\n##first attempt : kind of Adagrad\ndef gradientDescent(iterate, inputImgData, step):\n k=5\n lossValue=2000\n for i in range(1,300):\n if(lossValue/512>=3):\n lossValue, gradsValue = iterate([inputImgData])\n gradsValue /= (K.sqrt(K.max(K.square(gradsValue))) + 1e-10)/20\n print(lossValue,np.max(abs(gradsValue))*step/(1+k*i))\n inputImgData+=gradsValue*step/(1+k*i)*(-1)\n return lossValue\n\ndef SGD_Nesterov(iterate,img,lr0,momentum,early_stopping,factor=0.5,epochs=20,niter=200):\n #initialization\n lossValue=early_stopping+1\n v=np.zeros(img.shape)\n lr=lr0\n #update of img\n for i in range(1,niter):\n if(lossValue>early_stopping):\n lossValue, gradsValue = iterate([img])\n #step decay for learning rate\n lr=lr*(factor**(i%epochs==0))\n #Nesterov momentum + SGD\n v_prec=v.copy()\n v=momentum*v-lr*gradsValue\n img+= (-1*momentum)*v_prec + (1+momentum)*v\n print(i,lossValue,np.max(abs(v)),np.max(abs(gradsValue)))\n \n return(lossValue)\n \ndef 
Adam(iterate,img,lr0,early_stopping,factor,epochs,niter,beta1=0.9,beta2=0.999,eps=1e-8):\n #initialization\n lossValue=early_stopping+1\n v=np.zeros(img.shape)\n m=np.zeros(img.shape)\n lr=lr0\n\n #update of img\n for i in range(1,niter):\n if(lossValue>early_stopping):\n liste=iterate([img]) \n longueur=len(liste)\n if(longueur==5):\n lossValue, gradsValue,loss_content,loss_style,reg = liste\n elif(longueur==2):\n lossValue,gradsValue=liste\n #step decay for learning rate\n lr=lr*(factor**(i%epochs==0))\n #ADAM\n m=beta1*m + (1-beta1)*gradsValue\n mi=m/(1-beta1**i)\n v=beta2*v + (1-beta2)*(gradsValue**2)\n vi=v/(1-beta2**i)\n img+= (-1*lr)*mi / (np.sqrt(vi)+eps)\n if(i%1==0):\n print(i,lossValue,np.max(abs((-1*lr)*mi / (np.sqrt(vi)+eps))))\n if(longueur==5):\n print(loss_content,loss_style,reg)\n \n return(lossValue)", "repo_name": "clementpiat/Audio-style-transfer", "sub_path": "optimization_content_extraction.py", "file_name": "optimization_content_extraction.py", "file_ext": "py", "file_size_in_byte": 4404, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras.backend.function", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.math.subtract", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 27, "usage_type": "attribute"}, {"api_name": "keras.backend.sum", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 29, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.math.log", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 31, "usage_type": "attribute"}, {"api_name": "keras.backend.mean", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.backend.mean", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 33, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.backend.gradients", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 38, "usage_type": "name"}, {"api_name": "keras.backend.function", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 42, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.math.subtract", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 52, "usage_type": "attribute"}, {"api_name": "keras.backend.sum", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 54, "usage_type": "name"}, {"api_name": "keras.backend.square", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.backend.gradients", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 56, "usage_type": "name"}, {"api_name": "keras.backend.function", "line_number": 58, "usage_type": "call"}, {"api_name": 
"keras.backend", "line_number": 58, "usage_type": "name"}, {"api_name": "keras.backend.sqrt", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 70, "usage_type": "name"}, {"api_name": "keras.backend.max", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.backend.square", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "71230414886", "text": "import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"k8s-healthcheck\",\n version=\"0.0.1\",\n author=\"Chris McCoy\",\n author_email=\"chris@chr.is\",\n description=\"A quick and dirty health checker\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/chris-mccoy/k8s-healthcheck\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n\n", "repo_name": "chris-mccoy/k8s-healthcheck", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "36154462176", "text": "#!/usr/bin/python3\n\"\"\"This module defines a base class for all models in our hbnb clone\"\"\"\nimport uuid\nfrom datetime import datetime\n\n\nclass BaseModel:\n \"\"\"A base class for all hbnb models\"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Instatntiates a new model\"\"\"\n from models import storage\n self.id = str(uuid.uuid4())\n self.created_at = self.updated_at = datetime.now()\n if len(kwargs) != 0:\n self._assign_att(kwargs)\n storage.new(self)\n else:\n storage.new(self)\n def _assign_att(self, att):\n \"\"\"\n Assigns in attrubutes for kwargs\n \"\"\"\n str_format = \"%Y-%m-%dT%H:%M:%S.%f\"\n for key, value in att.items():\n if key == \"created_at\" or key == \"updated_at\":\n t = self.__dict__[key]\n t = datetime.strptime(value, str_format)\n else:\n t = value\n\n def __str__(self):\n \"\"\"Returns a string representation of the instance\"\"\"\n cls = (str(type(self)).split('.')[-1]).split('\\'')[0]\n return '[{}] ({}) {}'.format(cls, self.id, self.__dict__)\n\n def save(self):\n \"\"\"Updates updated_at with current time when instance is changed\"\"\"\n from models import storage\n self.updated_at = datetime.now()\n storage.save()\n \n def delete(self):\n \"\"\"\n Delete instance from storage by calling its delete method\n \"\"\"\n models.storage.delete(self)\n\n def to_dict(self):\n \"\"\"Convert instance into dict format\"\"\"\n dictionary = {}\n dictionary.update(self.__dict__)\n dictionary.update({\"__class__\": 
(str(type(self)).split(\".\")[-1]).split(\"'\")[0]})\n dictionary['created_at'] = self.created_at.isoformat()\n dictionary['updated_at'] = self.updated_at.isoformat()\n return dictionary\n", "repo_name": "Tassuo7/AirBnB_clone_v2", "sub_path": "models/base_model.py", "file_name": "base_model.py", "file_ext": "py", "file_size_in_byte": 1866, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "uuid.uuid4", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "name"}, {"api_name": "models.storage.new", "line_number": 16, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 16, "usage_type": "name"}, {"api_name": "models.storage.new", "line_number": 18, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "models.storage.save", "line_number": 40, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 40, "usage_type": "name"}, {"api_name": "models.storage.delete", "line_number": 46, "usage_type": "call"}, {"api_name": "models.storage", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "5293071335", "text": "#Note - reuses some code from chapter 3\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkContext\nfrom pyspark.sql.types import StringType\nfrom pyspark.sql import functions as f\n\n\n# Create spark context\nsc = SparkContext(\"local\", \"Ch6BasicExampleApp\")\n# Get spark session\nspark = SparkSession.builder.getOrCreate()\n\n# Get the data and place it in a spark dataframe\ndata = spark.read.format(\"csv\").option(\"sep\", \";\").option(\"inferSchema\", \"true\").option(\"header\", \"true\").load(\n \"../chapter1/stream-classifier/data/bank/bank.csv\")\n\n# map target to numerical category\n#data = data.withColumn('label', f.when((f.col(\"y\") == \"yes\"), 1).otherwise(0))\n\ndata.printSchema()\n\ndata.show()\n\n# UDF\nimport datetime\ndef month_as_int(month):\n month_number = datetime.datetime.strptime(month, \"%b\").month\n return month_number\n\nspark.udf.register(\"monthAsInt\", month_as_int, StringType())\n\n\n# Apply in spark sql\ndata.createOrReplaceTempView('bank_data_view')\n\nspark.sql('''\nselect *, monthAsInt(month) as month_as_int from bank_data_view\n''').show()\n\n\n# Apply on dataframe\nfrom pyspark.sql.functions import udf\nmonth_as_int_udf = udf(month_as_int, StringType())\n\ndf = spark.table(\"bank_data_view\")\ndf.withColumn('month_as_int', month_as_int_udf(\"month\")).show()\n\n# Create with decorator syntax\n@udf(\"string\")\ndef month_as_int_udf(month):\n month_number = datetime.datetime.strptime(month, \"%b\").month\n return month_number\n\ndf.withColumn('month_as_int', month_as_int_udf(\"month\")).show()\n\n\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType\n\n# @pandas_udf('string')\n# def month_as_int(month_series):\n# return datetime.datetime.strptime()", "repo_name": "PacktPublishing/Machine-Learning-Engineering-with-Python", "sub_path": "Chapter06/spark_example_udfs.py", "file_name": "spark_example_udfs.py", "file_ext": "py", 
"file_size_in_byte": 1658, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 143, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyspark.SparkContext", "line_number": 9, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.getOrCreate", "line_number": 11, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 30, "usage_type": "call"}, {"api_name": "pyspark.sql.functions.udf", "line_number": 43, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pyspark.sql.functions.udf", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "23142590457", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport re\nfrom lxml.html import fromstring\nfrom lxml.etree import tostring\nfrom lxml.cssselect import CSSSelector\nfrom tag_exceptions import DoesNotHaveTagException, FoundManyTagsException\n\nclass HtmlSpec(object):\n html = None\n tree = None\n node = None\n\n def __init__(self, html, node=None):\n self.html = html\n self.tree = fromstring(str(html))\n self.node = node\n \n if(self.node == None):\n self.node = self.tree\n\n def __find__(self, tag_name):\n return self.tree.cssselect(tag_name)\n\n def has(self, tag_name, count=1):\n result = self.__find__(tag_name)\n\n if len(result) == 0:\n raise DoesNotHaveTagException('Html does not have tag %s' % tag_name)\n elif len(result) != count:\n raise FoundManyTagsException('Expected %d founded %d' % (count, len(result)))\n\n if len(result) == 1:\n self.node = result[0]\n return HtmlSpec(tostring(self.node), node = self.node)\n else:\n html_specs = []\n for r in result:\n html_specs.append(HtmlSpec(tostring(r), node=r))\n return html_specs\n \n def with_tag(self, tag_name):\n self.__find__(tag_name)\n\n if self.node != None:\n return HtmlSpec(self.html)\n else:\n raise DoesNotHaveTagException('Html does not have tag %s' % tag_name)\n", "repo_name": "rafaelmws/html_spec", "sub_path": "html_spec/html_spec.py", "file_name": "html_spec.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "lxml.html.fromstring", "line_number": 17, "usage_type": "call"}, {"api_name": "tag_exceptions.DoesNotHaveTagException", "line_number": 30, "usage_type": "call"}, {"api_name": "tag_exceptions.FoundManyTagsException", "line_number": 32, "usage_type": "call"}, {"api_name": "lxml.etree.tostring", "line_number": 36, "usage_type": "call"}, {"api_name": "lxml.etree.tostring", "line_number": 40, "usage_type": "call"}, {"api_name": "tag_exceptions.DoesNotHaveTagException", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "72316570725", "text": "import logging\nimport sys\n\nLOG_FILE = 'log.txt'\nLOG_FORMAT = '[%(asctime)s] %(levelname)-8s %(message)s'\n\nLOGGER = None\n\n\ndef load_logger():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n console_handler = 
logging.StreamHandler(sys.stdout)\n    console_handler.setLevel(logging.INFO)\n    console_handler.setFormatter(logging.Formatter(LOG_FORMAT))\n\n    logger.addHandler(console_handler)\n    return logger\n\n\ndef get_logger():\n    global LOGGER\n    if not LOGGER:\n        LOGGER = load_logger()\n    return LOGGER\n", "repo_name": "k-joel/yangbot", "sub_path": "logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "860717896", "text": "from datetime import *\nfilenya = open('dataperpus.txt','w+')\nisian = []\nwhile True:\n    kode = input('Enter member code\t: ')\n    name = input('Enter member name\t: ')\n    judul = input('Enter book title\t: ')\n    option = input('Repeat again (y/n)\t: ')\n    gabung = kode + "|" + name + "|" + judul + "|" + str(datetime.date(datetime.now())) + "|" + str((datetime.date(datetime.now()+timedelta(days=7))))\n    isian.append(gabung)\n    if option in ('n','N'):\n        for i in range(len(isian)):\n            filenya.write(str(isian[i]) + '\\n')\n        filenya.close()\n        break", "repo_name": "Dzazyy/python_project", "sub_path": "Chapter 11/Python Project 2.py", "file_name": "Python Project 2.py", "file_ext": "py", "file_size_in_byte": 585, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.date", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "18740631415", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib import rcParams\n\ndf = pd.read_csv(\"ShivamStudySchedule.csv\")\n#print(df)\nqsn = df[\"NoOfQuestions\"]\n#print(qsn)\ntopic = df[\"MathsTopic\"]\n#print(topic)\ndate = df[\"Date\"]\n#print(date)\nsns.set_theme(style=\"whitegrid\")\nrcParams[\"figure.figsize\"]= 10,10\nsns.barplot(x='Date', y='NoOfQuestions', hue='MathsTopic', data=df)\n\nx_range = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]\nplt.yticks(x_range)\n\nplt.xlabel(\"Dates\", fontsize = 16)\nplt.ylabel(\"Number of questions\", fontsize = 16)\n\nplt.show()\n\n\n", "repo_name": "Pragati8626/shivanstudy", "sub_path": "ShivamStudySchedule_barChart.py", "file_name": "ShivamStudySchedule_barChart.py", "file_ext": "py", "file_size_in_byte": 573, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "seaborn.set_theme", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.rcParams", "line_number": 15, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "20872698426", "text": "# coding:utf-8\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.datasets import fetch_mldata\r\nfrom chainer import cuda, Variable, FunctionSet,\\\r\n optimizers,Chain,serializers\r\nimport chainer.functions as F\r\nimport sys\r\n\r\nimport CNNcifar10Train\r\n\r\n\r\nif __name__==\"__main__\":\r\n\r\n unitSet = [512] #中間層ユニット数\r\n\r\n N = 1000 #テストするサンプル数\r\n N_train = 500 #訓練データの数\r\n N_test = N - N_train #テストデータの数\r\n\r\n tate = 4\r\n yoko = 4\r\n n_config = tate*yoko\r\n\r\n category= [\"Airplane\",\"Automobile\",\r\n \"Bird\",\"Cat\",\"Deer\",\"Dog\",\r\n \"Frog\",\"Horse\",\"Ship\",\"Truck\"]\r\n\r\n KeepTimeList = [\"2016-10-03-21-54\",\r\n \"2016-10-04-00-32\",\r\n \"2016-10-04-03-21\",\r\n \"2016-10-04-06-34\",\r\n \"2016-10-04-09-50\",\r\n \"2016-10-04-13-12\",\r\n \"2016-10-05-13-07\"]\r\n\r\n for val in range(6,7):\r\n if val == 2:\r\n NetName = \"conv2\"\r\n Title=NetName\r\n KeepTime = \"\"\r\n elif val == 7:\r\n NetName = \"Cifar_Liner\"\r\n KeepTime = KeepTimeList[val-1]\r\n else:\r\n NetName = \"Cifar_conv%d\"%(val)\r\n KeepTime = KeepTimeList[val-1]\r\n\r\n Cifar10 = CNNcifar10Train.Cifar10CNN(N,N_train,N_test,unitSet,NetName)\r\n serializers.load_npz\\\r\n ('./modelkeep/'+NetName+'_Model_'+KeepTime,Cifar10.model)\r\n #Cifar10.draw_1image(0)\r\n Cifar10.draw_answerChack(category,tate,yoko)\r\n #Cifar10.predict(n_config)\r\n plt.show()\r\n", "repo_name": "ysys1005/Gon", "sub_path": "CNNcifar10Test.py", "file_name": "CNNcifar10Test.py", "file_ext": "py", "file_size_in_byte": 1656, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "CNNcifar10Train.Cifar10CNN", "line_number": 49, "usage_type": "call"}, {"api_name": "chainer.serializers.load_npz", "line_number": 50, "usage_type": "call"}, {"api_name": "chainer.serializers", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "13097366415", "text": "from sys import stdin\nfrom itertools import product\n\n\ndef solution(N, board):\n\n def domain_length(domain):\n return domain[1] - domain[0]\n\n def color_check(row_domain, col_domain):\n ref_color = board[row_domain[0]][col_domain[0]]\n for x, y in product(range(*row_domain), range(*col_domain)):\n if board[x][y] != ref_color:\n return False\n return ref_color\n\n def compress(row_domain, col_domain):\n is_mono = color_check(row_domain, col_domain)\n if is_mono:\n return is_mono\n\n string = '('\n half_row_len, half_col_len = domain_length(row_domain)//2, domain_length(col_domain)//2\n for i in range(4):\n # check how output should be ordered and decide placing 'i//2', 'i%2'\n row_part = row_domain[0] + half_row_len*(i//2), row_domain[0] + half_row_len*(i//2+1)\n col_part = col_domain[0] + half_col_len*(i%2), col_domain[0] + half_col_len*(i%2+1)\n string += compress(row_part, col_part)\n return string + ')'\n\n return compress((0, N), (0, N))\n\n\nN = int(stdin.readline())\nboard = [list(stdin.readline().strip()) for _ in 
range(N)]\n\nprint(solution(N, board))\n", "repo_name": "grasshopperTrainer/coding_practice", "sub_path": "baekjoon/accepted/1992 쿼드트리.py", "file_name": "1992 쿼드트리.py", "file_ext": "py", "file_size_in_byte": 1208, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.product", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 34, "usage_type": "name"}, {"api_name": "sys.stdin.readline", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "24474501005", "text": "from rest_framework import serializers\n\nfrom openapi.models import GraphQuery\nfrom openapi.models import PredictionGraph\nfrom openapi.models import PredictionGraphPoint\n\nfrom openapi.models import HeatMapLocalQuery\nfrom openapi.models import HeatMapLocal\nfrom openapi.models import HeatMapLocalPoint\n\nfrom openapi.models import HeatMapPostcodeQuery\nfrom openapi.models import HeatMapPostcode\nfrom openapi.models import HeatMapPostcodePoint\n\nfrom openapi.models import GridSquareData\n\nfrom openapi.models import PROPERTY_CHOICES, ESTATE_CHOICES\n\n# Serializers for the Models, using DRF Serializers\n\n#-------------------------------------------------------------------------------\nclass HeatMapPostcodePointSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = HeatMapPostcodePoint\n        fields = ('postcode', 'value') \n\nclass HeatMapPostcodeSerializer(serializers.ModelSerializer):\n    points = HeatMapPostcodePointSerializer(many=True, read_only=True)\n    class Meta:\n        model = HeatMapPostcode\n        fields = ('id','points')\n\nclass HeatMapPostcodeQuerySerializer(serializers.ModelSerializer): \n    heat_map = HeatMapPostcodeSerializer(read_only=True) \n    class Meta:\n        model = HeatMapPostcodeQuery\n        fields = ('loaded', 'estate_type','property_type', 'date','heat_map')\n\nclass HeatMapLocalPointSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = HeatMapLocalPoint\n        fields = ('longitude','latitude', 'value') \n\nclass HeatMapLocalPointPlusSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = HeatMapLocalPoint\n        fields = ('longitude','latitude', 'date','estate_type','property_type', 'value') \n\nclass HeatMapLocalSerializer(serializers.ModelSerializer):\n    points = HeatMapLocalPointSerializer(many=True, read_only=True)\n    class Meta:\n        model = HeatMapLocal\n        fields = ('points',)\n\nclass HeatMapLocalQuerySerializer(serializers.ModelSerializer): \n    heat_map = HeatMapLocalSerializer(read_only=True) \n    class Meta:\n        model = HeatMapLocalQuery\n        fields = ('loaded','longitude','latitude','estate_type','property_type', 'date', 'radius','heat_map')\n\n\n\n#-------------------------------------------------------------------------------\nclass PredictionGraphPointSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = PredictionGraphPoint\n        fields = ('point_type', 'price', 'sigma', 'time') \n\nclass PredictionGraphSerializer(serializers.ModelSerializer):\n    points = PredictionGraphPointSerializer(many=True, read_only=True)\n    class Meta:\n        model = PredictionGraph\n        fields = ('id','points')\n\nclass GraphQuerySerializer(serializers.ModelSerializer): \n    graph = PredictionGraphSerializer(read_only=True) \n    class Meta:\n        model = GraphQuery\n        fields = ('id','loaded','longitude','latitude','estate_type','property_type','graph')\n    
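\n# A minimal usage sketch (assumes the serializers above; values illustrative):\n# GraphQuerySerializer(query).data ->\n# {'id': 1, 'loaded': True, ..., 'graph': {'id': 3, 'points': [{'point_type': ...}]}}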
\n#-------------------------------------------------------------------------------\n\nclass GridSquareDataSerializer(serializers.ModelSerializer):\n class Meta:\n model = GridSquareData\n fields = ('id', 'longitude', 'latitude')\n", "repo_name": "suryarastogi/ImperialProjects", "sub_path": "Group Project/ghostapi/openapi/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 3173, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapPostcodePoint", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 27, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapPostcode", "line_number": 30, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 33, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapPostcodeQuery", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 39, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapLocalPoint", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 44, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapLocalPoint", "line_number": 46, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 49, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 49, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapLocal", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 55, "usage_type": "name"}, {"api_name": "openapi.models.HeatMapLocalQuery", "line_number": 58, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 64, "usage_type": "name"}, {"api_name": "openapi.models.PredictionGraphPoint", "line_number": 66, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 69, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 69, "usage_type": "name"}, {"api_name": "openapi.models.PredictionGraph", "line_number": 72, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 75, "usage_type": "name"}, {"api_name": "openapi.models.GraphQuery", "line_number": 78, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 84, "usage_type": "name"}, {"api_name": 
"openapi.models.GridSquareData", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "9019487810", "text": "import requests\nimport time\n\n#criar decorator calclar tempo\ndef calcular_tempo(funcao):\n def wrapper():\n tempo_inicial = time.time()\n print(\"vou pegar a cotacao\")\n funcao()\n print(\"peguei a cotacao\")\n tempo_final = time.time()\n print(f\"tempo de operacao foi:{tempo_final - tempo_inicial} segundos\")\n return wrapper\n\n\n@calcular_tempo\ndef pegar_cotacao_dolar():\n link = f'https://economia.awesomeapi.com.br/last/USD-BRL'\n requisicao = requests.get(link)\n requisicao = requisicao.json()\n print(requisicao['USDBRL']['bid'])\n\npegar_cotacao_dolar()\n", "repo_name": "RicardoSilvaSoares/python", "sub_path": "src/Flask/Decorator_aprendendo.py", "file_name": "Decorator_aprendendo.py", "file_ext": "py", "file_size_in_byte": 603, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "47949676308", "text": "# to open/load image\r\nfrom PIL import Image,ImageFilter\r\n# to render image\r\nimport matplotlib.pyplot as plt\r\n# to make kernels and noise matrix\r\nimport numpy as np\r\n\r\n# A helper function to show images side by\r\n\r\ndef show_side_by_side(im1,title1,im2,title2,gray=False):\r\n CMAP = 'viridis' if not gray else 'gray'\r\n plt.subplot(1,2,1)\r\n plt.title(title1)\r\n plt.imshow(im1,cmap=CMAP)\r\n\r\n plt.subplot(1,2,2)\r\n plt.title(title2)\r\n plt.imshow(im2,cmap=CMAP)\r\n \r\n plt.show()\r\n\r\nim = Image.open('lenna.png')\r\nrows,cols = im.size\r\n# Okay now lets create some noise\r\nnoise = np.random.normal(0,15,size=(rows,cols,3)).astype(np.uint8)\r\n\r\nnoised_im_arr = im + noise\r\n\r\nplt.figure(figsize=(7,7))\r\n\r\nshow_side_by_side(im,'Original',noised_im_arr,'Noised Image')\r\n\r\n# Now lets filter the noise\r\n# mean filtering\r\nnoised_im = Image.fromarray(noised_im_arr)\r\n\r\nkernel = np.ones((5,5))/36\r\nkernel_filter = ImageFilter.Kernel((5,5),kernel.flatten())\r\n\r\nfiltered_img = noised_im.filter(kernel_filter)\r\n\r\nshow_side_by_side(noised_im,'Noised Image',filtered_img,'Filtered Image')\r\n\r\n# Smaller kernel can make a sharp image but trade off the noise reduction capabilities\r\nsmall_kernel = np.ones((3,3))/9\r\nsmall_kernel_filte = ImageFilter.Kernel((3,3),small_kernel.flatten())\r\n\r\nfiltered_img_small_kernel = noised_im.filter(small_kernel_filte)\r\n\r\nshow_side_by_side(filtered_img,'5 x 5 kernel',filtered_img_small_kernel,'3 x 3 kernel')\r\n# It is clearly visible that her shoulder is now sharper but the green spots are brighter too\r\n\r\n# Noise reduction can be achieved with Gaussian Blur too\r\n# ImageFilter.GaussianBlur\r\n# Default Radius is 2\r\n\r\ngaussian_img = noised_im.filter(ImageFilter.GaussianBlur)\r\nshow_side_by_side(noised_im,'Noised Image',gaussian_img,'Gaussian Blurred Img')\r\n\r\n# Sharpening Images\r\n# For image sharpening we will use commonly used kernel\r\n# -1 -1 -1\r\n# -1 9 -1\r\n# -1 -1 -1\r\n\r\nsharpen_kernel = np.array([[-1,-1,-1],\r\n [-1, 9,-1],\r\n [-1,-1,-1]])\r\n\r\nsharpen_filter = ImageFilter.Kernel((3,3),sharpen_kernel.flatten())\r\n\r\nsharped_gaussian = gaussian_img.filter(sharpen_filter)\r\nsharpen_img = im.filter(sharpen_filter)\r\nshow_side_by_side(gaussian_img,'Gaussian blurred 
Img',sharped_gaussian,'Sharpened Gaussian Img')\r\nshow_side_by_side(im,'original image',sharpen_img,'Sharpened Image')\r\n\r\n# There's a SHARPEN filter provided by PIL by default\r\nPIL_sharpen_img = im.filter(ImageFilter.SHARPEN)\r\nshow_side_by_side(sharpen_img,'Sharpen by manual',PIL_sharpen_img,'Sharpen by PIL')\r\n\r\n# Edge Detection\r\n# Before finding edges, use the ImageFilter.EDGE_ENHANCE filter to enhance the edges of an image so that you can find more and more edges\r\nimg = Image.open('barbara.png')\r\nenhanced_egde = img.filter(ImageFilter.EDGE_ENHANCE)\r\n\r\nshow_side_by_side(img,'Original',enhanced_egde,'Enhanced Edges',gray=True)\r\n\r\n# Now we have enhanced edges we can start finding the edges in the image\r\n\r\nfind_edge = img.filter(ImageFilter.FIND_EDGES)\r\nshow_side_by_side(img,'Original Image',find_edge,'Edges in the Image',gray=True)\r\n\r\n# Median Filter\r\n# The median filter takes the median of the neighbouring pixels and replaces the middle one with it\r\n\r\nim2 = Image.open('cameraman.jpeg')\r\n\r\nmedian_filtered = im2.filter(ImageFilter.MedianFilter)\r\n# median filters are good for increasing the segmentation within the images\r\nshow_side_by_side(im2,'Original',median_filtered,'Median Filtered')\r\n", "repo_name": "zee-1/ComputerVision", "sub_path": "linearFilteringPIL.py", "file_name": "linearFilteringPIL.py", "file_ext": "py", "file_size_in_byte": 3479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 35, "usage_type": "name"}, {"api_name": 
"numpy.ones", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.Kernel", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 46, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.GaussianBlur", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.Kernel", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 70, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.SHARPEN", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 78, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 83, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 83, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.EDGE_ENHANCE", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.FIND_EDGES", "line_number": 90, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 90, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 96, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 96, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.MedianFilter", "line_number": 98, "usage_type": "attribute"}, {"api_name": "PIL.ImageFilter", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "22030813547", "text": "from typing import Tuple, Iterator\nfrom contextlib import contextmanager\nfrom pprint import pprint\nimport os\nimport signal\nimport subprocess\nimport time\n\nimport boggart\nimport bugzoo\nimport rooibos\n\n\n@contextmanager\ndef test_environment(port_bugzoo: int = 6060,\n port_rooibos: int = 8888,\n port_boggart: int = 8000,\n verbose: bool = True\n ) -> Iterator[Tuple[boggart.Client, bugzoo.Client]]:\n url_bugzoo = \"http://127.0.0.1:{}\".format(port_bugzoo)\n with boggart.server.ephemeral_stack(port_boggart=port_boggart,\n port_rooibos=port_rooibos,\n port_bugzoo=port_bugzoo,\n verbose=verbose) as client_boggart:\n client_bugzoo = bugzoo.Client(url_bugzoo)\n print(client_boggart)\n print(client_bugzoo)\n yield client_boggart, client_bugzoo\n\n\ndef generate_mutant():\n with test_environment() as (client_boggart, client_bugzoo):\n snapshot = client_bugzoo.bugs[\"tse2012:gcd\"]\n location = boggart.FileLocationRange.from_string(\"gcd.c@10:3::10:15\")\n mutation = boggart.Mutation(\"NEGATE_IF_CONDITION_CSTYLE\", 0, location, {})\n mutations = [mutation]\n mutant = client_boggart.mutate(snapshot, mutations)\n print(mutant)\n print(mutant.uuid)\n print(mutant.to_dict())\n\n\ndef generate_mutations():\n with test_environment() as (client_boggart, client_bugzoo):\n snapshot = client_bugzoo.bugs[\"tse2012:gcd\"]\n filepath = \"gcd.c\"\n for mutation in client_boggart.mutations(snapshot, filepath):\n pprint(mutation.to_dict())\n\n\nif __name__ == '__main__':\n generate_mutations()\n", "repo_name": "squaresLab/boggart", "sub_path": "examples/mutant.py", "file_name": "mutant.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "boggart.server.ephemeral_stack", "line_number": 21, "usage_type": "call"}, {"api_name": "boggart.server", "line_number": 21, "usage_type": "attribute"}, {"api_name": "bugzoo.Client", 
"line_number": 25, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Iterator", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 19, "usage_type": "name"}, {"api_name": "boggart.Client", "line_number": 19, "usage_type": "attribute"}, {"api_name": "bugzoo.Client", "line_number": 19, "usage_type": "attribute"}, {"api_name": "boggart.FileLocationRange.from_string", "line_number": 34, "usage_type": "call"}, {"api_name": "boggart.FileLocationRange", "line_number": 34, "usage_type": "attribute"}, {"api_name": "boggart.Mutation", "line_number": 35, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "14687632630", "text": "import repackage\n\nrepackage.up()\n\nimport os\n\nimport json\n\nimport random\n\nimport datetime\nimport time\n\nimport discord\nfrom discord.ext import commands\n\nimport genanki\n\nimport tempfile\n\nfrom libs.GoogleDriveManager import GoogleDriveManager\nfrom libs.tools import replace_latex_delimiters, remove_text\n\nwith open(\"config.json\", \"r\") as f:\n config = json.load(f)\n\nintents = discord.Intents.default()\nintents.message_content = True\n\nbot = commands.Bot(command_prefix=config[\"bot_prefix\"], intents=intents)\n\ngoogle_drive_manager = GoogleDriveManager(\n credentials_file_path=config[\"credentials_file_path\"],\n data_folder_path=config[\"data_folder_path\"],\n)\n\n\n@bot.event\nasync def on_ready() -> None:\n print(\"------\")\n print(\"Discord Bot: Logged in as\")\n print(bot.user.name)\n print(bot.user.id)\n print(\"------\")\n\n\n@bot.command()\nasync def ping(ctx) -> None:\n await ctx.send(\"pong\")\n\n\n@bot.command()\nasync def update(ctx) -> None:\n google_drive_manager.get_files(folder_id=config[\"folder_id\"])\n\n embed = discord.Embed(\n title=\"Database updated\",\n color=discord.Color.purple(),\n )\n\n embed.set_footer(text=\"Made with 💜 by bonsainoodle\")\n\n await ctx.send(embed=embed)\n\n\n@bot.command()\nasync def rev(ctx, num_cards: int, *args: str) -> None:\n with open(f\"{config['data_folder_path']}/data.json\", \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n\n random.shuffle(data)\n\n selected_cards = []\n tags = []\n backlinks = []\n include_tags = False\n include_backlinks = False\n\n for arg in args:\n arg = arg.replace(\"=\", \" \")\n arg = arg.lower()\n\n if arg.isdigit():\n num_cards = int(arg)\n elif arg.startswith(\"#\"):\n tags.append(arg[1:])\n elif arg.startswith(\"[[\"):\n backlinks.append(arg[2:-2])\n elif arg == \"include_tags\":\n include_tags = True\n elif arg == \"include_backlinks\":\n include_backlinks = True\n\n for card in data:\n if len(args) == 0:\n selected_cards.append(card)\n\n if (not tags or any(tag in card[\"tags\"] for tag in tags)) and (\n not backlinks\n or any(backlink in card[\"backlinks\"] for backlink in backlinks)\n ):\n if (include_tags and any(tag in card[\"tags\"] for tag in tags)) or (\n include_backlinks\n and any(backlink in card[\"backlinks\"] for backlink in backlinks)\n ):\n selected_cards.append(card)\n\n if len(selected_cards) >= num_cards:\n break\n\n if len(selected_cards) == 0:\n embed = discord.Embed(\n title=\"No card found\",\n description=\"Try to change your parameters\",\n color=discord.Color.purple(),\n )\n\n embed.set_footer(text=\"Made with 💜 by bonsainoodle\")\n\n await ctx.send(embed=embed)\n\n return\n\n now = datetime.datetime.now()\n file_name = f\"{now.strftime('%Y-%m-%d-%H-%M-%S')}.apkg\"\n\n 
anki_model = genanki.Model(\n random.randrange(1 << 30, 1 << 31),\n file_name,\n fields=[\n {\"name\": \"ContentBefore\"},\n {\"name\": \"ContentAfter\"},\n ],\n templates=[\n {\n \"name\": \"Card 1\",\n \"qfmt\": \"{{ContentBefore}}\",\n \"afmt\": '{{FrontSide}}
{{ContentAfter}}',\n },\n ],\n )\n\n anki_deck = genanki.Deck(random.randrange(1 << 30, 1 << 31), file_name)\n\n for card in selected_cards:\n my_note = genanki.Note(\n model=anki_model,\n fields=[\n remove_text(replace_latex_delimiters(card[\"content_before\"])),\n remove_text(replace_latex_delimiters(card[\"content_after\"])),\n ],\n tags=card[\"tags\"],\n )\n\n anki_deck.add_note(my_note)\n\n anki_package = genanki.Package(anki_deck)\n\n temp_dir = tempfile.mkdtemp(prefix=\"anki_export_\")\n\n output_file_path = os.path.join(temp_dir, file_name)\n\n anki_package.write_to_file(output_file_path)\n\n with open(output_file_path, \"rb\") as apkg_file:\n apkg_content = apkg_file.read()\n\n os.remove(output_file_path)\n\n os.rmdir(temp_dir)\n\n google_drive_manager.upload_file(\n parent_folder_id=config[\"rev_folder_id\"],\n file_name=file_name,\n content=apkg_content,\n )\n\n time.sleep(1)\n\n file_link = google_drive_manager.get_file_link(\n parent_folder_id=config[\"rev_folder_id\"], file_name=file_name\n )\n\n embed = discord.Embed(\n title=f\"Created a new file with {len(selected_cards)} cards\",\n description=f\"[{file_name}]({file_link})\",\n color=discord.Color.purple(),\n )\n\n embed.set_footer(text=\"Made with 💜 by bonsainoodle\")\n\n await ctx.send(embed=embed)\n\n\nbot.run(config[\"bot_token\"])\n", "repo_name": "bonsainoodle/obsidianToAnki", "sub_path": "bot/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 4833, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "repackage.up", "line_number": 3, "usage_type": "call"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.Intents.default", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 30, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 30, "usage_type": "name"}, {"api_name": "libs.GoogleDriveManager.GoogleDriveManager", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 56, "usage_type": "call"}, {"api_name": "discord.Color.purple", "line_number": 58, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 58, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 69, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 71, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 112, "usage_type": "call"}, {"api_name": "discord.Color.purple", "line_number": 115, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 115, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 124, "usage_type": "attribute"}, {"api_name": "genanki.Model", "line_number": 127, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 128, "usage_type": "call"}, {"api_name": "genanki.Deck", "line_number": 143, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 143, "usage_type": "call"}, {"api_name": "genanki.Note", "line_number": 146, "usage_type": "call"}, {"api_name": "libs.tools.remove_text", "line_number": 149, "usage_type": "call"}, {"api_name": "libs.tools.replace_latex_delimiters", "line_number": 149, "usage_type": "call"}, {"api_name": "libs.tools.remove_text", "line_number": 150, "usage_type": "call"}, {"api_name": 
"libs.tools.replace_latex_delimiters", "line_number": 150, "usage_type": "call"}, {"api_name": "genanki.Package", "line_number": 157, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 159, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 168, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 170, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 178, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 184, "usage_type": "call"}, {"api_name": "discord.Color.purple", "line_number": 187, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 187, "usage_type": "attribute"}]} +{"seq_id": "72446906085", "text": "import sys \nimport subprocess\nimport os\nimport configparser\nimport re\nimport glob\nfrom PyQt5.QtWidgets import QApplication, QGraphicsOpacityEffect, QDialog, QLabel, QLineEdit\nfrom PyQt5 import QtCore\nfrom PyQt5.Qt import Qt\n\no = \"\"\ncmd = \"\"\n\nclass Example(QDialog):\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.setWindowOpacity(0)\n\t\tself.rpath = None\n\t\tself.path = None\n\t\tself.appname = None\n\t\tself.lineEntry = QLineEdit(self)\n\t\tself.lineEntry.returnPressed.connect(self.run)\n\n\t\tself.lineEntry.move(16,16)\n\t\tself.lineEntry.resize(200,40)\n\n\t\tself.qlabel = QLabel(self)\n\t\tself.qlabel.move(16,64)\n\n\t\tself.lineEntry.textChanged.connect(self.onChanged)\n\t\t\n\t\tself.setGeometry(50,50,320,200)\n\t\tself.setWindowTitle(\"QLineEdit Example\")\n\t\tself.show()\n\t\tself.paths()\n\t\t\n\t\n\tdef paths(self):\n\t\tself.qlabel.adjustSize()\t\t\n\t\tconfig = configparser.RawConfigParser()\n\t\tprint(config.sections())\n\t\t#saves all .desktop files to an array\n\t\tpath = glob.glob(f'/usr/share/applications/*.desktop')\n\t\tnames = []\n\t\tfor e in path:\n\t\t\tconfig.read(e)\n\t\t\tname = (config['Desktop Entry']['Name'])\n\t\t\ta = f'{name }={e}'\n\t\t\tnames.append(a)\n\t\tsnames = '\\n'.join(names)\n\t\tprint(snames)\n\t\tself.paths = snames\n\t\t\n\n\tdef onChanged(self, text):\n\t\trpath=\"\"\n\t\tappname = \"\"\n\t\tprint(self.paths)\n\t\tsnames = self.paths\n\t\t\n\t\tcinput = text.capitalize()\n\t\tfor line in snames.split('\\n'):\n\t\t\tif cinput in line:\n\t\t\t\tt = re.sub(r'^.*?=', '=', line)\n\t\t\t\trpath = t.replace(\"=\", \"\") \n\t\t\t\tprint(rpath)\n\t\t\t\tbreak\n\t\tfor line in snames.split('\\n'):\n\t\t\tif cinput in line:\n\t\t\t\tsline = ''.join(line)\n\t\t\t\tformat = sline.split(\"=\", 1)\n\t\t\t\tappname = format[0] \n\t\t\t\tprint(appname)\n\t\t\t\tbreak\n\t\tself.rpath = rpath\n\t\tself.appname = appname\n\t\t\n\t\tpath = os.path.join('/tmp', 'ls.txt')\n\t\tfile = open(path, \"w\")\n\t\tfile.write(self.appname)\n\t\tos.system('sh ~/code/appLauncher/barupdate.sh ')\n\n\tdef bar(self):\n\t\tpath = os.path.join('/tmp', 'ls.txt')\n\t\tfile = open(path, \"w\")\n\t\tfile.write(self.appname)\n\t\tos.system('echo hook:module/demo2 >>/tmp/polybar_mqueue.*')\n\n\tdef run(self):\n\t\tconfig = configparser.RawConfigParser()\n\t\tprint(self.rpath)\n\t\tconfig.read(self.rpath)\n\t\texec = (config['Desktop Entry']['Exec'])\n\t\tprint(exec)\n\t\tsubprocess.run(f'{exec} & disown', shell=True)\n\t\texit()\n\t\t\n\nif __name__ == '__main__':\n\tapp = QApplication(sys.argv)\n\tex = Example()\n\tsys.exit(app.exec_())", "repo_name": "becker63/PolyLauncher", "sub_path": "applauncher.py", "file_name": 
"applauncher.py", "file_ext": "py", "file_size_in_byte": 2314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QLineEdit", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 28, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 41, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 88, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 96, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 101, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 101, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "10445010448", "text": "from rest_framework import serializers\n\nfrom apps.visums.models import LinkedCategory\nfrom apps.visums.models.enums import CheckState\nfrom apps.visums.serializers import CategorySerializer, LinkedSubCategorySerializer\n\n\n# LOGGING\nimport logging\nfrom scouts_auth.inuits.logging import InuitsLogger\n\nlogger: InuitsLogger = logging.getLogger(__name__)\n\n\nclass LinkedCategorySerializer(serializers.ModelSerializer):\n\n parent = CategorySerializer()\n sub_categories = LinkedSubCategorySerializer(many=True)\n\n class Meta:\n model = LinkedCategory\n exclude = [\"category_set\"]\n\n def to_representation(self, obj: LinkedCategory) -> dict:\n if obj.is_archived:\n return None\n\n # logger.debug(\"LINKED CATEGORY TO_REPRESENTATION: %s\", obj)\n\n data = super().to_representation(obj)\n\n # logger.debug(\"LINKED CATEGORY TO_REPRESENTATION: %s\", obj)\n\n # data[\"state\"] = CheckState.CHECKED\n # for sub_category in data.get(\"sub_categories\", []):\n # if CheckState.is_unchecked(sub_category.get(\"state\", CheckState.UNCHECKED)):\n # data[\"state\"] = CheckState.UNCHECKED\n # break\n data[\"state\"] = obj.check_state\n data[\"readable_name\"] = obj.readable_name\n\n data[\"camp\"] = {}\n data[\"visum\"] = {}\n\n visum = obj.category_set.visum\n\n data[\"camp\"][\"name\"] = visum.name\n data[\"visum\"][\"id\"] = visum.id\n\n return data\n", "repo_name": "ScoutsGidsenVL/kampvisum-backend", "sub_path": "scouts_kampvisum_api/apps/visums/serializers/linked_category_serializer.py", "file_name": "linked_category_serializer.py", "file_ext": "py", "file_size_in_byte": 1457, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scouts_auth.inuits.logging.InuitsLogger", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 15, 
"usage_type": "name"}, {"api_name": "apps.visums.serializers.CategorySerializer", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.visums.serializers.LinkedSubCategorySerializer", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.visums.models.LinkedCategory", "line_number": 21, "usage_type": "name"}, {"api_name": "apps.visums.models.LinkedCategory", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "8498017953", "text": "#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# 搬运色块_1\n\nimport cv2\nimport numpy as np\nimport time\nimport threading\nimport signal\n# import LeArm\n# import kinematics as kin\n# import RPi.GPIO as GPIO\ndef test(aaa):\n print(\"hello world\"+aaa)\n time.sleep(1000)\n print('aaaa')\ndebug = True\n\nstream = \"http://127.0.0.1:8080/?action=stream?dummy=param.mjpg\"\ncap = cv2.VideoCapture(stream)\n\norgFrame = None\nRunning = False\n\n# 校准按键\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nkey = 22\nGPIO.setup(key, GPIO.IN, GPIO.PUD_UP)\n# 校准标志\ncorrection_flag = False\n\n# # 要识别的颜色字典\n# color_dist = {'red': {'Lower': np.array([0, 60, 60]), 'Upper': np.array([6, 255, 255])},\n# 'blue': {'Lower': np.array([100, 80, 46]), 'Upper': np.array([124, 255, 255])},\n# 'green': {'Lower': np.array([35, 43, 35]), 'Upper': np.array([90, 255, 255])},\n# }\n# 色块颜色,位置列表\nposition_color_list = []\n# 识别到色块标志\ncv_blocks_ok = False\n# 搬运步骤\nstep = 0\nnum_random = None\n# 识别次数\ncv_count = 0\n# 用于判读色块是否稳定\nlast_blocks = []\nlast_x = 0\nstable = False\n# 存储色块, 用于判读色块 Y轴的远近, 机械臂先取近的\nstorage_blocks = []\n\n# 暂停信号的回调\ndef cv_stop(signum, frame):\n global Running\n\n print(\"Stop \")\n if Running is True:\n Running = False\n cv2.destroyAllWindows()\n\n# 继续信号的回调\ndef cv_continue(signum, frame):\n global stream\n global Running\n global cap\n\n if Running is False:\n cap = cv2.VideoCapture(stream)\n Running = True\n\n# 注册信号回调\nsignal.signal(signal.SIGTSTP, cv_stop)\nsignal.signal(signal.SIGCONT, cv_continue)\n\n# 数值映射\n# 将一个数从一个范围映射到另一个范围\ndef leMap(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min\n\ndef move_blocks(position_color_list):\n global cv_blocks_ok,step\n #\n while True:\n while cv_blocks_ok is True:\n if len(position_color_list) == 1: #\n # 数据处理\n print (position_color_list, 'pos')\n if step == 0:\n x_pix_cm = position_color_list['x_pic']#position_color_list[0][1]\n y_pix_cm = position_color_list['y_pic']#position_color_list[0][2]\n angle = position_color_list['angle']#position_color_list[0][3]\n # 数据映射\n n_x = int(leMap(x_pix_cm, 0.0, 320.0, -1250.0, 1250.0)) * 1.0\n n_y = int(leMap(240 - y_pix_cm, 0.0, 240.0, 1250, 3250.0)) * 1.0\n # 需要根据实际情况调整,偏差主要来自舵机的虚位\n if n_x < -100:\n n_x -= 120 # 偏差\n LeArm.setServo(1, 700, 500)\n time.sleep(0.5)\n step = 1\n elif step == 1:\n # 机械臂下去\n if kin.ki_move(n_x, n_y, 200.0, 1500):\n step = 2\n else:\n step = 6\n elif step == 2:\n # 根据方块的角度转动爪子\n if angle <= -45:\n angle = -(90 + angle)\n n_angle = leMap(angle, 0.0, -45.0, 1500.0, 1750.0)\n if n_x > 0:\n LeArm.setServo(2, 3000 - n_angle, 500)\n else:\n LeArm.setServo(2, n_angle, 500)\n time.sleep(0.5)\n step = 3\n elif step == 3:\n # 抓取\n print ('3 ok')\n LeArm.setServo(1, 1200, 500)\n time.sleep(0.5)\n step = 4\n elif step == 4: # 将方块提起\n print ('4 ok')\n kin.ki_move(n_x, n_y, 700.0, 1000)\n step = 5\n elif step == 5: # 抓取成功,放置方块\n print ('5 ok')\n if position_color_list['field'] == 'red':\n LeArm.runActionGroup('red', 1)\n elif position_color_list['field']== 'blue':\n LeArm.runActionGroup('blue', 1)\n 
elif position_color_list['field']== 'green':\n                        LeArm.runActionGroup('green', 1)\n                    step = 6\n                elif step == 6: # Reset the arm\n                    print ('6 ok')\n                    LeArm.runActionGroup('rest', 1)\n                    #threadLock.acquire()\n                    position_color_list = []\n                    cv_blocks_ok = False\n                    #threadLock.release()\n                    step = 0\n        else:\n            time.sleep(0.01)\n\n\n# Start the action-running thread\n# th1 = threading.Thread(target=move_blocks)\n# th1.setDaemon(True)\n# th1.start()\n#\n# # Thread lock\n# threadLock = threading.Lock()\n\n# Lens distortion coefficients\nlens_mtx = np.array([\n    [993.17745922, 0., 347.76412756],\n    [0., 992.6210587, 198.08924031],\n    [0., 0., 1.],\n    ])\nlens_dist = np.array([[-2.22696961e-01, 3.34897836e-01, 1.43573965e-03, -5.99140365e-03, -2.03168813e+00]])\n\n\n# Lens distortion adjustment\ndef lens_distortion_adjustment(image):\n    global lens_mtx, lens_dist\n    h, w = image.shape[:2]\n    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(lens_mtx, lens_dist, (w, h), 0, (w, h)) # free scaling parameter\n    dst = cv2.undistort(image, lens_mtx, lens_dist, None, newcameramtx)\n    return dst\n\n\n# Arm position calibration\ndef Arm_Pos_Corr():\n    LeArm.setServo(1, 1200, 500)\n    time.sleep(0.5)\n    kin.ki_move(0, 2250, 200.0, 1500)\n\nif debug:\n    Running = True\nelse:\n    Running = False\n\n# Press KEY2 before running the program to calibrate the arm position; press KEY again when done to exit\nrun_corr_one = 0\n\n# Initialize the arm position\nLeArm.runActionGroup('rest', 1)\n# while True:\n    # if GPIO.input(key) == 0:\n    # time.sleep(0.1)\n    # if GPIO.input(key) == 0:\n    # correction_flag = not correction_flag\n    # if correction_flag is False:\n    # LeArm.runActionGroup('rest', 1)\n    # if correction_flag is False:\n    # run_corr_one = 0\n    # if Running:\n    # if cap.isOpened():\n    # ret, orgFrame = cap.read()\n    # if ret:\n    # t1 = cv2.getTickCount()\n    # try:\n    # orgFrame = cv2.resize(orgFrame, (320,240), interpolation = cv2.INTER_CUBIC) # scale the image to 320*240\n    # except Exception as e:\n    # print(e)\n    # continue\n    # if orgFrame is not None :\n    # orgFrame = lens_distortion_adjustment(orgFrame)\n    # img_h, img_w = orgFrame.shape[:2]\n# # #\n#\n#\n\n\n\n    # # Get the image center coordinates x, y\n    # img_center_x = img_w / 2\n    # img_center_y = img_h / 2\n    # if cv_blocks_ok is False:\n    # # Gaussian blur\n    # gs_frame = cv2.GaussianBlur(orgFrame, (5, 5), 0)\n    # # Convert the color space\n    # hsv = cv2.cvtColor(gs_frame, cv2.COLOR_BGR2HSV)\n    # for i in color_dist:\n    # # Look up each color in the dictionary\n    # mask = cv2.inRange(hsv, color_dist[i]['Lower'], color_dist[i]['Upper'])\n    # # Erode\n    # mask = cv2.erode(mask, None, iterations=2)\n    # # Dilate\n    # kernel = np.ones((5, 5), np.uint8)\n    # mask = cv2.dilate(mask, kernel, iterations=2)\n    # # Find contours\n    # # cv2.imshow('mask', mask)\n    # cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n    # if len(cnts) > 0:\n    # # Find the largest region\n    # c = max(cnts, key=cv2.contourArea)\n    # # Returned values: center (x, y), (w, h), angle\n    # rect = cv2.minAreaRect(c)\n    # # Get the 4 vertices of the minimum bounding rectangle\n    # box = cv2.boxPoints(rect)\n    # # Data type conversion\n    # # Draw the contour\n    # cv2.drawContours(orgFrame, [np.int0(box)], -1, (0, 255, 255), 2)\n    # # Find the block center\n    # c_x, c_y = rect[0]\n    # h, w = rect[1]\n    # c_angle = rect[2]\n    # if h * w >= 1350: # block area limit\n    # # Draw the center point\n    # cv2.circle(orgFrame, (int(c_x), int(c_y)), 3, (216, 0, 255), -1)\n    # # print 'nnn', int(c_x), int(c_y)\n    # # print c_angle\n    # # Store the list used to judge whether X is stable\n    # last_blocks.append([int(c_x), i])\n    # if stable:\n    # # Store the data once stable\n    # storage_blocks.append((int(c_y), int(c_x), i, int(c_angle)))\n    # '''\n    # Steps to detect block positions:\n    # 1. Judge whether the block X-axis data is stable\n    # 2. Once stable, store the data\n    # 3. From the stable data, find the block nearest to the arm along the Y axis\n    # '''\n    # stable = False\n    # if len(last_blocks) > 0:\n    # if -10 <= int(last_blocks[len(last_blocks) - 1][0] - last_x) <= 10: # only judge whether the last block is stable\n    # print (cv_count)\n    # cv_count += 1\n    # else:\n    # cv_count = 0\n    # 
last_x = int(last_blocks[len(last_blocks) - 1][0])\n    # last_blocks = []\n    # if cv_count >= 5:\n    # cv_count = 0\n    # stable = True # data is stable, start taking it\n    # # Send the stabilized data to the transport process\n    # if len(storage_blocks) > 0:\n    # max_y = storage_blocks.index(max(storage_blocks))\n    # # Store the stable data: color, X, Y, block angle\n    # position_color_list.append((storage_blocks[max_y][2], storage_blocks[max_y][1],\n    # storage_blocks[max_y][0], storage_blocks[max_y][3]))\n    # storage_blocks = []\n    # cv_blocks_ok = True # start transporting\n    # # Draw the image center\n    # cv2.line(orgFrame, (int(img_w / 2) - 20, int(img_h / 2)), (int(img_w / 2) + 20, int(img_h / 2)), (0, 0, 255), 1)\n    # cv2.line(orgFrame, (int(img_w / 2), int(img_h / 2) - 20), (int(img_w / 2), int(img_h / 2) + 20), (0, 0, 255), 1)\n    # t2 = cv2.getTickCount()\n    # time_r = (t2 - t1) / cv2.getTickFrequency()\n    # fps = 1.0/time_r\n    # if debug:\n    # cv2.putText(orgFrame, \"fps:\" + str(int(fps)),\n    # (10, orgFrame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)#(0, 0, 255)BGR\n    # cv2.imshow(\"orgFrame\", orgFrame)\n    # cv2.waitKey(1)\n    # else:\n    # time.sleep(0.01)\n    # else:\n    # if correction_flag and run_corr_one == 0:\n    # run_corr_one += 1\n    # Arm_Pos_Corr()\n    # else:\n    # time.sleep(0.01)\n    #\n    #\n", "repo_name": "zsyu9779/machine", "sub_path": "Handling_color_blocks.py", "file_name": "Handling_color_blocks.py", "file_ext": "py", "file_size_in_byte": 12524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 70, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 74, "usage_type": "call"}, {"api_name": "signal.SIGTSTP", "line_number": 74, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 75, "usage_type": "call"}, {"api_name": "signal.SIGCONT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 118, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 124, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.getOptimalNewCameraMatrix", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.undistort", "line_number": 173, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "1703820477", "text": "from gi.repository import Gtk\n# from .spec_filechoosers import ExposureLoader, FileEntryWithButton\n\nclass PleaseWaitDialog(Gtk.Dialog):\n    def __init__(self, title='Data reduction running, please wait...', parent=None, flags=Gtk.DialogFlags.DESTROY_WITH_PARENT | Gtk.DialogFlags.MODAL, buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)):\n        Gtk.Dialog.__init__(self, title, parent, flags, buttons)\n        vb = self.get_content_area()\n        self.pbar = Gtk.ProgressBar()\n        self.pbar.set_text('Working...')\n        self.label = Gtk.Label()\n        self.label.set_line_wrap(True)\n        vb.pack_start(self.pbar, False, True, 0)\n        vb.pack_start(self.label, True, True, 0)\n        vb.show_all()\n    def set_label_text(self, msg):\n        self.label.set_text(msg)\n        self.pbar.pulse()\n    \nclass 
PleaseWaitInfoBar(Gtk.InfoBar):\n    def __init__(self, buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)):\n        Gtk.InfoBar.__init__(self)\n        for i in range(len(buttons) // 2): # integer division so range() receives an int on Python 3\n            self.add_button(buttons[2 * i], buttons[2 * i + 1])\n        self.set_message_type(Gtk.MessageType.INFO)\n        vb = self.get_content_area()\n        self.label = Gtk.Label('Data reduction running...')\n        self.pbar = Gtk.ProgressBar()\n        self.pbar.set_text('Working...')\n        vb.pack_start(self.label, False, True, 0)\n        vb.pack_start(self.pbar, False, True, 0)\n        self.show_all()\n    def set_label_text(self, msg):\n        self.pbar.set_text(msg)\n        self.pbar.pulse()\n    def set_n_jobs(self, n):\n        self.label.set_text('%d data reduction job(s) running...' % n)\n\n", "repo_name": "awacha/SAXSCtrl", "sub_path": "retired_code/data_reduction_setup.py", "file_name": 
_do_sth_slow(input_list):\n await asyncio.sleep(0.1)\n return [_test_func(i) for i in input_list]\n\n inputs = [i for i in range(100)]\n time_st = time.time()\n outputs = await asyncio.gather(*(_do_sth_slow(i) for i in inputs))\n assert time.time() - time_st < 1\n assert [o == _test_func(i) for i, o in zip(inputs, outputs)]\n\n\nif __name__ == \"__main__\":\n asyncio.get_event_loop().run_until_complete(test_parade_dispatcher())\n", "repo_name": "chengs369/BentoML", "sub_path": "tests/marshal/test_marshal.py", "file_name": "test_marshal.py", "file_ext": "py", "file_size_in_byte": 701, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "asyncio.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "bentoml.marshal.marshal.ParadeDispatcher", "line_number": 13, "usage_type": "call"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "21482725961", "text": "import os\nimport pathlib\nimport tempfile\n\nfrom c3s_eqc_automatic_quality_control import dashboard\n\n\ndef test_get_logger() -> None:\n logger_name = \"test-logger\"\n logger = dashboard.get_logger(logger_name)\n assert logger.name == logger_name\n\n\ndef test_set_log_file() -> None:\n logger_name = \"test-logger\"\n logger_text = \"TEST EQC\"\n logger = dashboard.get_logger(logger_name)\n with tempfile.NamedTemporaryFile(delete=False) as t:\n name = t.name\n logger = dashboard.set_logfile(logger, pathlib.Path(name))\n logger.info(logger_text)\n with open(name) as f:\n assert logger_text in f.readline()\n os.remove(name)\n assert not os.path.exists(name)\n\n\ndef test_ensure_log_dir() -> None:\n with tempfile.TemporaryDirectory() as tempdir:\n original_eqc_etc = os.environ.get(dashboard.EQC_AQC_ENV_VARNAME)\n os.environ[dashboard.EQC_AQC_ENV_VARNAME] = tempdir\n log_dir = dashboard.ensure_log_dir()\n assert log_dir.is_dir()\n if original_eqc_etc is None:\n os.environ.pop(dashboard.EQC_AQC_ENV_VARNAME)\n else:\n os.environ[dashboard.EQC_AQC_ENV_VARNAME] = original_eqc_etc\n\n\ndef test_get_eqc_run_logger() -> None:\n logger_name = \"logger-name\"\n logger = dashboard.get_eqc_run_logger(logger_name)\n assert f\"_{logger_name}.log\" in getattr(logger.handlers[0], \"baseFilename\")\n", "repo_name": "bopen/c3s-eqc-automatic-quality-control", "sub_path": "tests/test_50_dashboard.py", "file_name": "test_50_dashboard.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "c3s_eqc_automatic_quality_control.dashboard.get_logger", "line_number": 10, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 10, "usage_type": "name"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.get_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 17, "usage_type": "name"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 18, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.set_logfile", "line_number": 20, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 20, "usage_type": "name"}, {"api_name": 
"pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 29, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.EQC_AQC_ENV_VARNAME", "line_number": 30, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 30, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.EQC_AQC_ENV_VARNAME", "line_number": 31, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 31, "usage_type": "name"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.ensure_log_dir", "line_number": 32, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 32, "usage_type": "name"}, {"api_name": "os.environ.pop", "line_number": 35, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.EQC_AQC_ENV_VARNAME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 35, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.EQC_AQC_ENV_VARNAME", "line_number": 37, "usage_type": "attribute"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 37, "usage_type": "name"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard.get_eqc_run_logger", "line_number": 42, "usage_type": "call"}, {"api_name": "c3s_eqc_automatic_quality_control.dashboard", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "20884322098", "text": "import typing as tp\n\n\ndef filter_list_by_list(a: tp.Sequence[int], b: tp.Sequence[int]) \\\n -> tp.List[int]:\n \"\"\"\n Filter first sorted lists by other sorted list\n :param a: first sorted list\n :param b: second sorted list\n :return: filtered sorted list\n \"\"\"\n arr = []\n p1 = 0\n p2 = 0\n while p1 < len(a) and p2 < len(b):\n if a[p1] < b[p2]:\n arr.append(a[p1])\n p1 += 1\n elif a[p1] == b[p2]:\n p1 += 1\n else:\n p2 += 1\n else:\n while p1 < len(a):\n arr.append(a[p1])\n p1 += 1\n return arr\n", "repo_name": "PredelinaAsya/Python_course_hw", "sub_path": "filter_list_by_list/filter_list_by_list.py", "file_name": "filter_list_by_list.py", "file_ext": "py", "file_size_in_byte": 616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Sequence", "line_number": 4, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 5, "usage_type": "attribute"}]} +{"seq_id": "26789020045", "text": "import os\nfrom importlib import import_module\nfrom inspect import getmembers, isclass\nfrom yaml import safe_load\nimport warnings\nfrom collections import OrderedDict\nfrom six import string_types\n\n# FIXME: Bad practice\n# Logger settings should not be defined in a module, but once by the\n# application developer. Thus outside of basil. 
Otherwise multiple calls to\n# the basic config are possible. This is left here at the moment for backward\n# compatibility and since our logging format is the same everywhere (?).\nimport logging\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s\")\n\n\nclass Base(object):\n def __init__(self, conf):\n self.name = None\n self.version = None\n self.conf_path = None\n self.parent = None\n self._init = {}\n self._conf = self._open_conf(conf)\n if 'name' in self._conf:\n self.name = self._conf['name']\n if 'version' in self._conf:\n self.version = self._conf['version']\n if 'conf_path' in self._conf:\n self.conf_path = self._conf['conf_path']\n if 'parent' in self._conf:\n self.parent = self._conf['parent']\n if 'init' in self._conf:\n self._update_init(self._conf['init'])\n\n def _open_conf(self, conf):\n def isFile(f):\n return hasattr(f, 'read')\n\n conf_dict = {}\n if not conf:\n pass\n elif isinstance(conf, string_types): # parse the first YAML document in a stream\n if os.path.isfile(conf):\n with open(conf, 'r') as f:\n conf_dict.update(safe_load(f))\n conf_dict.update(conf_path=f.name)\n else: # YAML string\n try:\n conf_dict.update(safe_load(conf))\n except ValueError: # invalid path/filename\n raise IOError(\"File not found: %s\" % conf)\n elif isFile(conf): # parse the first YAML document in a stream\n conf_dict.update(safe_load(conf))\n conf_dict.update(conf_path=conf.name)\n else: # conf is already a dict\n conf_dict.update(conf)\n return conf_dict\n\n def _update_init(self, init_conf=None, **kwargs):\n init_conf = self._open_conf(init_conf)\n if init_conf:\n self._init.update(init_conf)\n self._init.update(kwargs)\n\n def init(self):\n self._initialized = True\n\n @property\n def is_initialized(self):\n if \"_initialized\" in self.__dict__ and self._initialized:\n return True\n else:\n return False\n\n def close(self):\n self._initialized = False\n\n def set_configuration(self, conf):\n raise NotImplementedError(\"set_configuration() not implemented\")\n\n def get_configuration(self):\n raise NotImplementedError(\"get_configuration() not implemented\")\n\n\nclass Dut(Base):\n '''Device\n '''\n def __init__(self, conf):\n super(Dut, self).__init__(conf)\n self._transfer_layer = None\n self._hardware_layer = None\n self._registers = None\n self.load_hw_configuration(self._conf)\n\n def init(self, init_conf=None, **kwargs):\n super(Dut, self).init()\n init_conf = self._open_conf(init_conf)\n\n def update_init(mod):\n if init_conf:\n if mod.name in init_conf:\n mod._update_init(init_conf[mod.name])\n if mod.name in kwargs:\n mod._update_init(kwargs[mod.name])\n\n def catch_exception_on_init(mod):\n try:\n mod.init()\n except NotImplementedError:\n pass\n\n for item in self._transfer_layer.values():\n update_init(item)\n catch_exception_on_init(item)\n for item in self._hardware_layer.values():\n update_init(item)\n catch_exception_on_init(item)\n for item in self._registers.values():\n update_init(item)\n catch_exception_on_init(item)\n\n def close(self):\n def catch_exception_on_close(mod):\n if mod.is_initialized:\n try:\n mod.close()\n except Exception: # if close() failed\n # restore status after close() failed\n mod._is_initialized = True\n\n for item in self._registers.values():\n catch_exception_on_close(item)\n for item in self._hardware_layer.values():\n catch_exception_on_close(item)\n for item in self._transfer_layer.values():\n catch_exception_on_close(item)\n\n def set_configuration(self, 
conf):\n conf = self._open_conf(conf)\n if conf:\n for item, item_conf in conf.items():\n if item != 'conf_path':\n try:\n self[item].set_configuration(item_conf)\n except NotImplementedError:\n pass\n\n def get_configuration(self):\n conf = {}\n for key, value in self._registers.items():\n try:\n conf[key] = value.get_configuration()\n except NotImplementedError:\n conf[key] = {}\n for key, value in self._hardware_layer.items():\n try:\n conf[key] = value.get_configuration()\n except NotImplementedError:\n conf[key] = {}\n for key, value in self._transfer_layer.items():\n try:\n conf[key] = value.get_configuration()\n except NotImplementedError:\n conf[key] = {}\n return conf\n\n def load_hw_configuration(self, conf, extend_config=False):\n conf = self._open_conf(conf)\n if extend_config:\n self._conf.update(conf)\n else:\n self._conf = conf\n\n if not extend_config:\n if 'name' in self._conf:\n self.name = self._conf['name']\n else:\n self.name = None\n if 'version' in self._conf:\n self.version = self._conf['version']\n else:\n self.version = None\n self._transfer_layer = OrderedDict()\n self._hardware_layer = OrderedDict()\n self._registers = OrderedDict()\n\n if 'transfer_layer' in conf:\n for intf in conf['transfer_layer']:\n intf['parent'] = self\n kargs = {}\n kargs['conf'] = intf\n self._transfer_layer[intf['name']] = self._factory('basil.TL.' + intf['type'], *(), **kargs)\n\n if 'hw_drivers' in conf:\n if conf['hw_drivers']:\n for hwdrv in conf['hw_drivers']:\n hwdrv['parent'] = self\n kargs = {}\n if 'interface' in hwdrv:\n if hwdrv['interface'].lower() == 'none':\n kargs['intf'] = None\n else:\n kargs['intf'] = self._transfer_layer[hwdrv['interface']]\n elif 'hw_driver' in hwdrv:\n kargs['intf'] = self._hardware_layer[hwdrv['hw_driver']]\n else:\n kargs['intf'] = None\n kargs['conf'] = hwdrv\n self._hardware_layer[hwdrv['name']] = self._factory('basil.HL.' + hwdrv['type'], *(), **kargs)\n\n if 'user_drivers' in conf:\n warnings.warn(\"Deprecated: user_drivers move modules to hw_drivers\", DeprecationWarning)\n if conf['user_drivers']:\n for userdrv in conf['user_drivers']:\n userdrv['parent'] = self\n kargs = {}\n kargs['intf'] = self._hardware_layer[userdrv['hw_driver']]\n kargs['conf'] = userdrv\n self._hardware_layer[userdrv['name']] = self._factory('basil.HL.' + userdrv['type'], *(), **kargs)\n\n if 'registers' in conf:\n if conf['registers']:\n for reg in conf['registers']:\n reg['parent'] = self\n kargs = {}\n if 'driver' in reg:\n if not reg['driver'] or reg['driver'].lower() == 'none':\n kargs['driver'] = None\n else:\n kargs['driver'] = self._hardware_layer[reg['driver']]\n kargs['conf'] = reg\n self._registers[reg['name']] = self._factory('basil.RL.' + reg['type'], *(), **kargs)\n elif 'hw_driver' in reg:\n kargs['driver'] = self._hardware_layer[reg['hw_driver']]\n kargs['conf'] = reg\n self._registers[reg['name']] = self._factory('basil.RL.' 
+ reg['type'], *(), **kargs)\n else:\n raise ValueError('No driver specified for register: %s' % (reg['name'],))\n\n def _factory(self, importname, *args, **kargs):\n splitted_import_name = importname.split('.')\n\n def is_basil_base_class(item):\n return isclass(item) and issubclass(item, Base) and item.__module__ == importname\n\n try:\n mod = import_module(importname)\n except ImportError: # give it another try\n if len(splitted_import_name) > 2 and splitted_import_name[0] == 'basil':\n importname = '.'.join(splitted_import_name[2:]) # remove \"basil.RL.\" etc.\n mod = import_module(importname)\n else: # raise initial exception\n raise\n basil_base_classes = getmembers(mod, is_basil_base_class)\n cls = None\n if not basil_base_classes: # found no base class\n raise ValueError('Found no matching class in %s.' % importname)\n elif len(basil_base_classes) > 1: # found more than 1 base class\n mod_name = splitted_import_name[-1]\n for basil_base_class in basil_base_classes:\n if mod_name == basil_base_class[0]: # check for base class name\n cls = basil_base_class[1]\n break\n if cls is None:\n raise ValueError('Found more than one matching class in %s.' % importname)\n else: # found single class\n cls = basil_base_classes[0][1]\n return cls(*args, **kargs)\n\n def __getitem__(self, item):\n if item in self._registers:\n return self._registers[item]\n elif item in self._hardware_layer:\n return self._hardware_layer[item]\n elif item in self._transfer_layer:\n return self._transfer_layer[item]\n raise KeyError('Item not existing: %s' % (item,))\n\n def get_modules(self, type_name):\n '''Getting modules by type name.\n\n Parameters\n ----------\n type_name : string\n Type name of the modules to be returned.\n\n Returns\n -------\n List of modules of given type name else empty list.\n '''\n modules = []\n for module in self:\n if module.__class__.__name__ == type_name:\n modules.append(module)\n return modules\n\n def __iter__(self):\n for item in self._registers.values():\n yield item\n for item in self._hardware_layer.values():\n yield item\n for item in self._transfer_layer.values():\n yield item\n\n # TODO:\n def __setitem__(self, key, value):\n self._registers[key].set(value)\n\n def __repr__(self):\n return str(self.get_configuration())\n", "repo_name": "SiLab-Bonn/basil", "sub_path": "basil/dut.py", "file_name": "dut.py", "file_ext": "py", "file_size_in_byte": 11675, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 15, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 44, "usage_type": "argument"}, {"api_name": "os.path.isfile", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 47, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 51, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 55, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 185, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 186, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 187, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 214, "usage_type": "call"}, {"api_name": "inspect.isclass", "line_number": 246, "usage_type": "call"}, {"api_name": 
"importlib.import_module", "line_number": 249, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 253, "usage_type": "call"}, {"api_name": "inspect.getmembers", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "22680521003", "text": "from qdrant_client import QdrantClient\nfrom qdrant_client.http.models import SearchRequest\nfrom sentence_transformers import SentenceTransformer\nimport streamlit as st\nimport os\nimport logging\nfrom itertools import chain\n\n\nlogging.basicConfig(level=logging.WARNING)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.WARNING)\n\nCOLLECTION_NAME = \"restaurant_review_answers\"\n\n\n@st.cache_resource\ndef get_qdrant_connection():\n host = os.getenv(\"QDRANT_HOST\")\n port = os.getenv(\"QDRANT_PORT\")\n return QdrantClient(host, port=port)\n\n\n@st.cache_resource\ndef get_embedding_model():\n return SentenceTransformer(\"all-MiniLM-L6-v2\")\n\n\ndef search(selected_list, df):\n if not selected_list:\n return\n\n qdrant = get_qdrant_connection()\n model = get_embedding_model()\n\n # Take text\n selected_rows = df.iloc[selected_list, :]\n selected_rows_list = selected_rows[\"body\"].to_list()\n\n # Transform text\n embeddings = model.encode(selected_rows_list)\n\n logger.debug(f\"embeddings length: {len(embeddings)}\")\n\n # Prepare and send requests\n requests = [\n SearchRequest(vector=vector.tolist(), limit=1, with_payload=True)\n for vector in embeddings\n ]\n\n hits_not_flattened = qdrant.search_batch(\n collection_name=COLLECTION_NAME,\n requests=requests,\n )\n\n # Hits flatten\n hits = list(chain.from_iterable(hits_not_flattened))\n\n if logger.level == logging.DEBUG:\n logger.debug(\"PRINTING HITS\")\n for i, hit in enumerate(hits, 1):\n logger.debug(f\"hit {i}: {hit}\")\n\n result = [\n {\"Message\": text, \"Score\": hit.score, \"Answer\": hit.payload[\"answer\"]}\n for text, hit in zip(selected_rows_list, hits)\n ]\n\n if logger.level == logging.DEBUG:\n logger.debug(\"PRINTING Result\")\n for i, item in enumerate(result, 1):\n logger.debug(f\"result {i}: {item}\")\n\n return result\n", "repo_name": "lince098/MBD_NLP_Challenge_Restaurant_Reviews", "sub_path": "challenge_functions/answer_reviews.py", "file_name": "answer_reviews.py", "file_ext": "py", "file_size_in_byte": 1913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 20, "usage_type": "call"}, {"api_name": "qdrant_client.QdrantClient", "line_number": 21, "usage_type": "call"}, {"api_name": "streamlit.cache_resource", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sentence_transformers.SentenceTransformer", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlit.cache_resource", "line_number": 24, "usage_type": "attribute"}, {"api_name": "qdrant_client.http.models.SearchRequest", "line_number": 47, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 57, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 57, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 59, 
"usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 69, "usage_type": "attribute"}]} +{"seq_id": "6204096150", "text": "from torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\n\n# Convolution Auto Encoder\nclass LeNetAE28(nn.Module):\n def __init__(self):\n super(LeNetAE28, self).__init__()\n\n # Feature Extractor\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(1, 20, 5, stride=1), # (b, 20, 24, 24)\n nn.ReLU(),\n nn.MaxPool2d(2, stride=2), # (b, 20, 12, 12)\n nn.Conv2d(20, 50, 5, stride=1), # (b, 50, 8, 8)\n nn.ReLU(),\n nn.MaxPool2d(2, stride=2) # (b, 50, 4, 4)\n )\n\n self.fc = nn.Sequential(\n nn.Linear(800, 500),\n nn.ReLU(),\n nn.Linear(500, 500),\n nn.ReLU(),\n nn.Linear(500, 10)\n )\n\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n init.xavier_uniform_(module.weight)\n\n def forward(self, x):\n feature = self.feature_extractor(x)\n feature = feature.flatten(start_dim=1)\n res = self.fc(feature)\n return res\n\nclass CUDNN(nn.Module):\n def __init__(self):\n super(CUDNN, self).__init__()\n\n # Feature Extractor\n self.feature_extractor = nn.Sequential(\n nn.Conv2d(3, 64, 5),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # (b, 64, 14, 14)\n nn.Dropout2d(.25),\n nn.Conv2d(64, 128, 3),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # (b, 128, 6, 6)\n nn.Dropout2d(.25),\n nn.Conv2d(128, 256, 3),\n nn.ReLU(),\n nn.MaxPool2d(2),\n # (b, 256, 2, 2)\n nn.Dropout2d(.25),\n nn.Conv2d(256, 128, 2),\n nn.ReLU()\n # (b, 128, 1, 1)\n )\n\n self.fc = nn.Sequential(\n nn.Linear(128, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 10)\n )\n\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n init.xavier_uniform_(module.weight)\n\n def forward(self, x):\n feature = self.feature_extractor(x)\n feature = feature.flatten(start_dim=1)\n res = self.fc(feature)\n return res\n\n\nclass IRNet(nn.Module):\n def __init__(self, fe=None, num_mac=4, inter_dim=50):\n super(IRNet, self).__init__()\n\n # Feature Extractor\n self.feature_extractor = fe\n self.mac_layer = nn.MaxPool2d(num_mac, stride=1)\n self.dim = inter_dim\n\n for module in self.modules():\n if isinstance(module, nn.Conv2d):\n init.xavier_uniform_(module.weight)\n\n def forward(self, x):\n if isinstance(x, tuple):\n x1, x2 = x\n feature_x = self.feature_extractor(x1)\n feature_y = self.feature_extractor(x2)\n mvx = self.mac_layer(feature_x).view(-1, self.dim)\n mvy = self.mac_layer(feature_y).view(-1, self.dim)\n mvx = F.normalize(mvx, p=2, dim=1)\n mvy = F.normalize(mvy, p=2, dim=1)\n\n return mvx, mvy\n else:\n feature = self.feature_extractor(x)\n mac_vector = self.mac_layer(feature)\n mac_vector = mac_vector.view(-1, self.dim)\n mac_vector = F.normalize(mac_vector, p=2, dim=1)\n\n return mac_vector\n\n\n", "repo_name": "voidstrike/AdversarialAttack", "sub_path": "src/Model.py", "file_name": "Model.py", "file_ext": "py", "file_size_in_byte": 3361, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 14, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 
57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 74, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 94, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 95, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "33643551442", "text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport time\nimport urllib\nfrom urllib.request import urlretrieve\n\nimport arxiv\nimport click\nfrom dateutil.parser import parse\nfrom logzero import logger\nfrom pybtex import database\nfrom lxml import etree\n\n\ndef reporthook(count, block_size, total_size):\n global start_time\n if count == 0:\n start_time = time.time()\n return\n duration = time.time() - start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / (1024 * duration))\n percent = int(count * block_size * 100 / total_size)\n sys.stdout.write(\n \"\\r{}%, {} KB, {} KB/s, {:.1f} seconds passed\".format(min(percent, 100), progress_size / 1024, speed, duration))\n sys.stdout.flush()\n\n\ndef 
download_from_arxiv(url, dirpath='.'):\n if url.endswith('.pdf'):\n paper_id = os.path.splitext(os.path.basename(url))[0]\n else:\n paper_id = os.path.basename(url)\n paper = arxiv.query(id_list=[paper_id])[0]\n\n def custom_slugify(obj):\n author_last_name = obj['authors'][0].strip().split(' ')[-1]\n year = parse(obj['published']).year\n title = obj['title'].strip().replace('\\n', '')\n logger.info('Download \"{}\" from \"{}\"'.format(title, obj['pdf_url']))\n return '[{}+{}] {}'.format(author_last_name, year, title)\n\n if not paper.get('pdf_url', ''):\n print(\"Object has no PDF URL.\")\n return\n\n path = os.path.join(dirpath, custom_slugify(paper) + '.pdf')\n urlretrieve(paper['pdf_url'], path, reporthook=reporthook)\n return path\n\n\ndef download_from_acl(url, dirpath='.'):\n if url.endswith('.pdf'):\n url = url[:-4] # strip '.pdf'\n\n # get filename\n bib_url = url.strip('\\n').rstrip('/') + '.bib'\n bib = urllib.request.urlopen(bib_url).read().decode('utf-8')\n bib_database = database.parse_string(bib, bib_format='bibtex')\n author_lastname = bib_database.entries.values()[0].persons['author'][0].last()[0]\n year = bib_database.entries.values()[0].fields['year'].strip()\n title = bib_database.entries.values()[0].fields['title'].strip()\n out_name = '[{}+{}] {}.pdf'.format(author_lastname, year, title).replace('{', '').replace('}', '')\n\n # get authorname\n path = os.path.join(dirpath, out_name)\n pdf_url = url.strip('\\n').rstrip('/') + '.pdf'\n logger.info('Download \"{}\" from \"{}\"'.format(title, pdf_url))\n urlretrieve(pdf_url, path, reporthook=reporthook)\n return path\n\n\ndef download_from_openreview(url, dirpath='.'):\n url = url.rstrip('\\n')\n if '/pdf?' in url:\n url = url.replace('/pdf?', '/forum?')\n page_source = urllib.request.urlopen(url).read().decode('utf-8')\n xml = etree.fromstring(page_source, parser=etree.HTMLParser())\n bib = xml.xpath('//a[@class=\"action-bibtex-modal\"]/@data-bibtex')[0]\n bib_database = database.parse_string(bib, bib_format='bibtex')\n author_lastname = bib_database.entries.values()[0].persons['author'][0].last()[0]\n year = bib_database.entries.values()[0].fields['year'].strip()\n title = bib_database.entries.values()[0].fields['title'].strip()\n out_name = '[{}+{}] {}.pdf'.format(author_lastname, year, title).replace('{', '').replace('}', '')\n\n path = os.path.join(dirpath, out_name)\n pdf_url = url.replace('/forum?', '/pdf?')\n logger.info('Download \"{}\" from \"{}\"'.format(title, pdf_url))\n urlretrieve(pdf_url, path, reporthook=reporthook)\n return path\n\n\n@click.command()\n@click.argument('urls', type=str, nargs=-1)\n@click.option(\n '-o',\n '--out',\n default=None,\n type=click.Path(),\n help='path to save pdf'\n)\ndef main(urls, out):\n if out is None:\n if 'ARXIV_OUT' in os.environ:\n out = os.environ['ARXIV_OUT']\n else:\n out = '.'\n logger.info('Save PDF(s) to {}'.format(out))\n\n for url in urls:\n if 'arxiv' in url:\n download_from_arxiv(url, dirpath=out)\n elif 'aclweb' in url:\n download_from_acl(url, dirpath=out)\n elif 'openreview' in url:\n download_from_openreview(url, dirpath=out)\n else:\n raise NotImplementedError\n", "repo_name": "butsugiri/download_arxiv", "sub_path": "download_arxiv.py", "file_name": "download_arxiv.py", "file_ext": "py", "file_size_in_byte": 4117, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": 
"call"}, {"api_name": "sys.stdout.write", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "arxiv.query", "line_number": 35, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 39, "usage_type": "call"}, {"api_name": "logzero.logger.info", "line_number": 41, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 41, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "urllib.request.urlretrieve", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 59, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pybtex.database.parse_string", "line_number": 60, "usage_type": "call"}, {"api_name": "pybtex.database", "line_number": 60, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "logzero.logger.info", "line_number": 69, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 69, "usage_type": "name"}, {"api_name": "urllib.request.urlretrieve", "line_number": 70, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 78, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 78, "usage_type": "attribute"}, {"api_name": "lxml.etree.fromstring", "line_number": 79, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 79, "usage_type": "name"}, {"api_name": "lxml.etree.HTMLParser", "line_number": 79, "usage_type": "call"}, {"api_name": "pybtex.database.parse_string", "line_number": 81, "usage_type": "call"}, {"api_name": "pybtex.database", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "logzero.logger.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 89, "usage_type": "name"}, {"api_name": "urllib.request.urlretrieve", "line_number": 90, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 106, "usage_type": "attribute"}, {"api_name": "logzero.logger.info", "line_number": 109, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 109, "usage_type": "name"}, {"api_name": "click.command", "line_number": 94, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 95, "usage_type": "call"}, {"api_name": "click.option", "line_number": 96, "usage_type": "call"}, {"api_name": "click.Path", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "73304890405", "text": "from django.conf import settings\nfrom django.core.mail import EmailMessage\n\n\n\n# EMAIL OTP 
TEMPLATE - FINFLO TESTING ENV\n\n\ndef email_to(to_email,key):\n \n subject = \"OTP for Finflo Login \"\n\n message = \"\"\"\n Dear {0},\n your otp to login in finflo is {1} .\n if any support needed contact support@finflo.com\n \"\"\".format(str(to_email),str(key))\n\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [to_email]\n cc_list = [\"anand98.ar@gmail.com\"]\n\n email = EmailMessage(\n subject,\n message,\n email_from,\n recipient_list,\n cc_list,\n )\n email.send(fail_silently=False)", "repo_name": "anandrajB/procure_to_pay", "sub_path": "accounts/email.py", "file_name": "email.py", "file_ext": "py", "file_size_in_byte": 650, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.conf.settings.EMAIL_HOST_USER", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "django.core.mail.EmailMessage", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "36358775658", "text": "import logging\n\nfrom fastapi import FastAPI, Depends, HTTPException\nfrom sqlalchemy.orm import Session\nfrom starlette import status\nfrom starlette.status import HTTP_200_OK, HTTP_204_NO_CONTENT\n\nfrom config import WebConfig\nfrom database import get_db\nfrom errors import NotFoundException, WrongDifficultyException\nfrom services.category_service import CategoryService\nfrom services.question_service.question_service import QuestionService\nfrom web.schemas import CategoryListResponse, CategoryResponse, QuestionResponse, AnswerRequest, TermResponse\n\napp = FastAPI()\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\n@app.get(f\"{WebConfig.ROUTE_PREFIX}/categories\", status_code=HTTP_200_OK, response_model=CategoryListResponse)\nasync def get_categories(db: Session = Depends(get_db)):\n service = CategoryService.build(db)\n\n categories = service.get_all()\n res = [CategoryResponse(id=category.id, name=category.name) for category in categories]\n\n return CategoryListResponse(categories=res)\n\n\n@app.get(WebConfig.ROUTE_PREFIX + \"/questions/{category_id}\", status_code=HTTP_200_OK, response_model=QuestionResponse)\nasync def get_question(category_id: int, db: Session = Depends(get_db)):\n service = QuestionService.build(db)\n try:\n question = service.create_question(category_id)\n except NotFoundException:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"wrong category id\")\n\n return QuestionResponse(\n question=question.question,\n correct=TermResponse(id=question.correct.id, name=question.correct.name),\n answers=question.answers\n )\n\n\n@app.post(f\"{WebConfig.ROUTE_PREFIX}/answers\", status_code=HTTP_204_NO_CONTENT)\nasync def get_question(data: AnswerRequest, db: Session = Depends(get_db)):\n service = QuestionService.build(db)\n try:\n service.answer_question(data)\n db.commit()\n except NotFoundException:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"wrong term id\")\n finally:\n db.rollback()\n\n\n@app.get(WebConfig.ROUTE_PREFIX + \"/questions/\", status_code=HTTP_200_OK, response_model=QuestionResponse)\nasync def get_question_with_difficulty(category_id: int, difficulty: float, db: Session = Depends(get_db)):\n service = QuestionService.build(db)\n try:\n question = service.create_question_with_given_difficulty(category_id, difficulty)\n except NotFoundException:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"wrong category id\")\n except 
WrongDifficultyException:\n raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n detail=\"difficulty should be between 0 and 1\")\n return QuestionResponse(\n question=question.question,\n correct=TermResponse(id=question.correct.id, name=question.correct.name),\n answers=question.answers\n )\n", "repo_name": "elacymerys/thesis", "sub_path": "thesis-api/main/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 2888, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.FastAPI", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 21, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 21, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 21, "usage_type": "argument"}, {"api_name": "services.category_service.CategoryService.build", "line_number": 22, "usage_type": "call"}, {"api_name": "services.category_service.CategoryService", "line_number": 22, "usage_type": "name"}, {"api_name": "web.schemas.CategoryResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "web.schemas.CategoryListResponse", "line_number": 27, "usage_type": "call"}, {"api_name": "config.WebConfig.ROUTE_PREFIX", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.WebConfig", "line_number": 20, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 20, "usage_type": "name"}, {"api_name": "web.schemas.CategoryListResponse", "line_number": 20, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 31, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 31, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 31, "usage_type": "argument"}, {"api_name": "services.question_service.question_service.QuestionService.build", "line_number": 32, "usage_type": "call"}, {"api_name": "services.question_service.question_service.QuestionService", "line_number": 32, "usage_type": "name"}, {"api_name": "errors.NotFoundException", "line_number": 35, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 36, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 36, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 36, "usage_type": "name"}, {"api_name": "web.schemas.QuestionResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "web.schemas.TermResponse", "line_number": 40, "usage_type": "call"}, {"api_name": "config.WebConfig.ROUTE_PREFIX", "line_number": 30, "usage_type": "attribute"}, {"api_name": "config.WebConfig", "line_number": 30, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 30, "usage_type": "name"}, {"api_name": "web.schemas.QuestionResponse", "line_number": 30, "usage_type": "name"}, {"api_name": "web.schemas.AnswerRequest", "line_number": 46, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 46, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 46, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 46, "usage_type": "argument"}, {"api_name": "services.question_service.question_service.QuestionService.build", "line_number": 47, "usage_type": "call"}, {"api_name": 
"services.question_service.question_service.QuestionService", "line_number": 47, "usage_type": "name"}, {"api_name": "errors.NotFoundException", "line_number": 51, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 52, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 52, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 52, "usage_type": "name"}, {"api_name": "config.WebConfig.ROUTE_PREFIX", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config.WebConfig", "line_number": 45, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_204_NO_CONTENT", "line_number": 45, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 58, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 58, "usage_type": "call"}, {"api_name": "database.get_db", "line_number": 58, "usage_type": "argument"}, {"api_name": "services.question_service.question_service.QuestionService.build", "line_number": 59, "usage_type": "call"}, {"api_name": "services.question_service.question_service.QuestionService", "line_number": 59, "usage_type": "name"}, {"api_name": "errors.NotFoundException", "line_number": 62, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 63, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_404_NOT_FOUND", "line_number": 63, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 63, "usage_type": "name"}, {"api_name": "errors.WrongDifficultyException", "line_number": 64, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 65, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_422_UNPROCESSABLE_ENTITY", "line_number": 65, "usage_type": "attribute"}, {"api_name": "starlette.status", "line_number": 65, "usage_type": "name"}, {"api_name": "web.schemas.QuestionResponse", "line_number": 67, "usage_type": "call"}, {"api_name": "web.schemas.TermResponse", "line_number": 69, "usage_type": "call"}, {"api_name": "config.WebConfig.ROUTE_PREFIX", "line_number": 57, "usage_type": "attribute"}, {"api_name": "config.WebConfig", "line_number": 57, "usage_type": "name"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 57, "usage_type": "name"}, {"api_name": "web.schemas.QuestionResponse", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "44820377788", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n视频信息爬取脚本\r\n参数为某视频的BV号\r\n因标签无法与视频信息同时获得,故需另外爬取。\r\n且MySQL数据库无法存储列表,故将标签列表放置与video_tag表中。\r\nvideo_info表与video_tag通过BV_bid进行连接\r\nget_tag_video_info为通过api获得视频标签\r\nget_tag_video_info_1_list、get_tag_video_info_1_data为通过网页原码获得视频标签\r\n\"\"\"\r\n__author__ = 'OtakuNio'\r\n\r\nimport time\r\nimport pandas as pd\r\nfrom lxml import etree\r\nfrom sqlalchemy import create_engine\r\nfrom get_response_with_proxy_ip import get_response\r\n\r\n\r\ndef video_info_main(bid):\r\n print(\"video_info\")\r\n my_url1 = 'https://api.bilibili.com/x/web-interface/view?bvid={}'.format(bid)\r\n # my_url2 = 'https://api.bilibili.com/x/tag/archive/tags?bvid={}'.format(bid)\r\n my_url3 = 'https://www.bilibili.com/video/{}'.format(bid)\r\n save_to_database_video_info(pd.DataFrame(data=get_data_video_info(get_response(my_url1).json()['data']), index=[1]),\r\n 'video_info')\r\n save_to_database_video_info(\r\n pd.DataFrame(data=get_tag_data_video_info(get_tag_list_video_info(get_response(my_url3)), bid), index=[1]),\r\n 'video_tag')\r\n print(\"video_info-finish\")\r\n\r\n\r\ndef 
get_data_video_info(response_json):\r\n video_info_data = {\r\n 'BV_bid': response_json['bvid'],\r\n 'aid': response_json['aid'],\r\n 'title': response_json['title'],\r\n 'uploader': response_json['owner']['name'],\r\n 'UP_mid': response_json['owner']['mid'],\r\n 'description': response_json['desc'],\r\n 'pubdate': time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(response_json['pubdate'])),\r\n 'duration_time': response_json['duration'],\r\n 'view': response_json['stat']['view'],\r\n 'danmu': response_json['stat']['danmaku'],\r\n 'reply': response_json['stat']['reply'],\r\n 'favorite': response_json['stat']['favorite'],\r\n 'coin': response_json['stat']['coin'],\r\n 'share': response_json['stat']['share'],\r\n 'like': response_json['stat']['like'],\r\n 'now_rank': response_json['stat']['now_rank'],\r\n 'history_rank': response_json['stat']['his_rank']\r\n }\r\n return video_info_data\r\n\r\n\r\ndef get_tag_video_info(response_json, BV_bid):\r\n tag_list = []\r\n for tag in response_json:\r\n tag_list.append(tag['tag_name'])\r\n while len(tag_list) < 10:\r\n tag_list.append('')\r\n tag_data = {\r\n 'BV_bid': BV_bid,\r\n 'tag0': tag_list[0],\r\n 'tag1': tag_list[1],\r\n 'tag2': tag_list[2],\r\n 'tag3': tag_list[3],\r\n 'tag4': tag_list[4],\r\n 'tag5': tag_list[5],\r\n 'tag6': tag_list[6],\r\n 'tag7': tag_list[7],\r\n 'tag8': tag_list[8],\r\n 'tag9': tag_list[9],\r\n }\r\n return tag_data\r\n\r\n\r\ndef get_tag_list_video_info(response):\r\n tag_list = []\r\n tree = etree.HTML(response.text)\r\n raw_tag_list = tree.xpath(\"//ul[@class='tag-area clearfix']/li\")\r\n count = 0\r\n for li in raw_tag_list:\r\n if len(li.xpath(\"./div/a/span/text()\")) != 0:\r\n tag_list.append((li.xpath(\"./div/a/span/text()\")[0].strip().replace(' ', '')))\r\n elif len(li.xpath(\"./a/span/text()\")) != 0:\r\n tag_list.append((li.xpath(\"./a/span/text()\")[0].strip().replace(' ', '')))\r\n elif len(li.xpath(\"./div/a/text()\")) != 0:\r\n tag_list.append((li.xpath(\"./div/a/text()\")[0].strip().replace(' ', '')))\r\n count = count + 1\r\n if count == 10:\r\n break\r\n while count < 10:\r\n tag_list.append('')\r\n count += 1\r\n return tag_list\r\n\r\n\r\ndef get_tag_data_video_info(tag_list, BV_bid):\r\n tag_data = {\r\n 'BV_bid': BV_bid,\r\n 'tag0': tag_list[0],\r\n 'tag1': tag_list[1],\r\n 'tag2': tag_list[2],\r\n 'tag3': tag_list[3],\r\n 'tag4': tag_list[4],\r\n 'tag5': tag_list[5],\r\n 'tag6': tag_list[6],\r\n 'tag7': tag_list[7],\r\n 'tag8': tag_list[8],\r\n 'tag9': tag_list[9],\r\n }\r\n return tag_data\r\n\r\n\r\ndef save_to_database_video_info(df, table_name):\r\n # conn = create_engine(\"mysql+pymysql://{}:{}@{}:{}/{}?charset={}\".format('用户名', '密码', '数据库IP地址', '端口号', '数据库名',\r\n # '字符编码'))\r\n is_table_exist = True\r\n is_data_exist = True\r\n conn = create_engine(\r\n \"mysql+pymysql://{}:{}@{}:{}/{}?charset={}\".format('root', '123456', 'localhost', '3306', 'test', 'utf8mb4'))\r\n try:\r\n sql = \"select * from {} where Bv_bid='{}';\".format(table_name, df.loc[1, 'BV_bid'])\r\n if pd.read_sql_query(sql, conn).empty:\r\n is_data_exist = False\r\n except Exception:\r\n is_table_exist = False\r\n if (is_table_exist == False) or (is_data_exist == False):\r\n df.to_sql(table_name, conn, if_exists='append', index=False)\r\n else:\r\n try:\r\n sql = \"delete from {} where Bv_bid='{}'\".format(table_name, df.loc[1, 'BV_bid'])\r\n pd.read_sql_query(sql, conn)\r\n except Exception:\r\n df.to_sql(table_name, conn, if_exists='append', index=False)\r\n", "repo_name": "OtakuNio/bilibili-crawler", "sub_path": 
"video_part/video_info.py", "file_name": "video_info.py", "file_ext": "py", "file_size_in_byte": 5140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 25, "usage_type": "call"}, {"api_name": "get_response_with_proxy_ip.get_response", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "get_response_with_proxy_ip.get_response", "line_number": 28, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 41, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 41, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 80, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 80, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 121, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 125, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "38428362038", "text": "from keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nimport tensorflow\nimport numpy as np\nfrom matplotlib import pyplot\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom sklearn.datasets import make_regression\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom keras.applications.vgg19 import VGG19\n# from keras.applications.densenet.DenseNet169 import DenseNet169\nfrom keras_applications import densenet\n\nfrom sklearn.decomposition import PCA\n\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n\n\n\nfrom keras.applications.vgg16 import VGG16\nfrom keras.preprocessing import image\nfrom keras.applications.vgg16 import preprocess_input\nimport numpy as np\n\n\n# def CALC_SHOW_PCA_3D(Data, STRING_MODEL):\n# pca = PCA(n_components=3)\n# DataPCA = pca.fit(Data).transform(Data)\n# target_names = ['MRI', 'CT']\n# group1 = np.full((len(ImagesOfMri)), 0)#Mri Group=0\n# group2 = np.full((len(ImagesOfCt)), 1)#ct Group=0\n# Group = np.concatenate((group1, group2), axis=0, out=None)#all Groups\n# x = [x for (x ,y , z) in DataPCA]\n# y = [y for (x ,y , z) in DataPCA]\n# z=[z for (x ,y , z) in DataPCA]\n#\n# fig = pyplot.figure()\n# ax = Axes3D(fig)\n# colors = ['red', 'blue']\n# lw = 2\n#\n# for color, i, target_name in zip(colors, [0, 1], target_names):#Draw a 3D graph by the colored groups\n# ax.scatter(DataPCA[Group == i, 0], DataPCA[Group == i, 1], DataPCA[Group == i, 2], color=color, alpha=.8, lw=lw, label=target_name)\n# plt.legend(loc='best', shadow=False, scatterpoints=1)\n# plt.title('PCA of mri/ct '+STRING_MODEL)\n#\n# pyplot.show()\n\n# model = VGG16(weights='imagenet', include_top=False)\n# model = ResNet50(weights='imagenet')\n# model = VGG19(weights='imagenet')\nmodel = densenet.DenseNet169(weights='imagenet')\n\n\ndef get_features_images(images):\n features=[]\n for img in images:\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n features.append(model.predict(x)[0])\n return features\n\n\n\n\ndf = pd.read_csv('InputFiles/dataset.csv', names=['Images', 'Questions', 'Answers']) # open csv file and rename columns\n# predictions = pd.read_csv('InputFiles/VQAM.csv', names=['Images', 'Questions', 'Answers']) # open 
csv file and rename columns\n\n\n# # dictionary of replaceable words\nreplace_dict = {\"magnetic resonance imaging\": \"mri\",\n \"mri scan\": 'mri',\n \"MRI\": \"mri\",\n \"shows\": \"show\",\n \"reveal\": \"show\",\n \"demonstrate\": \"show\",\n \"CT\": \"ct\",\n \"ct scan\": \"ct\",\n \"does\": \"\", \"do \": \"\", \"the\": \"\",\n # \" a \":' ',' is ':' ',\n }\ndf.replace(to_replace=replace_dict, inplace=True, regex=True) # replace word\n\n\n\nImagesOfMri = df[(~df['Questions'].str.contains('mri|ct') & df['Questions'].str.contains('what') & df[\n 'Answers'].str.contains('mri')) == True]['Images']\nImagesOfCt = df[(~df['Questions'].str.contains('mri|ct') & df['Questions'].str.contains('what') & df[\n 'Answers'].str.contains('ct')) == True]['Images']\n\nImagesMri = [image.load_img(\"images\\Train-images\\\\\" + img + \".jpg\", target_size=(224, 224)) for img in ImagesOfMri]\nImagesCt = [image.load_img(\"images\\Train-images\\\\\" + img + \".jpg\", target_size=(224, 224)) for img in ImagesOfCt]\n\n\n\n\nfeaturesMri=get_features_images(ImagesMri)\nfeaturesCt=get_features_images(ImagesCt)\n\nsizeMri=len(featuresMri)\nsizeCt=len(featuresCt)\n\n\nTrainingData = np.concatenate((featuresMri[:int(sizeMri*0.8)], featuresCt[:int(sizeCt*0.8)]), axis=0, out=None)\ny = np.full((len(featuresMri[:int(sizeMri*0.8)])), 0)\ny1 = np.full((len(featuresCt[:int(sizeCt*0.8)])), 1)\nY = np.concatenate((y, y1), axis=0, out=None)\n# print(len(featuresCt))\n\n\n\n\n\n\nsizefeatur=len(featuresCt[0])\n\n# create model\nmodel = Sequential()\nmodel.add(Dense(12, input_dim=sizefeatur, init='uniform', activation='relu'))\nmodel.add(Dense(sizefeatur, init='uniform', activation='relu'))\nmodel.add(Dense(1, init='uniform', activation='sigmoid'))\n# Compile model\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n# Fit the model-The training proces\nmodel.fit(TrainingData, Y, epochs=150, batch_size=10, verbose=2)\n# calculate predictions\npredictions = model.predict(TrainingData)\n# round predictions\nrounded = [round(x[0]) for x in predictions]\nprint(rounded)\n\n\ny = np.full((len(featuresMri[int(sizeMri*0.8):])), 0)\ny1 = np.full((len(featuresCt[int(sizeCt*0.8):])), 1)\nYreal= np.concatenate((y, y1), axis=0, out=None)\n\n# new instances where we do not know the answer\nnewData= np.concatenate((featuresMri[int(sizeMri*0.8):], featuresCt[int(sizeCt*0.8):]), axis=0, out=None)\n\n# make a prediction-predict the class\nynew = model.predict_classes(newData)\n# show the inputs and predicted outputs\n#\nfor i in range(len(newData)):\n\t# print(\"X=%s, Predicted=%s\" % (newData[i], ynew[i]))\n\tprint(\"Real=%s,Predicted=%s\" % (Yreal[i], ynew[i]))\n # if Yreal[i]==ynew[i]:\n # n+=1\nn=[1 for i in range(len(newData)) if Yreal[i]==ynew[i]]\ncorrect=sum(n)\nprint(\"%s /%s Are correct \"%(correct,len(newData)))\n\n\n\n\n\n\n\n\n# CALC_SHOW_PCA_3D(dataFeatures,\"DL-VGG16\")\n\n\n", "repo_name": "shoamco/VQA-of-CT-and-MRI-in-deep-learning", "sub_path": "Research Process/DeepLearning.py", "file_name": "DeepLearning.py", "file_ext": "py", "file_size_in_byte": 5377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras_applications.densenet.DenseNet169", "line_number": 58, "usage_type": "call"}, {"api_name": "keras_applications.densenet", "line_number": 58, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", 
"line_number": 64, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.applications.vgg16.preprocess_input", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 98, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 98, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 99, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 125, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "29262916116", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport random\n\nfrom django.conf import settings\n\nfrom libs.utils import get_hmac\nfrom projects.paths import get_project_repos_path\nfrom spawners.base import get_pod_volumes\nfrom spawners.project_spawner import ProjectSpawner\nfrom spawners.templates import constants\nfrom spawners.templates import deployments\nfrom spawners.templates import pods\nfrom spawners.templates import services\nfrom spawners.templates import ingresses\n\nlogger = logging.getLogger('polyaxon.spawners.notebook')\n\n\nclass NotebookSpawner(ProjectSpawner):\n NOTEBOOK_JOB_NAME = 'notebook'\n PORT = 8888\n\n def get_notebook_url(self):\n return self._get_service_url(self.NOTEBOOK_JOB_NAME)\n\n def get_notebook_token(self):\n return get_hmac(settings.APP_LABELS_NOTEBOOK, self.project_uuid)\n\n @staticmethod\n def get_notebook_code_volume():\n volume = pods.get_volume(volume=constants.REPOS_VOLUME,\n claim_name=settings.REPOS_CLAIM_NAME,\n volume_mount=settings.REPOS_ROOT)\n\n volume_mount = pods.get_volume_mount(volume=constants.REPOS_VOLUME,\n volume_mount=settings.REPOS_ROOT)\n return volume, volume_mount\n\n def request_notebook_port(self):\n if not self._use_ingress():\n return self.PORT\n\n labels = 'app={},role={}'.format(settings.APP_LABELS_NOTEBOOK,\n settings.ROLE_LABELS_DASHBOARD)\n ports = [service.spec.ports[0].port for service in self.list_services(labels)]\n port = random.randint(*settings.NOTEBOOK_PORT_RANGE)\n while port in ports:\n port = random.randint(*settings.NOTEBOOK_PORT_RANGE)\n return port\n\n def start_notebook(self, image, resources=None):\n ports = [self.request_notebook_port()]\n target_ports = [self.PORT]\n volumes, volume_mounts = get_pod_volumes()\n code_volume, code_volume_mount = self.get_notebook_code_volume()\n volumes.append(code_volume)\n volume_mounts.append(code_volume_mount)\n deployment_name = 
constants.DEPLOYMENT_NAME.format(\n project_uuid=self.project_uuid, name=self.NOTEBOOK_JOB_NAME)\n notebook_token = self.get_notebook_token()\n notebook_url = self._get_proxy_url(\n namespace=self.namespace,\n job_name=self.NOTEBOOK_JOB_NAME,\n deployment_name=deployment_name,\n port=ports[0])\n notebook_dir = get_project_repos_path(self.project_name)\n notebook_dir = '{}/{}'.format(notebook_dir, notebook_dir.split('/')[-1])\n deployment = deployments.get_deployment(\n namespace=self.namespace,\n app=settings.APP_LABELS_NOTEBOOK,\n name=self.NOTEBOOK_JOB_NAME,\n project_name=self.project_name,\n project_uuid=self.project_uuid,\n volume_mounts=volume_mounts,\n volumes=volumes,\n image=image,\n command=[\"/bin/sh\", \"-c\"],\n args=[\n \"jupyter notebook \"\n \"--no-browser \"\n \"--port={port} \"\n \"--ip=0.0.0.0 \"\n \"--allow-root \"\n \"--NotebookApp.token={token} \"\n \"--NotebookApp.trust_xheaders=True \"\n \"--NotebookApp.base_url={base_url} \"\n \"--NotebookApp.notebook_dir={notebook_dir} \".format(\n port=self.PORT,\n token=notebook_token,\n base_url=notebook_url,\n notebook_dir=notebook_dir)],\n ports=target_ports,\n container_name=settings.CONTAINER_NAME_PLUGIN_JOB,\n resources=resources,\n role=settings.ROLE_LABELS_DASHBOARD,\n type=settings.TYPE_LABELS_EXPERIMENT)\n deployment_labels = deployments.get_labels(app=settings.APP_LABELS_NOTEBOOK,\n project_name=self.project_name,\n project_uuid=self.project_uuid,\n role=settings.ROLE_LABELS_DASHBOARD,\n type=settings.TYPE_LABELS_EXPERIMENT)\n\n self.create_or_update_deployment(name=deployment_name, data=deployment)\n service = services.get_service(\n namespace=self.namespace,\n name=deployment_name,\n labels=deployment_labels,\n ports=ports,\n target_ports=target_ports,\n service_type=self._get_service_type())\n\n self.create_or_update_service(name=deployment_name, data=service)\n\n if self._use_ingress():\n annotations = json.loads(settings.K8S_INGRESS_ANNOTATIONS)\n paths = [{\n 'path': '/notebook/{}'.format(self.project_name.replace('.', '/')),\n 'backend': {\n 'serviceName': deployment_name,\n 'servicePort': ports[0]\n }\n }]\n ingress = ingresses.get_ingress(namespace=self.namespace,\n name=deployment_name,\n labels=deployment_labels,\n annotations=annotations,\n paths=paths)\n self.create_or_update_ingress(name=deployment_name, data=ingress)\n\n def stop_notebook(self):\n deployment_name = constants.DEPLOYMENT_NAME.format(project_uuid=self.project_uuid,\n name=self.NOTEBOOK_JOB_NAME)\n self.delete_deployment(name=deployment_name)\n self.delete_service(name=deployment_name)\n if self._use_ingress():\n self.delete_ingress(name=deployment_name)\n", "repo_name": "MiguelPeralvo/polyaxon", "sub_path": "polyaxon/spawners/notebook_spawner.py", "file_name": "notebook_spawner.py", "file_ext": "py", "file_size_in_byte": 5952, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "spawners.project_spawner.ProjectSpawner", "line_number": 23, "usage_type": "name"}, {"api_name": "libs.utils.get_hmac", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.settings.APP_LABELS_NOTEBOOK", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "spawners.templates.pods.get_volume", "line_number": 35, "usage_type": "call"}, {"api_name": "spawners.templates.pods", "line_number": 35, "usage_type": "name"}, 
{"api_name": "spawners.templates.constants.REPOS_VOLUME", "line_number": 35, "usage_type": "attribute"}, {"api_name": "spawners.templates.constants", "line_number": 35, "usage_type": "name"}, {"api_name": "django.conf.settings.REPOS_CLAIM_NAME", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 36, "usage_type": "name"}, {"api_name": "django.conf.settings.REPOS_ROOT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "spawners.templates.pods.get_volume_mount", "line_number": 39, "usage_type": "call"}, {"api_name": "spawners.templates.pods", "line_number": 39, "usage_type": "name"}, {"api_name": "spawners.templates.constants.REPOS_VOLUME", "line_number": 39, "usage_type": "attribute"}, {"api_name": "spawners.templates.constants", "line_number": 39, "usage_type": "name"}, {"api_name": "django.conf.settings.REPOS_ROOT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 40, "usage_type": "name"}, {"api_name": "django.conf.settings.APP_LABELS_NOTEBOOK", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 47, "usage_type": "name"}, {"api_name": "django.conf.settings.ROLE_LABELS_DASHBOARD", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.settings.NOTEBOOK_PORT_RANGE", "line_number": 50, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 50, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "django.conf.settings.NOTEBOOK_PORT_RANGE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 52, "usage_type": "name"}, {"api_name": "spawners.base.get_pod_volumes", "line_number": 58, "usage_type": "call"}, {"api_name": "spawners.templates.constants.DEPLOYMENT_NAME.format", "line_number": 62, "usage_type": "call"}, {"api_name": "spawners.templates.constants.DEPLOYMENT_NAME", "line_number": 62, "usage_type": "attribute"}, {"api_name": "spawners.templates.constants", "line_number": 62, "usage_type": "name"}, {"api_name": "projects.paths.get_project_repos_path", "line_number": 70, "usage_type": "call"}, {"api_name": "spawners.templates.deployments.get_deployment", "line_number": 72, "usage_type": "call"}, {"api_name": "spawners.templates.deployments", "line_number": 72, "usage_type": "name"}, {"api_name": "django.conf.settings.APP_LABELS_NOTEBOOK", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 74, "usage_type": "name"}, {"api_name": "django.conf.settings.CONTAINER_NAME_PLUGIN_JOB", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 97, "usage_type": "name"}, {"api_name": "django.conf.settings.ROLE_LABELS_DASHBOARD", "line_number": 99, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 99, "usage_type": "name"}, {"api_name": "django.conf.settings.TYPE_LABELS_EXPERIMENT", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 100, "usage_type": "name"}, {"api_name": "spawners.templates.deployments.get_labels", "line_number": 101, "usage_type": "call"}, {"api_name": "spawners.templates.deployments", 
"line_number": 101, "usage_type": "name"}, {"api_name": "django.conf.settings.APP_LABELS_NOTEBOOK", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 101, "usage_type": "name"}, {"api_name": "django.conf.settings.ROLE_LABELS_DASHBOARD", "line_number": 104, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 104, "usage_type": "name"}, {"api_name": "django.conf.settings.TYPE_LABELS_EXPERIMENT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 105, "usage_type": "name"}, {"api_name": "spawners.templates.services.get_service", "line_number": 108, "usage_type": "call"}, {"api_name": "spawners.templates.services", "line_number": 108, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 119, "usage_type": "call"}, {"api_name": "django.conf.settings.K8S_INGRESS_ANNOTATIONS", "line_number": 119, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 119, "usage_type": "name"}, {"api_name": "spawners.templates.ingresses.get_ingress", "line_number": 127, "usage_type": "call"}, {"api_name": "spawners.templates.ingresses", "line_number": 127, "usage_type": "name"}, {"api_name": "spawners.templates.constants.DEPLOYMENT_NAME.format", "line_number": 135, "usage_type": "call"}, {"api_name": "spawners.templates.constants.DEPLOYMENT_NAME", "line_number": 135, "usage_type": "attribute"}, {"api_name": "spawners.templates.constants", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "74421338405", "text": "# boilerplate code\nimport numpy as np\nimport random\nimport cv2\nimport tensorflow as tf\nimport DisplayImages\n\nmodel_fn = 'inception5h/tensorflow_inception_graph.pb'\n\n# creating TensorFlow session and loading the model\ngraph = tf.Graph()\nsess = tf.InteractiveSession(graph=graph)\nwith tf.gfile.FastGFile(model_fn, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\nt_input = tf.placeholder(np.float32, name='input') # define the input tensor\nimagenet_mean = 117.0\nt_preprocessed = tf.expand_dims(t_input - imagenet_mean, 0)\ntf.import_graph_def(graph_def, {'input': t_preprocessed})\n\nlayers = [op.name for op in graph.get_operations() if op.type == 'Conv2D' and 'import/' in op.name]\nfeature_nums = [int(graph.get_tensor_by_name(name + ':0').get_shape()[-1]) for name in layers]\n\nprint('Number of layers', len(layers))\nprint('Total number of feature channels:', sum(feature_nums))\n\n\n# Picking some internal layer. 
Note that we use outputs before applying the ReLU nonlinearity\n# to have non-zero gradients for features with negative initial activations.\nlayer = 'mixed4d_3x3_bottleneck_pre_relu'\nchannel = 139 # picking some feature channel to visualize\n\n# start with a gray image with a little noise\nimg_noise = np.zeros(shape=(224, 224, 3)) + 100.0 # .random.uniform(size=(224, 224, 3)) + 100.0\n\ndisplay = DisplayImages.DisplayImages(full_screen=True)\n\ndef showarray(a):\n a = np.uint8(np.clip(a, 0, 1) * 255)\n image = display.pad_image_to_screen_aspect(a)\n display.show_image(image)\n\n\ndef T(layer):\n # Helper for getting layer output tensor'''\n return graph.get_tensor_by_name(\"import/%s:0\" % layer)\n\n\ndef tffunc(*argtypes):\n # Helper that transforms TF-graph generating function into a regular one.\n # See \"resize\" function below.\n\n placeholders = list(map(tf.placeholder, argtypes))\n\n def wrap(f):\n out = f(*placeholders)\n\n def wrapper(*args, **kw):\n return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))\n\n return wrapper\n\n return wrap\n\n\n# Helper function that uses TF to resize an image\ndef resize(img, size):\n img = tf.expand_dims(img, 0)\n return tf.image.resize_bilinear(img, size)[0, :, :, :]\n\n\nresize = tffunc(np.float32, np.int32)(resize)\n\n\ndef calc_grad_tiled(img, t_grad, tile_size=200):\n # '''Compute the value of tensor t_grad over the image in a tiled way.\n # Random shifts are applied to the image to blur tile boundaries over\n # multiple iterations.'''\n sz = tile_size\n h, w = img.shape[:2]\n sx, sy = np.random.randint(sz, size=2)\n img_shift = np.roll(np.roll(img, sx, 1), sy, 0)\n grad = np.zeros_like(img)\n for y in range(0, max(h - sz // 2, sz), sz):\n for x in range(0, max(w - sz // 2, sz), sz):\n sub = img_shift[y:y + sz, x:x + sz]\n g = sess.run(t_grad, {t_input: sub})\n grad[y:y + sz, x:x + sz] = g\n return np.roll(np.roll(grad, -sx, 1), -sy, 0)\n\n\ndef render_deepdream(t_obj, img0=img_noise,\n iter_n=100, step=1.5, octave_n=4, octave_scale=1.4):\n t_score = tf.reduce_mean(t_obj) # defining the optimization objective\n t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!\n\n # split the image into a number of octaves\n img = img0\n octaves = []\n for i in range(octave_n - 1):\n hw = img.shape[:2]\n lo = resize(img, np.int32(np.float32(hw) / octave_scale))\n hi = img - resize(lo, hw)\n img = lo\n octaves.append(hi)\n\n # generate details octave by octave\n for octave in range(octave_n):\n if octave > 0:\n hi = octaves[-octave]\n img = resize(img, hi.shape[:2]) + hi\n for i in range(iter_n):\n g = calc_grad_tiled(img, t_grad)\n img += g * (step / (np.abs(g).mean() + 1e-7))\n print('.', end=' ')\n showarray(img / 255.0)\n\n return img\n\n\ndef get_image(path):\n img0 = cv2.imread(path)\n img0 = np.float32(img0)\n return img0\n\n\n# img0 = get_image('ImagesIn/profile800.jpg')\n# render_deepdream(T(layer)[:,:,:,139], img0)\n# render_deepdream(tf.square(T('mixed4c')), img0)\n\nimg_profile = get_image('ImagesIn/profile740.jpg')\nimg_parasol = get_image('ImagesIn/parasolSmall.jpg')\nimg_eye = get_image('ImagesIn/eye740.jpg')\nimg_leaves = get_image('ImagesIn/leaves.jpg')\nimg_roses = get_image('ImagesIn/roses.jpg')\n\nchannels = [1, 16, 18, 7, 24, 4, 11, 19, 23, 111, 30, 36, 31, 42, 41, 44, 123, 108, 109, 47, 45, 51, 52, 127, 128, 134,\n 57, 58, 53, 60, 139, 140, 112,\n 61, 75, 101, 100, 98, 90, 136, 114, 122, 115, 82, 70, 138, 97, 116, 117, 87, 141, 86, 83, 143]\n\nwhile True:\n for channel in 
channels:\n which_image = random.randint(1, 5)\n\n if which_image == 1:\n render_deepdream(T(layer)[:, :, :, channel], img_eye)\n elif which_image == 2:\n render_deepdream(T(layer)[:, :, :, channel], img_parasol)\n elif which_image == 3:\n render_deepdream(T(layer)[:, :, :, channel], img_leaves)\n elif which_image == 4:\n render_deepdream(T(layer)[:, :, :, channel], img_roses)\n else:\n render_deepdream(T(layer)[:, :, :, channel], img_profile)\n\n # path = 'ImagesOut/mixed' + str(i) + '.jpg'\n # print(path)\n # cv2.imwrite(path, img)\n", "repo_name": "chipgarner/DreamsTensorflow", "sub_path": "MyDream.py", "file_name": "MyDream.py", "file_ext": "py", "file_size_in_byte": 5356, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.Graph", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.InteractiveSession", "line_number": 12, "usage_type": "call"}, {"api_name": "tensorflow.gfile.FastGFile", "line_number": 13, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.GraphDef", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.import_graph_def", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "DisplayImages.DisplayImages", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.expand_dims", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.image.resize_bilinear", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.roll", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.roll", "line_number": 89, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.gradients", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 123, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "35203754303", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jul 24 00:08:47 2016\r\n\r\n@author: aditya\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom dateutil.parser import parse\r\nimport dateutil.parser\r\nimport matplotlib.pyplot as plt\r\nfrom 
datetime import datetime\r\nimport pandas as pd\r\n\r\ntest = pd.read_csv('D:\\Projects\\The S R\\Test.csv')\r\n\r\ntest['Application_Receipt_Date'] = test['Application_Receipt_Date'].apply(parse)\r\ntest['App_Receipt_Month'] = test['Application_Receipt_Date'].dt.month\r\ntest.groupby('App_Receipt_Month').ID.nunique().plot()\r\nQ_Res = []\r\nfor i in test['App_Receipt_Month']:\r\n if i <= 3:\r\n Q_Res.append('Q4')\r\n elif i <=6:\r\n Q_Res.append('Q1')\r\n elif i <=9:\r\n Q_Res.append('Q2')\r\n elif i <=12:\r\n Q_Res.append('Q3')\r\ntest['Applied_Quarter'] = Q_Res\r\ndel test['Application_Receipt_Date']\r\ndel test['App_Receipt_Month']\r\n\r\n\r\ntest.groupby('Office_PIN').ID.nunique().plot()\r\noffice = pd.DataFrame(test.groupby('Office_PIN').ID.nunique())\r\noffice['Office_PIN'] = office.index\r\n\r\noffice.columns = ('No_Offices','Office')\r\nadi = pd.DataFrame(office.loc[test['Office_PIN'],'No_Offices'])\r\nadi = adi.reset_index()\r\ntest['Tot_Applications'] = adi['No_Offices']\r\nOff_Cat = []\r\nfor i in test['Tot_Applications']:\r\n if i <= 93:\r\n Off_Cat.append('D')\r\n elif i <=123:\r\n Off_Cat.append('C')\r\n elif i <= 172:\r\n Off_Cat.append('B')\r\n elif i <=397:\r\n Off_Cat.append('A')\r\ntest['Company_Level'] = Off_Cat \r\ndel test['Tot_Applications']\r\n\r\ntest.groupby('Applicant_Gender').ID.nunique()\r\ntest['Applicant_Gender'] = test['Applicant_Gender'].fillna('unknown')\r\n\r\n\r\ntest['Applicant_BirthDate'] = test['Applicant_BirthDate'].fillna('7/24/2016')\r\ntest['Applicant_BirthDate'] = test['Applicant_BirthDate'].apply(parse)\r\nage_applicant = (datetime.today().date()-test.Applicant_BirthDate)\r\nage_ap = np.ravel(age_applicant)\r\nage_ap = age_ap.astype('timedelta64[D]')\r\nage_int_years = np.int64(age_ap/365)\r\ntest['Age_Applicant'] = age_int_years\r\nmedian = test['Age_Applicant'].median()\r\n\r\nAge_app = []\r\nfor i in test['Age_Applicant']:\r\n if i == 0:\r\n Age_app.append(38)\r\n else:\r\n Age_app.append(i)\r\ntest['Age_Applicant'] = Age_app\r\nage = []\r\ntest['Age_Applicant'].describe()\r\ntest['Age_Applicant'].mean()\r\nfor i in test['Age_Applicant']:\r\n if i <=33 :\r\n age.append('L33')\r\n elif i <= 38 :\r\n age.append('L38')\r\n elif i < 65 :\r\n age.append('L46')\r\n elif i >=65 :\r\n age.append('L86')\r\npd.DataFrame.to_csv(test,'test.csv')\r\nage = pd.DataFrame(age)\r\n\r\ntest['Applicant_Age_Cat'] = age\r\n\r\ntest['Applicant_City_PIN'] = test['Applicant_City_PIN'].fillna('Missing')\r\n\r\nlocal = []\r\nfor i in test.index:\r\n if test.Applicant_City_PIN[i] == test.Office_PIN[i]:\r\n local.append('Local')\r\n elif test.Applicant_City_PIN[i] == 'Missing':\r\n local.append('Missing')\r\n else:\r\n local.append('Non_local')\r\n \r\ntest['local'] = local\r\n\r\ntest['Manager_Grade'] = test['Manager_Grade'].fillna('Direct')\r\ntest['Manager_Current_Designation'] = test['Manager_Current_Designation'].fillna('Direct')\r\ntest['Manager_Status'] = test['Manager_Status'].fillna('Direct')\r\ntest['Manager_Gender'] = test['Manager_Gender'].fillna('Direct')\r\n\r\ntest['Manager_Joining_Designation'] = test['Manager_Joining_Designation'].fillna('Direct')\r\ntest.groupby('Manager_Joining_Designation').ID.nunique()\r\n\r\ncurrent_d = []\r\nfor i in test.index:\r\n if test.Manager_Joining_Designation[i] == 'Level 6':\r\n current_d.append('Level 5')\r\n elif test.Manager_Joining_Designation[i] == 'Level 7':\r\n current_d.append('Level 5')\r\n elif test.Manager_Joining_Designation[i] == 'Level 5':\r\n current_d.append('Level 5')\r\n elif 
test.Manager_Joining_Designation[i] == 'Level 4':\r\n        current_d.append('Level 4')\r\n    elif test.Manager_Joining_Designation[i] == 'Level 3':\r\n        current_d.append('Level 3')\r\n    elif test.Manager_Joining_Designation[i] == 'Level 2':\r\n        current_d.append('Level 2')\r\n    elif test.Manager_Joining_Designation[i] == 'Level 1':\r\n        current_d.append('Level 1')\r\n    else: \r\n        current_d.append('Direct')\r\n    \r\ntest['Manager_Joining_Designation'] = current_d\r\n\r\n#Manager_Grade Feature\r\n\r\ntest['Manager_Grade'] = test['Manager_Grade'].fillna('Direct')\r\ngrade = []\r\nfor i in test.index:\r\n    if test.Manager_Grade[i] <= 2:\r\n        grade.append('L2')\r\n    elif test.Manager_Grade[i] == 3:\r\n        grade.append('L3')\r\n    elif test.Manager_Grade[i] <= 5:\r\n        grade.append('L5')\r\n    elif test.Manager_Grade[i] <= 10:\r\n        grade.append('L10') \r\n    else: \r\n        grade.append('Direct')\r\ntest['Manager_Grade'] = grade\r\n\r\n#Applicant_Occupation feature\r\n\r\ntest['Applicant_Occupation'] = test['Applicant_Occupation'].fillna('Others')\r\ntest.groupby('Applicant_Occupation').ID.nunique()\r\n\r\n\r\n#Managers Age\r\n\r\ntest['Manager_DoB'] = test['Manager_DoB'].fillna('7/24/2016')\r\ntest['Manager_DoB'] = test['Manager_DoB'].apply(parse)\r\nage_manager = (datetime.today().date()-test.Manager_DoB)\r\nage_mgr = np.ravel(age_manager)\r\nage_mgr = age_mgr.astype('timedelta64[D]')\r\nage_int_years = np.int64(age_mgr/365)\r\ntest['Age_Manager'] = age_int_years\r\ntest['Age_Manager'].median()\r\nAge_man = []\r\nfor i in test['Age_Manager']:\r\n    if i == 0:\r\n        Age_man.append(42)\r\n    else:\r\n        Age_man.append(i)\r\ntest['Age_Manager'] = Age_man\r\ntest.groupby('Age_Manager').ID.nunique()\r\ntest['Age_Manager'].describe\r\nage = []\r\nfor i in test['Age_Manager']:\r\n    if i <=35 :\r\n        age.append('L35')\r\n    elif i <= 40 :\r\n        age.append('L40')\r\n    elif i <= 45 :\r\n        age.append('L45')\r\n    elif i <=50 :\r\n        age.append('L50')\r\n    elif i <=55 :\r\n        age.append('L55')\r\n    elif i <=60 :\r\n        age.append('L60')\r\n    else:\r\n        age.append('G60')\r\n\r\ntest['Manager_Age_Cat'] = age\r\n\r\n\r\n#Managers Experience in years\r\n\r\ntest['Manager_DOJ'] = test['Manager_DOJ'].fillna('7/24/2016')\r\ntest['Manager_DOJ'] = test['Manager_DOJ'].apply(parse)\r\nexp_manager = (datetime.today().date()-test.Manager_DOJ)\r\nexp_mgr = np.ravel(exp_manager)\r\nexp_mgr = exp_mgr.astype('timedelta64[D]')\r\nexp_int_years = np.int64(exp_mgr/365)\r\ntest['Exp_Manager'] = exp_int_years\r\ntest['Exp_Manager'].median()\r\nExp_man = []\r\nfor i in test['Exp_Manager']:\r\n    if i == 0:\r\n        Exp_man.append(9)\r\n    else:\r\n        Exp_man.append(i)\r\ntest['Exp_Manager'] = Exp_man\r\ntest.groupby('Exp_Manager').ID.nunique()\r\n\r\nexp = []\r\n\r\nfor i in test['Exp_Manager']:\r\n    if i <=10 :\r\n        exp.append('L10')\r\n    elif i <=12 :\r\n        exp.append('L12')\r\n    elif i <=14 :\r\n        exp.append('L14')\r\n    else:\r\n        exp.append('G14')\r\n    \r\ntest['Exp_Mgr'] = exp\r\n\r\n#Manager No_of Products feature\r\ntest['Manager_Num_Products'].median()\r\ntest['Manager_Num_Products'] = test['Manager_Num_Products'].fillna(2)\r\ntest.Manager_Num_Products.describe()\r\ntest.groupby('Manager_Num_Products').ID.nunique()\r\n\r\npro = []\r\n\r\nfor i in test['Manager_Num_Products']:\r\n    if i == 0 :\r\n        pro.append('NoSale')\r\n    elif i <=4 :\r\n        pro.append('L4')\r\n    elif i == 5 :\r\n        pro.append('L5')\r\n    elif i <=15 :\r\n        pro.append('L15')\r\n    elif i <=25 :\r\n        pro.append('L25')\r\n    elif i <=40 :\r\n        pro.append('L40')\r\n    else:\r\n        pro.append('G40')\r\n\r\ntest['product1'] = pro\r\n\r\n#Manager No_of Products 
feature in last 3 months\r\n\r\ntest.Manager_Num_Products2.describe()\r\ntest.groupby('Manager_Num_Products2').ID.nunique()\r\ntest['Manager_Num_Products2'] = test['Manager_Num_Products2'].fillna(5)\r\n\r\npro = []\r\nfor i in test['Manager_Num_Products2']:\r\n if i == 0 :\r\n pro.append('NoSale')\r\n elif i <=4 :\r\n pro.append('L4')\r\n elif i == 5 :\r\n pro.append('L5')\r\n elif i <=10 :\r\n pro.append('L10')\r\n elif i <=15 :\r\n pro.append('L15')\r\n else:\r\n pro.append('G15')\r\n\r\ntest['product2'] = pro\r\n\r\n\r\n## Number of agents sourced by Manager\r\n\r\ntest.Manager_Num_Application.describe()\r\ntest.groupby('Manager_Num_Application').ID.nunique()\r\ntest['Manager_Num_Application'] = test['Manager_Num_Application'].fillna(2)\r\n\r\npro = []\r\n\r\nfor i in test['Manager_Num_Application']:\r\n if i == 0 :\r\n pro.append('None')\r\n elif i == 1 :\r\n pro.append('One')\r\n elif i == 2 :\r\n pro.append('Two')\r\n elif i == 3 :\r\n pro.append('Three')\r\n elif i <= 5 :\r\n pro.append('L5')\r\n elif i <= 10 :\r\n pro.append('L10')\r\n else:\r\n pro.append('G10')\r\n\r\ntest['ManagerTotalAgent'] = pro\r\n\r\ntest.groupby('ManagerTotalAgent').ID.nunique()\r\n\r\n\r\n## Number of agents sourced by Manager in last 3 months\r\n\r\ntest.Manager_Num_Coded.describe()\r\ntest.groupby('Manager_Num_Coded').ID.nunique()\r\ntest['Manager_Num_Coded'] = test['Manager_Num_Coded'].fillna(1)\r\n\r\npro = []\r\n\r\nfor i in test['Manager_Num_Coded']:\r\n if i == 0 :\r\n pro.append('None')\r\n elif i == 1 :\r\n pro.append('One')\r\n elif i == 2 :\r\n pro.append('Two')\r\n elif i <= 4 :\r\n pro.append('L4')\r\n else:\r\n pro.append('G4')\r\n\r\ntest['ManagerTotalAgentL3'] = pro\r\n\r\ntest.groupby('ManagerTotalAgentL3').ID.nunique()\r\n\r\n## Manager business last 3 months\r\n\r\ntest.Manager_Business.mean()\r\ntest.Manager_Business.describe()\r\ntest.groupby('Manager_Business').ID.nunique()\r\ntest['Manager_Business'] = test['Manager_Business'].fillna(111575)\r\ntest['Manager_Business'] = abs(test['Manager_Business'])\r\n\r\npro = []\r\n\r\nfor i in test['Manager_Business']:\r\n if i == 0 :\r\n pro.append('No_business')\r\n elif i <= 100000 :\r\n pro.append('L1')\r\n elif i <= 500000 :\r\n pro.append('L5')\r\n elif i <= 1000000 :\r\n pro.append('L10')\r\n elif i <= 1500000 :\r\n pro.append('L15') \r\n else:\r\n pro.append('G15')\r\n\r\ntest['Manager_Business'] = pro\r\n\r\ntest.groupby('Manager_Business').ID.nunique()\r\n\r\n#Applicant Qualification feature\r\n\r\ntest.groupby('Applicant_Qualification').ID.nunique()\r\ntest['Applicant_Qualification'] = test['Applicant_Qualification'].fillna('unknown')\r\nqual = pd.DataFrame(test.groupby('Applicant_Qualification').ID.nunique())\r\n\r\nqual = qual.reset_index()\r\n\r\nqual0 = qual.Applicant_Qualification[0]\r\nqual1 = qual.Applicant_Qualification[1]\r\nqual2 = qual.Applicant_Qualification[2]\r\nqual3 = qual.Applicant_Qualification[3]\r\nqual4 = qual.Applicant_Qualification[4]\r\nqual5 = qual.Applicant_Qualification[5]\r\nqual6 = qual.Applicant_Qualification[6]\r\nqual7 = qual.Applicant_Qualification[7]\r\nqual8 = qual.Applicant_Qualification[8]\r\n\r\ntest['Applicant_Qualification1'] = test['Applicant_Qualification'] \r\n\r\n\r\nqualific = []\r\n\r\nfor i in test['Applicant_Qualification']:\r\n if i == qual0 :\r\n qualific.append('Prof_Q') \r\n elif i == qual1 :\r\n qualific.append('Prof_Q')\r\n elif i == qual7 :\r\n qualific.append('Prof_Q')\r\n elif i == qual2 :\r\n qualific.append(qual2)\r\n elif i == qual3 :\r\n qualific.append(qual3)\r\n elif i == 
qual4 :\r\n qualific.append(qual4)\r\n elif i == qual5 :\r\n qualific.append(qual5)\r\n elif i == qual6 :\r\n qualific.append(qual6)\r\n elif i == qual8 :\r\n qualific.append(qual8)\r\n\r\ntest['Applicant_Qualification'] = qualific\r\n\r\n", "repo_name": "aditya436/The_SR_AV", "sub_path": "Test_Clean.py", "file_name": "Test_Clean.py", "file_ext": "py", "file_size_in_byte": 11163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 17, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 61, "usage_type": "argument"}, {"api_name": "datetime.datetime.today", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.ravel", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame.to_csv", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 89, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 161, "usage_type": "argument"}, {"api_name": "datetime.datetime.today", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "name"}, {"api_name": "numpy.ravel", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 165, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 200, "usage_type": "argument"}, {"api_name": "datetime.datetime.today", "line_number": 201, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 201, "usage_type": "name"}, {"api_name": "numpy.ravel", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 365, "usage_type": "call"}]} +{"seq_id": "77182858", "text": "from PIL import Image\nimport sys, os\nfrom struct import pack as pk\n\nSPLASH_SCREEN_WIDTH = 1280\nSPLASH_SCREEN_HEIGHT = 720\n\nSPLASH_SCREEN_STRIDE = 768\n\ndef convert_image(image_fn):\n splash = Image.open(image_fn, 'r')\n w, h = splash.size\n if w == 1280 and h == 720:\n splash = splash.transpose(Image.ROTATE_90)\n w, h = splash.size\n assert w == 720\n assert h == 1280\n splash = splash.convert('RGBA')\n splash_bin = bytearray()\n for row in range(SPLASH_SCREEN_WIDTH):\n for col in range(SPLASH_SCREEN_HEIGHT):\n r, g, b, a = splash.getpixel((col, row))\n splash_bin += pk(' len(cur_cols):\n for i, l_col in enumerate(last_cols):\n for col in cur_cols:\n if col.get(\"x0\") == l_col.get(\"x0\") and i > 0:\n last_txt_cols[i] = last_txt_cols[i] + \"\\n\" + col.get(\"text\")\n break\n last_cols[i] = cur_cols\n\n\nif __name__ == \"__main__\":\n path = u'test.pdf'\n line_list = list()\n paragraph_list = list()\n table_list = list()\n header = []\n col_line_list = []\n row_line_list = []\n cols = []\n rows = []\n with pdfplumber.open(path) as pdf:\n print(pdf.pages[36].extract_tables()[0])\n # table = pdf.pages[3].extract_tables()[0]\n header = get_header(pdf.pages)\n first_page = pdf.pages[36]\n first_page.extract_text() # 
将页面的所有字符对象整理到一个字符串中。添加x1一个字符的字符与下一个字符的字符之间的差x0大于的空格x_tolerance。添加换行符doctop,其中一个字符的字符与下一个字符的字符之间的差doctop大于y_tolerance\n # rects = first_page.within_bbox()\n page_width = first_page.width\n print(\"width:\", first_page.width)\n print(\"height:\", first_page.height)\n im = first_page.to_image(resolution=300)\n # dict1 = {'x0': int('10'), 'x1': int('550'), 'top': int('54'), 'bottom': int('57')}\n # dict1 = {'x0': Decimal('10'), 'x1': Decimal('550.983'), 'top': Decimal('53.850'), 'bottom': Decimal('56.850')}\n # (x0, top, x1, bottom)\n # bbox1 = (Decimal('10'), Decimal('53.850'), Decimal('550.983'), Decimal('56.850'))\n bbox1 = (Decimal('50.66'), Decimal('249.819'), Decimal('245.420'), Decimal('255.379'))\n result = first_page.within_bbox(bbox1) # 裁剪坐标内的文本\n print(result)\n # print(first_page.extract_words()[0])\n im.draw_rect(bbox1, stroke_width=1)\n im.save(\"D:\\\\a.png\", format=\"PNG\")\n # exit()\n # print(first_page.lines)\n # print(first_page.objects.get(\"line\", ''))\n line_dict, table_pos = {}, []\n table_lines = {}\n table_line_in = False\n # align_list, align = [], {}\n words = first_page.extract_words()\n l_x0, l_x1, l_top, bottom, line_width, line, paragraph = 0, 0, 0, 0, 0, \"\", \"\"\n for index, word in enumerate(list(words)):\n print(word)\n word_text = word.get(\"text\")\n if filter_header(word, header, page_width): # 页码和表头退出\n continue\n # l_top: 上一个word的top值\n # 当前的wod的top值和上一个word的top差值小于2,说明就在同一行\n if (word.get(\"top\") - l_top) < 2 or l_x0 == 0: # 拼接同一行的数据\n # x0:word的最小left值,\n # l_x1:上一个word的最大left值,即word.left + word.width\n # word.get(\"x0\") 当前word的left值\n #\n is_need_blank = bool(re.search(\"^[A-Za-z]+$\", word_text) or (word.get(\"x0\") - l_x1) / 2 > 7 and l_x1 > 0)\n line = line + (\" \" if is_need_blank else \"\") + word_text\n line_width = line_width + (\n word.get(\"x1\") - word.get(\"x0\")) # (word.get(\"x1\") - word.get(\"x0\")) 计算当前word的width\n if l_x0 == 0:\n line_dict.update({\"x0\": word.get(\"x0\")})\n else:\n if len(line) > 0:\n line_dict.update({\"x1\": l_x1, \"line\": line, \"line_width\": line_width})\n line_list.append(line_dict)\n line_dict = {}\n line_dict.update({\"x0\": word.get(\"x0\")})\n line = word_text\n line_width = (word.get(\"x1\") - word.get(\"x0\"))\n l_x0 = word.get(\"x0\")\n l_x1 = word.get(\"x1\")\n l_top = word.get(\"top\")\n bottom = word.get(\"bottom\")\n\n pre_word = words[index - 1]\n pre_x0 = pre_word.get(\"x0\")\n pre_x1 = pre_word.get(\"x1\")\n pre_top = pre_word.get(\"top\")\n if (word.get(\"top\") - pre_top) < 2 and not re.search(title_pattern, line) and not table_line_in: # 拼接同一行的数据\n if word.get(\"x0\") - pre_x1 > 15: # 表格行的处理,word之间间隔大于15就当表格处理\n table_line_in = True\n table_lines.update({word.get(\"top\"): [words[index - 1], word]}) # 当前值和左边值\n elif table_line_in and word.get(\"top\") - pre_top <= 20:\n col_list = table_lines.get(word.get(\"top\")) # 同一行的数据\n if col_list:\n col_index = len(col_list) + 1\n pre_key = list(table_lines.keys())[-2]\n pre_col_list = table_lines.get(pre_key)\n if tmp_word.get(\"x0\") > word.get(\"x1\"): # tmp_word 距离最近的数据\n pre_col_list.insert(len(col_list), {})\n elif tmp_word.get(\"x1\") < word.get(\"x0\"):\n pre_col_list.insert(len(col_list) + 1, {}) # col_list:和当前word同一水平的全部数据\n col_list.append(word)\n table_lines.update({word.get(\"top\"): col_list})\n else:\n pre_key = list(table_lines.keys())[-1]\n pre_col_list = table_lines.get(pre_key)\n tmp_word = pre_col_list[0] # 倒着读的,所以要取最左边的数据\n # if relation([tmp_word.get(\"x0\"), tmp_word.get(\"x1\")], [word.get(\"x0\"), word.get(\"x1\")]) == 
1:\n                    if tmp_word.get(\"x0\") > word.get(\"x1\"):\n                        pre_col_list.insert(0, {})\n                    elif tmp_word.get(\"x1\") < word.get(\"x0\"):\n                        pre_col_list.insert(1, {})\n                table_lines.update({word.get(\"top\"): [word]})\n\n\n            elif word.get(\"top\") - pre_top > 20:\n                table_line_in = False\n\n                # col_line_list.append(words[index - 1])\n                # cols.append(words[index - 1].get(\"text\"))\n                # next_word = words[index + 1]\n                # if next_word.get(\"top\") - l_top > 2: # this is the last one\n                #     col_line_list.append(word)\n                #     cols.append(word.get(\"text\"))\n                #     row_line_list.append(col_line_list)\n                #     rows.append(cols)\n                #     col_line_list, cols = [], []\n                # if len(row_line_list) > 1: reset_cols(row_line_list, rows)\n\n            print(\"\\n\")\n\n        print(\"****************************************\")\n        print(rows)\n        for i, line in enumerate(line_list):\n            print(str(i) + \"------->>>>\", line)\n\n        paragraph = \"\"\n        for i, line_dict in enumerate(line_list):\n            line = line_dict.get(\"line\")\n            line_width = line_dict.get(\"line_width\")\n            x0 = line_dict.get(\"x0\")\n\n            if line_width / page_width < 0.7 and i == 0:\n                paragraph = line\n                paragraph_list.append(paragraph)\n                paragraph = \"\"\n            else:\n                pre_dict = line_list[i - 1]\n                pre_x0 = pre_dict.get(\"x0\")\n                pre_line = pre_dict.get(\"line\")\n                pre_line_width = pre_dict.get(\"line_width\")\n                find_list = re.search(title_pattern, line)\n                if line_width / page_width < 0.7 and pre_line_width / page_width < 0.7 \\\n                        or \".....\" in line and str(line[-1]).isdigit(): # very short lines above and below, or table-of-contents lines\n                    if bool(paragraph):\n                        paragraph_list.append(paragraph)\n                        paragraph = \"\"\n                        continue\n                    # paragraph = line\n                    if i == len(line_list) - 1 or not bool(paragraph): # output one extra for the last line\n                        paragraph_list.append(line)\n                elif bool(find_list) and bool(abs(x0 - pre_x0) > 18):\n                    if bool(paragraph):\n                        paragraph_list.append(paragraph)\n                    if line_width / page_width < 0.7:\n                        paragraph_list.append(line)\n                        paragraph = \"\"\n                    else:\n                        paragraph = line\n                elif line_width / page_width > 0.7 and bool(abs(x0 - pre_x0) > 18) and re.search(\"[。.;]$\", pre_line):\n                    if bool(paragraph):\n                        paragraph_list.append(paragraph)\n                    paragraph = line\n                elif line_width / page_width < 0.7 and re.search(\"[。.;]$\", line): # a closing line that ends the paragraph\n                    paragraph = paragraph + line\n                    paragraph_list.append(paragraph)\n                    paragraph = \"\"\n                else:\n                    paragraph = paragraph + line\n\n                # paragraph = paragraph + line\n\n    print(\"-------------------------------------\")\n    for i, p in enumerate(paragraph_list):\n        print(str(i) + \"------->>>>\", p)\n", "repo_name": "zengzih/analysisPdf", "sub_path": "pdf_extractor.py", "file_name": "pdf_extractor.py", "file_ext": "py", "file_size_in_byte": 14128, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pdfplumber.open", "line_number": 115, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 130, "usage_type": "call"}, {"api_name": "re.search", "line_number": 157, "usage_type": "call"}, {"api_name": "re.search", "line_number": 180, "usage_type": "call"}, {"api_name": "re.search", "line_number": 244, "usage_type": "call"}, {"api_name": "re.search", "line_number": 262, "usage_type": "call"}, {"api_name": "re.search", "line_number": 266, "usage_type": "call"}]} +{"seq_id": "35099113308", "text": "from enum import Enum\n\n\nclass Headers(Enum):\n    FileUploadHeaders = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n                      'Chrome/95.0.4638.69 Safari/537.36',\n    }\n\n    CommonHeaders = {\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) 
AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/95.0.4638.69 Safari/537.36',\n 'Content-Type': 'application/json;charset=UTF-8',\n }\n\n\nclass AnswerPathAndUploadUrl(Enum):\n items = {\n 'url': {\n 'loginUrl': 'https://yk.myunedu.com/yunkai/sys/identification/login',\n 'courseListSearchUrl': 'https://yk.myunedu.com/yunkai/web/study/userPracticeScheme/overview',\n 'homeworkListSearchUrl': 'https://yk.myunedu.com/yunkai/web/student/task/list',\n 'uploadUrl': 'https://yk.myunedu.com/yunkai/file/upload',\n 'answerUrl': 'https://yk.myunedu.com/yunkai/web/student/task/submit/answer',\n 'infoUrl': 'https://yk.myunedu.com/yunkai/web/student/task/info',\n },\n \"课程实践\": {\n 'commonFilename': \"小组讨论.pdf\",\n 'imgFilename': '小组讨论.png',\n },\n \"小组学习\": {\n 'commonFilename': \"小组讨论.pdf\",\n 'imgFilename': '小组讨论.png',\n },\n \"线下作业1\": {\n 'commonFilename': \"线下作业1.docx\",\n 'imgFilename': '小组讨论.png',\n },\n \"线下作业2\": {\n 'commonFilename': \"线下作业2.docx\",\n 'imgFilename': '小组讨论.png',\n },\n \"线下作业3\": {\n 'commonFilename': \"线下作业3.docx\",\n 'imgFilename': '小组讨论.png',\n },\n \"线下作业4\": {\n 'commonFilename': \"线下作业4.docx\",\n 'imgFilename': '小组讨论.png',\n },\n '离线作业1': {\n 'commonFilename': \"离线作业1.docx\",\n 'imgFilename': '小组讨论.png',\n },\n '离线作业2': {\n 'commonFilename': \"离线作业2.docx\",\n 'imgFilename': '小组讨论.png',\n },\n '离线作业3': {\n 'commonFilename': \"离线作业3.docx\",\n 'imgFilename': '小组讨论.png',\n },\n '离线作业4': {\n 'commonFilename': \"离线作业4.docx\",\n 'imgFilename': '小组讨论.png',\n },\n \"线上作业\": {\n 'startUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/start',\n 'updateUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/update',\n 'submitUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/submit',\n },\n '在线作业': {\n 'startUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/start',\n 'updateUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/update',\n 'submitUrl': 'https://yk.myunedu.com/yunkai/web/examPaper/submit',\n }\n }\n\n info_find_items = {\n 'answerSuccessUrl': 'https://yk.myunedu.com/yunkai/web/student/task/info'\n }\n\n\nclass BrushClassUrl(Enum):\n items = {\n 'add_progress': 'https://yk.myunedu.com/yunkai/admin/userstudyrecord/addVideoProgress',\n 'add_time': 'https://yk.myunedu.com/yunkai/admin/userstudyrecord/addVideoTime',\n 'course_list': 'https://yk.myunedu.com/yunkai/web/study/userPracticeScheme/overview',\n 'course_video_info': 'https://yk.myunedu.com/yunkai/web/charterSection/charterSectionList',\n 'live_video_info': 'https://yk.myunedu.com/yunkai/web/study/liveLessons'\n }\n\n\nclass TaskReturnMessage(Enum):\n ImgTextFindSuccess = {'imgStatus': None, 'textStatus': None, 'taskStatus': True, 'msg': '图片、文字填充-查询成功'}\n ImgTextFindFailing = {'taskStatus': False, 'msg': '图片、文字填充查询-连续请求不成功'}\n ImgTextFindFailingDetail = {'taskStatus': False, 'msg': '图片、文字填充查询-数据缺失'}\n\n FileNotFound = {'taskStatus': False, 'msg': f\"文件不存在:\"}\n FileExists = {'taskStatus': True, 'msg': f\"文件存在\"}\n\n FileUploadSuccess = {'taskStatus': True, 'msg': '文件上传成功'}\n FileUploadFailing = {'taskStatus': False, 'msg': '文件上传失败 连续请求不成功'}\n\n LastFileUploadSuccess = {'taskStatus': True, 'msg': '总提交成功-pdf、word、img答案提交阶段'}\n LastFileUploadFailing = {'taskStatus': False, 'msg': '总提交失败-pdf、word、img答案提交阶段 连续请求不成功'}\n\n LastTextUploadSuccess = {'taskStatus': True, 'msg': f'总提交成功-文本填充阶段'}\n LastTextUploadFailing = {'taskStatus': False, 'msg': f'总提交失败-文本填充阶段 连续请求不成功'}\n\n LastOnlineHomeWorkSuccess = {'taskStatus': True, 'msg': '总提交成功-线上作业'}\n LastOnlineHomeWorkFailing = {'taskStatus': False, 'msg': '总提交失败-线上作业 连续请求不成功'}\n 
LastOnlineHomeWorkFailingDetail = {'taskStatus': False, 'msg': '总提交失败-线上作业'}\n\n ResponseDataLost = {'taskStatus': False, 'msg': '题目答案搜索-缺失答案数据'}\n\n ChildHomeWorkFailing = {'taskStatus': False, 'msg': '子问题 连续请求不成功'}\n ChildHomeWorkSuccess = {'taskStatus': True, 'msg': '子问题 提交成功'}\n\n OnlineHomeWorkFinished = {'taskStatus': True, 'msg': '选择题已完成'}\n\n QuestionAnswerSearchDataLost = {'taskStatus': False, 'msg': '线上作业 答案搜索-缺失答案数据'}\n QuestionAnswerSearchRequestFailing = {'taskStatus': False, 'msg': '线上作业 答案搜索-请求不成功'}\n\n\nclass LogMessage(Enum):\n CourseListSearchLogItem = {\n 'content': \"{} courseListSearch 连续请求不成功!\\n\",\n 'loggerItem': {\n 'one': None, 'two': 'null', 'three': 'null', 'four': '连续请求不成功', 'five': '课程列表搜索阶段'\n }\n }\n\n HomeworkListSearchLogItem = { # 日志对象\n 'content': \"{} homeworkListSearch 连续请求不成功!\\n\",\n 'loggerItem': {\n 'one': None, 'two': None, 'three': 'null', 'four': '连续请求不成功', 'five': '子作业列表搜索阶段'\n }\n }\n\n ChildHomeWorkFinished = {\n 'content': \"已经完成:{} {} {}\\n\",\n 'loggerItem': {\n 'one': None, 'two': None, 'three': None, 'four': '已经完成', 'five': '科目子作业存储阶段'\n }\n }\n\n ChildHomeWorkSaved = {\n 'content': \"已经完成:{} {} {}\\n\",\n 'loggerItem': {\n 'one': None, 'two': None, 'three': None, 'four': '存储成功', 'five': '科目子作业存储阶段'\n }\n }\n\n\n# print(Headers.FileUploadHeaders.value)\n# # AnswerPathAndUploadUrl.items.value['url']['uploadUrl'] += '13rqwrqr'\n# # print(AnswerPathAndUploadUrl.items.value['url']['uploadUrl'])\n# # print(TaskReturnMessage.LastOnlineHomeWorkFailingDetail.value['msg'].format(\"xiaoz\"))\n# items = TaskReturnMessage.LastOnlineHomeWorkFailingDetail.value\n# items['msg'] = items['msg'].format('xiaozhou')\n# print(items)\n\n", "repo_name": "Exixiaozhou/YkStudyAnswer", "sub_path": "MyLib/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 7326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 4, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 17, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 84, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 94, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "28170603871", "text": "from dungeon_level.dungeon_tiles import Tiles\nfrom scipy.ndimage.measurements import label as label_connected_components\nimport numpy as np\n\nclass LevelTweaker:\n @staticmethod\n def tweak_level(level, tweaker_aesthetic):\n if tweaker_aesthetic.should_fill_unused_space:\n LevelTweaker.fill_unused_space(level)\n\n @staticmethod\n def fill_unused_space(level):\n start_position = np.argwhere(level.upper_layer == Tiles.player)[0]\n non_wall_mask = (level.upper_layer != Tiles.wall).astype(int)\n connected_components, component_count = label_connected_components(non_wall_mask)\n used_space_component = connected_components[tuple(start_position)]\n level.upper_layer[connected_components != used_space_component] = Tiles.wall", "repo_name": "bjatkin/dungeon-design", "sub_path": "generation/level_tweaker.py", "file_name": "level_tweaker.py", "file_ext": "py", "file_size_in_byte": 778, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.argwhere", "line_number": 13, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.player", "line_number": 13, "usage_type": "attribute"}, {"api_name": 
"dungeon_level.dungeon_tiles.Tiles", "line_number": 13, "usage_type": "name"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.wall", "line_number": 14, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 14, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 15, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.wall", "line_number": 17, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "16186909422", "text": "# -*- coding: utf-8 -*-\n__author__ = 'dizcza'\n\n# THIS IS A FILE TO TUN THE PROJECT JourneyClassifier\n# For more info go to https://github.com/dizcza/JourneyClassifier\nimport time\nimport csv\nfrom datetime import datetime\n\n# run a timer and import data from journeyhandler.py\ntime_start = time.time()\nimport journeyhandler\nfrom journeyhandler import COUNTRY_BOUNDARIES as BOUNDARIES\n\n\ndef classify(years, journey_zones):\n \"\"\"\n\t Classify data into group of journeys with the same year and the same zone (country).\n\t All countries borders must be stored in 'borders'.\n\t\"\"\"\n try:\n data_length = journeyhandler.get_data_length([years, journey_zones])\n if data_length == 0:\n raise Exception\n except:\n print(\"\\n\\t(!) %s: Invalid data input.\\n\" % journeyhandler.get_func_name())\n return\n\n group_list = [0 for dummy_checkin in range(data_length)]\n\n # Set first unique IDs for each set of journeys, taken during one year.\n ids = 1\n first_year = min(years)\n last_year = max(years)\n for year in range(last_year, first_year - 1, -1):\n for country_name in BOUNDARIES.keys():\n # If data sample with current zone in current year is found, checker 'isfound' becomes True.\n isfound = False\n for checkin in range(data_length - 1, -1, -1):\n # Loop from the end to the beginning of data samples for each year and possible localization zone.\n if journey_zones[checkin] != 'usa' and journey_zones[checkin] == country_name and years[\n checkin] == year:\n group_list[checkin] = ids\n isfound = True\n if isfound:\n # If matching is found on the last steps, look for another zone\n # in the current year. Thus, we must change the ID to the new\n # travelling group and reset found matches.\n ids += 1\n\n print(\"OUTPUT: Have found %d groups of travellings.\" % max(group_list))\n return group_list\n\n\nprint(\"\\n\" + \"*\" * 20 + \" Begin to operate data. 
\" + \"*\" * 20)\n\ncsvfile = open('checkins.csv', 'r')\ndata = csv.reader(csvfile, delimiter=',')\n\nID = []\ntimeunix = []\nyears = []\nlatitude = []\nlongitude = []\n\nfor checkin, row in enumerate(data):\n ID.append(str(row[0]))\n timeunix.append(int(row[1]))\n year = int(datetime.fromtimestamp(timeunix[-1]).strftime('%Y'))\n years.append(year)\n latitude.append(float(row[2]))\n longitude.append(float(row[3]))\ncsvfile.close()\n\n# Make data immutable.\nlatitude = tuple(latitude)\nlongitude = tuple(longitude)\nNUM_OF_CHECKINS = len(ID)\n\n# Finding home location.\nhomeloc, native_country_name = journeyhandler.figure_out_home([latitude, longitude])\nprint(\"Home location: %s \\t\" % native_country_name, homeloc)\n\n# Visualize the data, if you want.\njourneyhandler.visualize_data(latitude, longitude, home=homeloc)\n\n# Journey zones classifier.\njourney_zones = journeyhandler.setzones([latitude, longitude])\n\n# Classify data into set of journeys with unique IDs\ngroup_list = classify(years, journey_zones)\n\n# Wrap data into 'checkins_upd.csv'\ntry:\n csvfile = open('checkins_upd.csv', 'wb')\n outputfile = csv.writer(csvfile, delimiter=',')\n for checkin in range(NUM_OF_CHECKINS):\n outputfile.writerow(\n [ID[checkin], timeunix[checkin], latitude[checkin], longitude[checkin], group_list[checkin]])\nexcept:\n print(\"\\nOUTPUT ERROR: Cannot write the results into the output file.\\n\")\nfinally:\n csvfile.close()\n\ntime_end = time.time()\nprint(\"*\" * 25 + \" THE END. \" + \"*\" * 25 + \"\\n\")\nprint(\"Execution time: %g sec.\" % (time_end - time_start))\n", "repo_name": "dizcza/JourneyClassifier", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "journeyhandler.get_data_length", "line_number": 22, "usage_type": "call"}, {"api_name": "journeyhandler.get_func_name", "line_number": 26, "usage_type": "call"}, {"api_name": "journeyhandler.COUNTRY_BOUNDARIES.keys", "line_number": 36, "usage_type": "call"}, {"api_name": "journeyhandler.COUNTRY_BOUNDARIES", "line_number": 36, "usage_type": "name"}, {"api_name": "csv.reader", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "journeyhandler.figure_out_home", "line_number": 81, "usage_type": "call"}, {"api_name": "journeyhandler.visualize_data", "line_number": 85, "usage_type": "call"}, {"api_name": "journeyhandler.setzones", "line_number": 88, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 96, "usage_type": "call"}, {"api_name": "time.time", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "75336566563", "text": "import logging\nimport logging.handlers\n\nfrom flask import Flask, render_template\n\nfrom . import web\nfrom . import apiv1\n\nfrom .manager import Manager\n\n\nclass Server():\n\n '''\n This is the database server Class. 
\n It initializes flask app, database backends, logging system and so on.\n '''\n \n def __init__(self, addr='127.0.0.1:5000', backend='sqlite:///test.db',\n enable_retry=False, debug=False, logfile='', email_config=None, slack_config =None):\n pos = addr.rfind(':')\n self.host, self.port = addr[:pos], addr[pos+1:]\n self.port = int(self.port)\n self.addr = addr if addr.startswith('http') else 'http://' + addr\n self.debug = debug\n self.backend = backend\n self.logfile = logfile\n self.email_config = email_config\n self.slack_config = slack_config\n self.enable_retry = enable_retry\n \n self._init_manager()\n self._init_app()\n\n \n def _init_manager(self):\n Manager.set(backend=self.backend, enable_retry=self.enable_retry, logfile=self.logfile, email=self.email_config, slack=self.slack_config)\n Manager.get().logger.info('using {} as backend'.format(self.backend))\n\n def _init_app(self):\n self.app = Flask(__name__)\n self.app.config['addr'] = self.addr\n @self.app.route('/')\n def index():\n \t return render_template('index.html')\n self.app.register_blueprint(web.bp, url_prefix='/web')\n self.app.register_blueprint(apiv1.bp, url_prefix='/api/v1')\n Manager.get().logger.info('start database service at {}'.format(self.addr))\n \n def run(self):\n self.app.run(host=self.host, port=self.port, debug=True)\n\n", "repo_name": "janelia-flyem/neutuse", "sub_path": "neutuse/database/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "manager.Manager.set", "line_number": 37, "usage_type": "call"}, {"api_name": "manager.Manager", "line_number": 37, "usage_type": "name"}, {"api_name": "manager.Manager.get", "line_number": 38, "usage_type": "call"}, {"api_name": "manager.Manager", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "manager.Manager.get", "line_number": 48, "usage_type": "call"}, {"api_name": "manager.Manager", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "17801555022", "text": "#\n# classify iris_data from 2 different features with different Algorithms.\n# Logistic Regression\n# Random Forest\n# SVM.SVC\n# EnsembleVoteClassifier\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport itertools\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom mlxtend.classifier import EnsembleVoteClassifier\nfrom mlxtend.data import iris_data\nfrom mlxtend.plotting import plot_decision_regions\n\n# Initializing Classifiers\nclf1 = LogisticRegression(random_state=0)\nclf2 = RandomForestClassifier(random_state=0)\nclf3 = SVC(random_state=0, probability=True)\neclf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3],\n weights=[2, 1, 1], voting='soft')\n\n# Loading some example data\nX, y = iris_data()\n#X = X[:,[0, 2]] # Select 2 features, since visulization is 2D which just require 2 axes\nXX = [X[:,[i,j]] for i in range(4) for j in range(i+1,4)] # combinations of X[:0:4]\n\n# Plotting Decision Regions\n\ngs = gridspec.GridSpec(2, 2)\nfig = plt.figure(figsize=(10, 8))\n\nclfs = [clf1, clf2, clf3, eclf]\nlabels = ['Logistic Regression',\n 'Random Forest',\n 'RBF kernel SVM',\n 'Ensemble']\n\nfor row,X in enumerate(XX):\n for clf, lab, grd in 
zip(clfs,\n labels,\n itertools.product([0, 1],\n repeat=2)):\n clf.fit(X, y)\n ax = plt.subplot(gs[grd[0], grd[1]])\n fig = plot_decision_regions(X=X, y=y,\n clf=clf, legend=2)\n plt.title(lab) \n plt.show()\n", "repo_name": "Paul0M/Machine-Learning", "sub_path": "Classifier/Diff_Algorithm.py", "file_name": "Diff_Algorithm.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 23, "usage_type": "call"}, {"api_name": "mlxtend.classifier.EnsembleVoteClassifier", "line_number": 24, "usage_type": "call"}, {"api_name": "mlxtend.data.iris_data", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "mlxtend.plotting.plot_decision_regions", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "21044756387", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 23 13:51:41 2021\r\n\r\n@author: Adi Kurhade\r\n\"\"\"\r\n\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndef shorten_categories(categories, cutoff):\r\n categorical_map = {}\r\n for i in range(len(categories)):\r\n if categories.values[i] >= cutoff:\r\n categorical_map[categories.index[i]] = categories.index[i]\r\n else:\r\n categorical_map[categories.index[i]] = \"others\"\r\n return categorical_map\r\n\r\n\r\ndef clean_experience(years):\r\n if years == \"Less than 1 year\":\r\n return 0.5\r\n if years == \"More than 50 years\":\r\n return 50\r\n return float(years)\r\n\r\n\r\ndef clean_education(x):\r\n if 'Bachelor’s degree' in x:\r\n return 'Bachelor’s degree'\r\n if 'Master’s degree' in x:\r\n return 'Master’s degree'\r\n if 'Professional degree' in x or 'Other doctoral' in x:\r\n return 'Post grad'\r\n return 'Less than a Bachelors'\r\n\r\n@st.cache\r\ndef load_data():\r\n df = pd.read_csv(\"survey_results_public.csv\")\r\n df = df[[\"Country\",\r\n \"EdLevel\",\r\n \"YearsCodePro\",\r\n \"Employment\",\r\n \"ConvertedCompYearly\"\r\n ]]\r\n df = df.rename({\"ConvertedCompYearly\":\"Salary\"}, axis=1)\r\n df = df[df.Salary.notnull()]\r\n df = df.dropna()\r\n df1 = df[df[\"Employment\"]== \"Employed full-time\"]\r\n df1.drop(['Employment'],axis=1, inplace=True)\r\n \r\n country_map = shorten_categories(df1.Country.value_counts(), 400)\r\n df1['Country'] = df1['Country'].map(country_map)\r\n df1 = df1[df1[\"Salary\"] <= 250000]\r\n df1 = df1[df1[\"Salary\"] >= 
10000]\r\n df1 = df1[df1['Country'] != 'Other']\r\n \r\n df1['YearsCodePro'] = df1['YearsCodePro'].apply(clean_experience)\r\n df1['Education'] = df1['EdLevel'].apply(clean_education)\r\n df1.drop(\"EdLevel\",axis =1, inplace=True)\r\n return df1\r\n\r\ndf = load_data()\r\n\r\ndef show_explore_page():\r\n st.title(\"Explore Software Engineer Salary\")\r\n st.write(\"\"\"\r\n ### Stack Overflow Developer Survey 2021\r\n \"\"\"\r\n )\r\n data = df['Country'].value_counts()\r\n\r\n fig1, ax1 = plt.subplots()\r\n ax1.pie(data, labels = data.index, autopct=\"%1.1f%%\",shadow = True,\r\n startangle =90)\r\n ax1.axis(\"equal\")\r\n\r\n st.write(\"\"\"### Number of Data from different Countries\"\"\")\r\n st.pyplot(fig1)\r\n\r\n st.write(\r\n \"\"\"\r\n ### Mean Salary based on country\r\n \"\"\"\r\n )\r\n\r\n data = df.groupby([\"Country\"])[\"Salary\"].mean().sort_values(ascending=True)\r\n st.bar_chart(data)\r\n \r\n st.write(\"\"\"\r\n ### Mean salary based on Experience\r\n \"\"\")\r\n data = df.groupby([\"YearsCodePro\"])[\"Salary\"].mean().sort_values(ascending=True)\r\n st.line_chart(data)\r\n \r\n \r\n \r\n\r\n", "repo_name": "Adityakurhade/stackoverflow_data_salary_prediction", "sub_path": "explore_page.py", "file_name": "explore_page.py", "file_ext": "py", "file_size_in_byte": 2777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 39, "usage_type": "attribute"}, {"api_name": "streamlit.title", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "streamlit.write", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 81, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 83, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 90, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 92, "usage_type": "call"}, {"api_name": "streamlit.line_chart", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "640506339", "text": "#!/usr/bin/env python3.7\n__author__ = \"Emiliano Sauvisky\"\n__version__ = \"1.0\"\n__license__ = \"MIT\"\n\nimport argparse\nimport asyncio\nimport random\nimport re\nimport scipy.stats\nimport time\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n\n\nfrom aiorun import run\nfrom logzero import logger\nfrom pyautogui import hotkey, press, typewrite\nfrom num2words import num2words\n\nRE_INTEGER_RANGE = r'[0-9]+\\.\\.\\.[0-9]+'\n\n\ndef trunc_gauss_int(bottom, top):\n '''\n Generates a random integer following a personal gaussian distribution,\n using the mean of both values as mu, and 0.7 as the standard deviation\n whilst truncating values outside the range.\n '''\n lower = -1\n upper = 1\n mu = 0 # 422516lu\n sigma = 0.7\n\n total_possibilities = abs(top - bottom) / 2\n median_value = (bottom + top) / 2\n\n return int(median_value + (total_possibilities * scipy.stats.truncnorm.rvs((lower-mu)/sigma,(upper-mu)/sigma,loc=mu,scale=sigma,size=1)))\n\nclass AnswerWasAlreadyUsed(Exception):\n pass\n\nclass Main(object):\n def __init__(self, args):\n self.items = args.items\n self.ignore_spaces = args.ignore_spaces\n self.time_interval = 
float(args.time)\n        self.write_for_extense = args.write_for_extense\n        self.leading_zeroes = args.leading_zeroes\n        self.diminishing_time = args.diminishing_time\n        self.previous_checks = []\n        self.start_time = time.time() - self.time_interval\n\n        self.words = []\n        with open(args.file, 'r') as file:\n            for line in file:\n                # skip comment lines and blank lines\n                if not line.startswith('#') and line.strip():\n                    self.words.append(line.strip('\\n '))\n\n    def generate_random_word(self):\n        return random.choice(self.words).casefold()\n\n    def generate_random_number(self, full_range):\n        try:\n            first_number, last_number = str(full_range).split('...', 1)\n        except:\n            logger.error('I cannot understand what you mean with: ' + str(full_range))\n            quit()\n        else:\n            # evaluate min and max together so a reversed range (e.g. 300...100) is ordered correctly\n            first_number, last_number = min(int(first_number), int(last_number)), max(int(first_number), int(last_number))\n\n            if self.write_for_extense:\n                return str(num2words(trunc_gauss_int(first_number, last_number))).replace('-', ' ').replace(',', '')\n            if self.leading_zeroes:\n                integer = trunc_gauss_int(first_number, last_number)\n                ret_len = max(len(str(first_number)), len(str(last_number)))\n                return str(\"{:0{precision}d}\".format(integer, precision=ret_len))\n            else:\n                return str(trunc_gauss_int(first_number, last_number))\n\n    async def compile_new_answer(self):\n        while True:\n            answer_items = []\n            for item in self.items:\n                if re.match(RE_INTEGER_RANGE, item):\n                    answer_items.append(self.generate_random_number(item))\n                elif item == '...':\n                    answer_items.append(self.generate_random_word())\n                else:\n                    answer_items.append(item)\n\n            if self.ignore_spaces:\n                answer = ''.join(answer_items)\n            else:\n                answer = ' '.join(answer_items)\n\n            # Bails out if answer was already used and it's on memory (so we don't have to load the file again)\n            if answer in self.previous_checks:\n                logger.error('Answer {} was already used before (memory)'.format(answer))\n                continue\n\n            # Otherwise load messages.log and check if anyone has sent an identical message before.\n            # If so, bail out.\n            with open('messages.log', 'r') as file:\n                try:\n                    for line in file:\n                        if answer.casefold() == line.casefold().strip():\n                            self.previous_checks.append(answer)\n                            logger.error('Answer {} was already used before (file)'.format(answer))\n                            raise Exception\n                        # elif answer.casefold() in line.casefold():\n                        #     self.previous_checks.append(answer)\n                        #     logger.error('Answer {} was already used before, but partially (file)'.format(answer))\n                        #     # print(answer, line)\n                        #     # print(answer.casefold(), line.casefold())\n                        #     raise Exception\n                except:\n                    continue\n\n            logger.warning('Generated answer: ' + answer)\n            self.previous_checks.append(answer)\n            return answer\n\n    async def send_answer(self, answer):\n        hotkey('ctrl', 'a')\n        typewrite(answer)\n        hotkey('Enter')\n\n    async def start(self):\n        # while True:\n        while True:\n            # print(time.time() - self.start_time)\n            if time.time() - self.start_time >= self.time_interval:\n                # Get a brand new answer that wasn't used before\n                self.start_time = time.time()\n                answer = await self.compile_new_answer()\n                await self.send_answer(answer)\n                if self.diminishing_time and self.time_interval > 1.5:\n                    self.time_interval = self.time_interval * 0.95\n\n\nif __name__ == '__main__':\n    def cmdline_args():\n        p = argparse.ArgumentParser()\n\n        p.add_argument('-f', '--file', help='File from where to pick answers.', default='answers.txt')\n        p.add_argument('-x', '--write-for-extense', help='Write numbers for extense.', action='store_true')\n        p.add_argument('-z', '--leading-zeroes', help='Keep leading zeroes', action='store_true')\n        p.add_argument('-n', '--ignore-spaces', help='Ignore spaces between items (i.e.: \"100...300 5\" would output numbers from 1000 to 3000 ending in 5.', action='store_true')\n        p.add_argument('-d', '--diminishing-time', help='Diminish [default = until 1.5s]', action='store_true')\n        p.add_argument('-t', '--time', help='Minimum time to wait between each message (in seconds). Accepts floats. [default = 2]', default=2)\n        p.add_argument('items', nargs='+', help='List of items to input. Use three dots (...) as a placeholder for a randomly chosen item.'\n                       + ' If one item is of the format of int...int (e.g.: 1000...3000), then the script will fetch a'\n                       + ' random integer between the two adjacent numbers.')\n\n        res = p.parse_args()\n        return res\n\n    args = cmdline_args()\n    asyncio.run(Main(args).start())\n", "repo_name": "esauvisky/GiveawayPwner", "sub_path": "giveaway.py", "file_name": "giveaway.py", "file_ext": "py", "file_size_in_byte": 6580, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.stats.stats.truncnorm.rvs", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.stats.stats", "line_number": 39, "usage_type": "attribute"}, {"api_name": "scipy.stats", "line_number": 39, "usage_type": "name"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 62, "usage_type": "call"}, {"api_name": "logzero.logger.error", "line_number": 68, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 68, "usage_type": "name"}, {"api_name": "num2words.num2words", "line_number": 75, "usage_type": "call"}, {"api_name": "re.match", "line_number": 87, "usage_type": "call"}, {"api_name": "logzero.logger.error", "line_number": 101, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 101, "usage_type": "name"}, {"api_name": "logzero.logger.error", "line_number": 111, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 111, "usage_type": "name"}, {"api_name": "logzero.logger.warning", "line_number": 122, "usage_type": "call"}, {"api_name": "logzero.logger", "line_number": 122, "usage_type": "name"}, {"api_name": "pyautogui.hotkey", "line_number": 127, "usage_type": "call"}, {"api_name": "pyautogui.typewrite", "line_number": 128, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 129, "usage_type": "call"}, {"api_name": "time.time", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 137, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 146, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 162, "usage_type": "call"}]}
+{"seq_id": "31912904971", "text": "from numpy import ones_like\nfrom numpy.random import seed, normal\nfrom scipy.stats import linregress\nfrom sklearn.metrics import r2_score\nfrom matplotlib import pyplot as plt\n\n\nSTRIDE = 4\n\n\ndef data():\n    # creates the data set\n    seed(0x2AFFFFFF)\n    x = normal(3, 1, 256)\n    y = normal(150, 40, 256) / x\n    X = 1/x\n    return (X, y)\n\n\ndef test():\n    # creates the testing set\n    X, y = data()\n    test_X = X[::STRIDE]\n    test_y = y[::STRIDE]\n    return (test_X, test_y)\n\n\ndef train():\n    # creates the training set\n    X, y = data()\n\n    mask = ones_like(X)\n    mask = (mask == 1)\n    mask[::STRIDE] = False\n\n    train_X = X[mask]\n    train_y = y[mask]\n    return (train_X, train_y)\n\n\ndef linearFit(X, y):\n    slope, intercept, r, p, stderr = linregress(X, y)\n    return (slope, intercept, r, p, 
stderr)\n\n\ndef linearModel(X, y):\n    slope, intercept, r, p, stderr = linearFit(X, y)\n    return (lambda x: slope * x + intercept)\n\n\n\nX, y = data()\ntrain_X, train_y = train()\ntest_X, test_y = test()\n\n\nplt.close('all')\nplt.ion()\nfig, ax = plt.subplots()\nax.scatter(X, y)\nax.set_title('original set')\n\n\nfig, ax = plt.subplots()\nax.scatter(test_X, test_y)\nax.set_title('testing set')\n\n\nfig, ax = plt.subplots()\nax.scatter(train_X, train_y)\nax.set_title('training set')\n\n\n# determines the usefulness of the linear model for predicting future values\nslope, intercept, r, p, stderr = linearFit(train_X, train_y)\n\nmodel = linearModel(train_X, train_y)\nr2 = r2_score(train_y, model(train_X))\n\n# expected output R**2 ~ 0.8 (good enough)\nout = (\n    f'Training Set:\\n'\n    f'R**2: {r**2} from the linear regression\\n'\n    f'R**2: {r2} by the r2-score of sklearn\\n'\n)\n\nprint(out)\n\n\n# determines the usefulness of the linear model for predicting future values\nslope, intercept, r, p, stderr = linearFit(test_X, test_y)\n\nmodel = linearModel(test_X, test_y)\nr2 = r2_score(test_y, model(test_X))\n\n# expected output R**2 ~ 0.8 (good enough)\nout = (\n    f'Testing Set:\\n'\n    f'R**2: {r**2} from the linear regression\\n'\n    f'R**2: {r2} by the r2-score of sklearn\\n'\n)\n\nprint(out)\n", "repo_name": "misael-diaz/machine-learning", "sub_path": "training-and-testing/train-test.py", "file_name": "train-test.py", "file_ext": "py", "file_size_in_byte": 2073, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.seed", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 32, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.close", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 78, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 94, "usage_type": "call"}]}
+{"seq_id": "566308160", "text": "# coding: utf-8\r\n\r\n__author__ = \"Korantin\"\r\n\r\n\r\nimport ply.lex as lex\r\n\r\ntokens = (\r\n    'UNSPEC',\r\n    'AFFIX',\r\n    'CLITIC',\r\n    'REDUPL',\r\n    'GINFIX',\r\n    'DINFIX',\r\n    'CIRCONF1',\r\n    'ABLAUT',\r\n    'POLYSE',\r\n    'GMORPH0',\r\n    'DMORPH0',\r\n    'PORTMA',\r\n    'GINHER',\r\n    'DINHER',\r\n    'PHRASE',\r\n    'TRAIT',\r\n    'LEMME',\r\n\t'CIRCONF2'\r\n)\r\n\r\n\r\nt_UNSPEC = r'\\:'\r\nt_AFFIX = r'\\-'\r\nt_CLITIC = r'\\='\r\nt_REDUPL = r'\\~'\r\nt_GINFIX = r'\\<'\r\nt_DINFIX = r'\\>'\r\nt_CIRCONF1 = r'[0-9]'\r\nt_ABLAUT = r'\\\\'\r\nt_POLYSE = r'\\/'\r\nt_GMORPH0 = r'\\['\r\nt_DMORPH0 = r'\\]'\r\nt_PORTMA = r'\\.'\r\nt_GINHER = 
r'\\('\nt_DINHER = r'\\)'\nt_PHRASE = r'\\_'\nt_CIRCONF2 = r'\\+'\n\ndef t_LEMME(t):\n r'[a-zéàè][a-zéàè]*'\n return t\n\ndef t_TRAIT(t):\n r'[A-Z1-9][A-Z]*'\n return t\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\nt_ignore = ' \\t'\n\ndef t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\nlex.lex(optimize=1,lextab=\"gloseur\")\n\n# data = 'pomme(M)-P'\n#\n# lexer.input(data)\n#\n#\n# while True:\n# tok = lexer.token()\n# if not tok:\n# break # No more input\n# print(tok)", "repo_name": "Krolov18/Languages", "sub_path": "Gloses/MoodleProject/Gloseur/GloseurLexer.py", "file_name": "GloseurLexer.py", "file_ext": "py", "file_size_in_byte": 1136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ply.lex.lex", "line_number": 65, "usage_type": "call"}, {"api_name": "ply.lex", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "42097653849", "text": "from interaction.protocols.baseprotocol import BaseProtocol\nfrom datetime import datetime\nimport struct\n\n\nclass Modbus(BaseProtocol):\n\n def form_request(self):\n addr = self.dev.addr\n self.mess += addr.to_bytes(1, 'big')\n func = self.qry.func\n self.mess += bytes.fromhex(func)\n req = self.qry.req\n self.mess += bytes.fromhex(req)\n self.mess += self.calc_crc(self.mess)\n\n def response_processing(self):\n if self.check_response():\n func = getattr(self, 'resp_' + self.qry.handler, None)\n if func is not None:\n func()\n else:\n self.db_logger.warning(f'Method resp_{self.qry.handler} for class {self.__class__.__name__} not found')\n else:\n self.db_logger.error(f'Device {self.dev.id} request {self.qry.id}. Response error')\n\n def calc_crc(self, data: bytes) -> bytes:\n crc = 0xFFFF\n poly = 0xA001\n for byte in data:\n crc ^= byte\n for _ in range(8):\n temp = crc & 0x0001\n crc >>= 1\n if temp:\n crc ^= poly\n\n crc16modbus = crc.to_bytes(2, 'little')\n return crc16modbus\n\n def check_response(self) -> bool:\n check = False\n lenresp = len(self.resp)\n if lenresp > 2:\n check = self.calc_crc(self.resp[:lenresp-2]) == self.resp[-2:]\n return check\n\n def rounding(self, factor):\n return len(str(factor).split('.')[1]) if (factor - int(factor)) != 0 else 0\n\n def resp_uint4(self):\n res = self.resp[3:3 + self.resp[2]]\n value = struct.unpack('>I', res[2:] + res[:2])[0]\n if self.qry.factor is not None and self.qry.factor != 1.0:\n value *= self.qry.factor\n value = round(value, self.rounding(self.qry.factor))\n self.tosave = str(value)\n\n def resp_sint2(self):\n res = self.resp[3:3 + self.resp[2]]\n value = struct.unpack('>h', res)[0]\n if self.qry.factor is not None and self.qry.factor != 1.0:\n value *= self.qry.factor\n value = round(value, self.rounding(self.qry.factor))\n self.tosave = str(value)\n\n def resp_datetime4(self):\n res = self.resp[3:3 + self.resp[2]]\n value = struct.unpack('>I', res[2:] + res[:2])[0]\n self.tosave = datetime.utcfromtimestamp(value).strftime('%d.%m.%Y %H:%M:%S')\n", "repo_name": "KonstantinQQ/metersmonitor", "sub_path": "interaction/protocols/modbus.py", "file_name": "modbus.py", "file_ext": "py", "file_size_in_byte": 2424, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "interaction.protocols.baseprotocol.BaseProtocol", "line_number": 6, "usage_type": "name"}, {"api_name": "struct.unpack", "line_number": 53, "usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 61, 
"usage_type": "call"}, {"api_name": "struct.unpack", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "33033330500", "text": "from flask import Flask\nfrom flask_cors import CORS\nfrom flask_restful import reqparse, abort, Api, Resource\nimport datetime as dt\n\nfrom pymongo import MongoClient\nimport pymongo\nimport pandas as pd\nimport unicodedata\n\nMongoIPClient = '34.238.43.143'\nPort = 27025\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\n# des listes permettant de tester la coherences des donnees en entree\nCountry_list = ['AF', 'AL', 'DZ', 'AS', 'AD', 'AO', 'AI', 'AQ', 'AG', 'AR', 'AM', 'AW', 'AU', 'AT', 'AZ', 'BS', 'BH', 'BD', 'BB', 'BY',\n 'BE', 'BZ', 'BJ', 'BM', 'BT', 'BO', 'BQ', 'BA', 'BW', 'BV', 'BR', 'IO', 'BN', 'BG', 'BF', 'BI', 'KH', 'CM', 'CA', 'CV',\n 'KY', 'CF', 'TD', 'CL', 'CN', 'CX', 'CC', 'CO', 'KM', 'CG', 'CD', 'CK', 'CR', 'HR', 'CU', 'CW', 'CY', 'CZ', 'CI', 'DK',\n 'DJ', 'DM', 'DO', 'EC', 'EG', 'SV', 'GQ', 'ER', 'EE', 'ET', 'FK', 'FO', 'FJ', 'FI', 'FR', 'GF', 'PF', 'TF', 'GA', 'GM',\n 'GE', 'DE', 'GH', 'GI', 'GR', 'GL', 'GD', 'GP', 'GU', 'GT', 'GG', 'GN', 'GW', 'GY', 'HT', 'HM', 'VA', 'HN', 'HK', 'HU', \n 'IS', 'IN', 'ID', 'IR', 'IQ', 'IE', 'IM', 'IL', 'IT', 'JM', 'JP', 'JE', 'JO', 'KZ', 'KE', 'KI', 'KP', 'KR', 'KW', 'KG', \n 'LA', 'LV', 'LB', 'LS', 'LR', 'LY', 'LI', 'LT', 'LU', 'MO', 'MK', 'MG', 'MW', 'MY', 'MV', 'ML', 'MT', 'MH', 'MQ', 'MR', \n 'MU', 'YT', 'MX', 'FM', 'MD', 'MC', 'MN', 'ME', 'MS', 'MA', 'MZ', 'MM', 'NR', 'NP', 'NL', 'NC', 'NZ', 'NI', 'NE', 'NG', \n 'NU', 'NF', 'MP', 'NO', 'OM', 'PK', 'PW', 'PS', 'PA', 'PG', 'PY', 'PE', 'PH', 'PN', 'PL', 'PT', 'PR', 'QA', 'RO', 'RU', \n 'RW', 'RE', 'BL', 'SH', 'KN', 'LC', 'MF', 'PM', 'VC', 'WS', 'SM', 'ST', 'SA', 'SN', 'RS', 'SC', 'SL', 'SG', 'SX', 'SK', \n 'SI', 'SB', 'SO', 'ZA', 'GS', 'SS', 'ES', 'LK', 'SD', 'SR', 'SJ', 'SZ', 'SE', 'CH', 'SY', 'TW', 'TJ', 'TZ', 'TH', 'TL', \n 'TG', 'TK', 'TO', 'TT', 'TN', 'TR', 'TM', 'TC', 'TV', 'UG', 'UA', 'AE', 'GB', 'US', 'UM', 'UY', 'UZ', 'VU', 'VE', 'VN', \n 'VG', 'VI', 'WF','EH','YE','ZM','ZW']\n\n\nHorizon_considered = ['201701', '201712']\n\nHorizon_considered = [dt.datetime.strptime(date, '%Y%m') for date in Horizon_considered]\n\n## foncitons permettant de retourner des messages d'erreurs dans un dictionaire sans faire crasher le front\ndef abort_if_country_doesnt_exist(country):\n if country not in Country_list:\n abort(404, message=\"Country {} doesn't exist\".format(country))\n\ndef abort_if_date_format_is_invalid(month):\n try :\n month = dt.datetime.strptime(month, '%Y%m')\n except ValueError:\n abort(404, message=\"Date_format {} or {} doesn't exist\".format(month))\n if month < Horizon_considered[0]:\n abort(404, message=\"Date {} too young\".format(month))\n if month > Horizon_considered[1]:\n abort(404, message=\"Date {} too old\".format(month))\n\n# Requête\nclass api_beahaviour(Resource):\n def get(self, country1, country2, month):\n\n ### On test les variables, init_date, end_date\n abort_if_country_doesnt_exist(country1)\n abort_if_country_doesnt_exist(country2)\n abort_if_date_format_is_invalid(month)\n\n ### Create the request with pymongo (output : les données agregées)\n def Impact(country1, country2, month):\n [imp_pos, imp_neg, mention] = [0, 0, 0]\n \n client = MongoClient(MongoIPClient, Port)\n\n # On recupere le dataset\n db = client.gdelt\n\n # On recupere les donnees 
de country1 sur country2 au mois month sur la collection 'events'\n table = db.events.find({'Actor1Geo_CountryCode': country1, \n 'Actor2Geo_CountryCode': country2, \n 'MonthYear': month})\n \n # On aggrège nos données\n\n for el in table:\n Gold = el['GoldsteinScale']\n NumMen = el['NumMentions']\n\n if Gold > 0:\n imp_pos = imp_pos + (Gold * NumMen)\n else:\n imp_neg = imp_neg + (abs(Gold) * NumMen)\n mention = mention + NumMen\n\n #table = pd.DataFrame(list(table))\n #imp_pos = (table['GoldsteinScale'][table['GoldsteinScale'] > 0].astype(int) * table['NumMentions'][table['GoldsteinScale'] > 0]).sum()\n #imp_neg = (table['GoldsteinScale'][table['GoldsteinScale'] < 0].astype(int) * table['NumMentions'][table['GoldsteinScale'] < 0]).sum()\n\n #mention = table['NumMentions'].sum()\n\n if mention != 0:\n imp_pos = round(imp_pos / mention, 2)\n imp_neg = round(imp_neg / mention, 2)\n\n return imp_pos, imp_neg , mention\n \n ### Add value to the request\n imp1_C1_C2_pos, imp1_C1_C2_neg, mention_C1_C2= Impact(country1, country2, int(month))\n imp1_C2_C1_pos, imp1_C2_C1_neg, mention_C2_C1 = Impact(country2, country1, int(month))\n\n ### Create the answer\n awnser = {'pays1' : country1,\n 'pays2' : country2,\n 'imp1_C1_C2_pos': imp1_C1_C2_pos,\n 'imp1_C1_C2_neg' : imp1_C1_C2_neg,\n 'mention_C1_C2' : mention_C1_C2,\n 'imp1_C2_C1_pos': imp1_C2_C1_pos,\n 'imp1_C2_C1_neg' : imp1_C2_C1_neg,\n 'mention_C2_C1' : mention_C2_C1\n }\n\n return awnser, 201\n \n## Actually setup the Api resource routing here\n\napi.add_resource(api_beahaviour, '///')\n\nif __name__ == '__main__':\n app.run(debug = True)\n", "repo_name": "qdonnars/No_SQL_GDELT_Project", "sub_path": "api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 5710, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask_restful.abort", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask_restful.abort", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_restful.abort", "line_number": 49, "usage_type": "call"}, {"api_name": "flask_restful.abort", "line_number": 51, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 54, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "35929865920", "text": "\"\"\"\nThis module scrapes the distrowatch.com website for the top linux distros.\nORIGINAL REPO: https://github.com/dani0105/free-datasets\nDATE: 2022-10-04\nREQUIREMENTS: requests, bs4, pandas, re\n\"\"\"\n\n\n# IMPORTS\n\n# Imports or providing the absolute date\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n# Imports or handling the data\nimport pandas as pd\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\n\n# Define rs and bs4 objects\nreq = requests.get(\"https://distrowatch.com/dwres.php?resource=popularity\", timeout=5)\nsoup = BeautifulSoup(req.content, 
\"html.parser\")\n\n# Prevents repeating this twice\nphr3 = soup.find_all(\"td\", class_=\"phr3\")\n\n# Actual data\ndistro_names = [i.text for i in soup.find_all(\"td\", class_=\"phr2\")]\nscores = [i.text for i in phr3]\nchange = [re.search(r\"/a(.*?).png\", i.findNext(\"img\")[\"src\"]).group(1) for i in phr3]\n\n# Get absolute date for each set\ntwelve = (datetime.datetime.now() - relativedelta(months=12)).strftime(\"%Y-%m-%d\")\nsix = (datetime.datetime.now() - relativedelta(months=6)).strftime(\"%Y-%m-%d\")\nthree = (datetime.datetime.now() - relativedelta(months=3)).strftime(\"%Y-%m-%d\")\none = (datetime.datetime.now() - relativedelta(months=1)).strftime(\"%Y-%m-%d\")\n\n# Create groups based on timeframe\n\ntwelve_mons = list(\n zip(\n distro_names[0:266],\n [twelve] * 266,\n [datetime.datetime.now().strftime(\"%Y-%m-%d\")] * 266,\n scores[0:266],\n change[0:266],\n )\n)\nsix_mons = list(\n zip(\n distro_names[266:532],\n [six] * 266,\n [datetime.datetime.now().strftime(\"%Y-%m-%d\")] * 266,\n scores[266:532],\n change[266:532],\n )\n)\nthree_mons = list(\n zip(\n distro_names[532:798],\n [three] * 266,\n [datetime.datetime.now().strftime(\"%Y-%m-%d\")] * 266,\n scores[532:798],\n change[532:798],\n )\n)\none_mon = list(\n zip(\n distro_names[798:1064],\n [one] * 266,\n [datetime.datetime.now().strftime(\"%Y-%m-%d\")] * 266,\n scores[798:1064],\n change[798:1064],\n )\n)\n\n# Create dataframe from groups\ndf = pd.DataFrame(\n twelve_mons + six_mons + three_mons + one_mon,\n columns=[\"distro\", \"start_date\", \"end_date\", \"score\", \"change\"],\n)\n\n# Save to csv (this will save in the same directory as where the script is run)\n\ndf.to_csv(\"LINUX_DISTROS.csv\", encoding=\"utf-8\", index=False)\n", "repo_name": "dani0105/free-datasets", "sub_path": "datasets/linux-distros/distro_scraper.py", "file_name": "distro_scraper.py", "file_ext": "py", "file_size_in_byte": 2374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "re.search", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 55, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "73339825125", "text": "\"\"\"Get most recent team data for NYUrban.\"\"\"\nfrom datetime import datetime, timezone\nfrom urllib.parse import parse_qs\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import SessionNotCreatedException, WebDriverException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as e_c\nfrom selenium.webdriver.chrome.options import Options\n\nimport os\n\nfrom config import Config\n\nDRIVER_URL = \"https://sites.google.com/chromium.org/driver/\"\n\n\nclass VbScrape:\n def __init__(self, cnf: Config):\n self.cnf = cnf\n self.url = self.cnf.cnf_value(\"scrape.url\")\n self.log_scrape = self.cnf.cnf_value(\"flag.log_scrape\")\n self.scrape_headless = self.cnf.cnf_value(\"flag.scrape_headless\")\n self.driver = None\n self.html_doc = None\n self.team_name = None\n self.games = []\n self.gym = {}\n\n def get_schedule(self) -> list:\n \"\"\"Get schedule for the current NY Urban season.\"\"\"\n try:\n options = Options()\n if self.scrape_headless:\n options.headless = True\n if self.log_scrape:\n print(\n f\"...setting up {'headless ' if self.scrape_headless else ''}driver\"\n )\n self.driver = webdriver.Chrome(\n executable_path=self.get_chromedriver_path(), options=options\n )\n try:\n self.login()\n self.get_latest_team_page()\n self.parse_team_data()\n self.parse_gym_info()\n finally:\n self.driver.close()\n self.driver = None\n except SessionNotCreatedException as e:\n print(\n f\">>>ERROR: {e.msg}Get the version matching your Chrome browser and place it in '/vendor' from:\\n{DRIVER_URL}\"\n )\n except WebDriverException as e:\n print(\n f\">>>ERROR: Get the latest 'chromedriver' binary from the link below and place it in '/vendor':\\n{DRIVER_URL}\"\n )\n except Exception as e:\n print(f\">>> Some other exception with selenium: {e}\")\n return [self.team_name, self.games, self.gym]\n\n def get_chromedriver_path(self) -> str:\n return os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"vendor\",\n self.cnf.cnf_value(\"scrape.chromedriver\"),\n )\n\n def login(self) -> None:\n \"\"\"Authenticate into NY Urban.\"\"\"\n if not self.driver:\n return None\n if self.log_scrape:\n print(f\"...logging in: {self.url}\")\n self.driver.get(self.url)\n assert \"Volleyball League\" in self.driver.title\n user = self.driver.find_element_by_id(\"username\")\n user.clear()\n user.send_keys(self.cnf.cnf_value(\"scrape.user\"))\n pwd = self.driver.find_element_by_id(\"password\")\n pwd.clear()\n pwd.send_keys(self.cnf.cnf_value(\"scrape.pwd\"))\n pwd.send_keys(Keys.RETURN)\n if self.driver.title == \"Login Problems\":\n raise ValueError(\"invalid credentials\")\n WebDriverWait(self.driver, 10).until(e_c.title_is(\"Team Listing\"))\n\n def get_latest_team_page(self) -> None:\n \"\"\"Get the page source for the most recent team.\"\"\"\n if not self.driver:\n return\n if self.log_scrape:\n print(\"...navigating to latest team details\")\n assert \"Team 
Listing\" in self.driver.title\n team_link = self.driver.find_element_by_xpath(\"//tbody/tr[2]/td/a\")\n team_link.click()\n WebDriverWait(self.driver, 10).until(e_c.title_is(\"Team Detail\"))\n source = self.driver.page_source\n assert \"Division:\" in source\n self.html_doc = source\n\n def parse_team_data(self) -> None:\n \"\"\"\n Parse team data into a list.\n\n Returns a list of games. Each game is a list with the following items:\n [datetime, location, opponent]\n \"\"\"\n if self.log_scrape:\n print(\"...retrieving schedule\")\n soup = BeautifulSoup(self.html_doc, \"html.parser\")\n self.team_name: str = soup.find(\"div\", class_=\"team\").h1.span.text.strip()\n team_table = soup.find(\"div\", class_=\"team_div\").div.table.tbody\n rows = team_table.findChildren(\"tr\", recursive=False)\n for row in rows[1:]:\n cols = row.findChildren(\"td\", recursive=False)\n if \"No Game This Week\" not in cols[3].text:\n self.games.append(\n [\n # TODO check for next-year rollover\n datetime.strptime(\n f\"{cols[0].text.split()[1].strip()} {datetime.now().year} {cols[2].text.strip()}PM -0500\",\n \"%m/%d %Y %I:%M%p %z\",\n ),\n cols[1].div.a.text.strip(),\n next(sub for sub in re.split(\"\\n|\\t\", cols[3].text) if sub),\n ]\n )\n\n def parse_gym_info(self) -> None:\n \"\"\"Get a dict mapping of gym code to name and address.\"\"\"\n if self.log_scrape:\n print(\"...retrieving gyms\")\n soup = BeautifulSoup(self.html_doc, \"html.parser\")\n gym_table = soup.find(\"div\", class_=\"locationcontent\").div.table.tbody\n rows = gym_table.findChildren(\"tr\", recursive=False)\n for row in rows[1:]:\n cols = row.findChildren(\"td\", recursive=False)\n gym_code = cols[0].string\n if gym_code in self.gym:\n continue\n gym_name = cols[1].contents[0].text.strip()\n try:\n address = parse_qs(cols[2].a.attrs.get(\"href\")).get(\"address\")[0]\n except Exception:\n address = None\n self.gym[gym_code] = [gym_name, address]\n\n @staticmethod\n def parse_schedule(schedule: list, upcoming_only=False) -> list:\n \"\"\"Get the schedule with gym names and locations.\"\"\"\n from_ts = datetime.now(timezone.utc) if upcoming_only else datetime(2000, 1, 1)\n games = []\n for one_game in schedule[1]:\n if one_game[0] < from_ts:\n continue\n one_game.extend(schedule[2][one_game[1]])\n games.append(one_game)\n return [schedule[0], games]\n", "repo_name": "ericgarig/vb_gcal", "sub_path": "vb_scrape.py", "file_name": "vb_scrape.py", "file_ext": "py", "file_size_in_byte": 6456, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.Config", "line_number": 22, "usage_type": "name"}, {"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 36, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 43, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.SessionNotCreatedException", "line_number": 54, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.WebDriverException", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 68, "usage_type": "call"}, {"api_name": 
"selenium.webdriver.common.keys.Keys.RETURN", "line_number": 87, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 87, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 90, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.title_is", "line_number": 90, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 90, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 101, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.title_is", "line_number": 101, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 101, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 115, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "re.split", "line_number": 130, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 138, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 156, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "72438329444", "text": "import requests\n\nheaders = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Token 96f4a5ea160a345defdfff0320144e7c469ce70f'\n}\n\n\ndef get_meta_data(ticker):\n url = \"https://api.tiingo.com/tiingo/daily/{}\".format(ticker)\n response = requests.get(url, headers=headers)\n return response.json()\n\n\ndef get_price_data(ticker):\n url = \"https://api.tiingo.com/tiingo/daily/{}/prices\".format(ticker)\n response = requests.get(url, headers=headers)\n return response.json()[0]\n\n\ndef get_histo_price_data(ticker):\n url = \"https://api.tiingo.com/tiingo/daily/{}/prices?startDate=2012-1-1&endDate=2022-3-3\".format(ticker)\n response = requests.get(url, headers=headers)\n return response.json()", "repo_name": "CHAMS1110/stockapp", "sub_path": "apps/home/tiingo.py", "file_name": "tiingo.py", "file_ext": "py", "file_size_in_byte": 717, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "30390214415", "text": "from PIL import Image, ImageEnhance\nimport math\nfrom skimage import data, filters, io\nfrom matplotlib import pyplot as plt\nimport os\nimport cv2\nimport numpy as np\nimport imutils\n\nclass ImageExample(object):\n def __init__(self, fileName, decision=\"\"):\n self.decision = decision\n self.whitePixels = 0.0\n self.grayPixels = 0.0\n self.blackPixels = 0.0\n self.greenPixels = 0.0\n self.bluePixels = 0.0\n self.redPixels = 0.0\n self.yellowPixels = 0.0\n self.darkGreenPixels = 0.0\n self.darkBrownPixels = 
0.0\n self.magentaPixels = 0.0\n self.vividPixels = 0.0\n self.pixels = 0.0\n self.colors = [(0, 0, 0), (255, 255, 255), (191, 195, 201), (0, 0, 255), (255, 0, 0), (255, 255, 0), (255, 0, 255), (0, 115, 0), (150, 60, 0)]\n\n stri = os.getcwd()\n self.binaryImage = cv2.imread(stri + fileName, cv2.IMREAD_GRAYSCALE)\n self.transformImage()\n self.setHuMoments()\n self.colorImage = Image.open(fileName)\n self.colorImage = ImageEnhance.Brightness(self.colorImage).enhance(1.4)\n self.colorImage = ImageEnhance.Contrast(self.colorImage).enhance(1.3)\n self.changeImageColors()\n self.countPixels()\n\n def findNearestColor(self, pixelColor):\n closest_colors = sorted(self.colors, key=lambda color: self.getDistance(color, pixelColor))\n return closest_colors[0]\n \n def changeImageColors(self):\n pixels = self.colorImage.load()\n for i in range(self.colorImage.size[0]):\n for j in range (self.colorImage.size[1]):\n pixels[i,j] = self.findNearestColor(pixels[i,j])\n\n def countPixels(self):\n pixels = self.colorImage.load()\n for i in range(self.colorImage.size[0]):\n for j in range (self.colorImage.size[1]):\n if pixels[i,j] == (0, 0, 0):\n self.blackPixels+=1\n elif pixels[i,j] == (255, 255, 255):\n self.whitePixels+=1\n elif pixels[i,j] == (191, 195, 201):\n self.grayPixels+=1\n elif pixels[i,j] == (0, 0, 255):\n self.bluePixels+=1\n elif pixels[i,j] == (255, 0, 0):\n self.redPixels+=1\n elif pixels[i,j] == (255, 255, 0):\n self.yellowPixels+=1\n elif pixels[i,j] == (255, 0, 255):\n self.magentaPixels+=1\n elif pixels[i,j] == (0, 115, 0):\n self.darkGreenPixels+=1\n elif pixels[i,j] == (150, 60, 0):\n self.darkBrownPixels+=1\n self.pixels+=1\n self.blackPixels = round((self.blackPixels / self.pixels),1)\n self.grayPixels = round((self.grayPixels / self.pixels),1)\n self.whitePixels = round((self.whitePixels / self.pixels),1)\n self.darkBrownPixels = round((self.darkBrownPixels / self.pixels),1)\n self.darkGreenPixels = round((self.darkGreenPixels / self.pixels),1)\n self.vividPixels = round((self.magentaPixels+self.bluePixels+self.yellowPixels+self.redPixels)/self.pixels,1)\n \n def getDistance(self, c1, c2):\n (r1,g1,b1) = c1\n (r2,g2,b2) = c2\n return math.sqrt((r1 - r2)**2 + (g1 - g2) ** 2 + (b1 - b2) **2)\n\n def getString(self):\n exst = \"\"\n exst += \"black:\" + str(self.blackPixels)+\";\"\n exst += \"gray:\" + str(self.grayPixels)+\";\"\n exst += \"white:\" + str(self.whitePixels)+\";\"\n exst += \"vivid:\" + str(self.vividPixels)+\";\"\n exst += \"darkBrown:\" + str(self.darkBrownPixels)+\";\"\n exst += \"darkGreen:\" + str(self.darkGreenPixels)+\";\"\n exst += \"h1:\" + str(self.huMoments[0]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h2:\" + str(self.huMoments[1]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h3:\" + str(self.huMoments[2]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h4:\" + str(self.huMoments[3]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h5:\" + str(self.huMoments[4]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h6:\" + str(self.huMoments[5]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"h7:\" + str(self.huMoments[6]).replace(\"[\",\"\").replace(\"]\",\"\").replace(\".\",\"\")+\";\"\n exst += \"decision:\" + self.decision\n return exst\n\n def transformImage(self):\n _, self.binaryImage = cv2.threshold(self.binaryImage, 128, 255, cv2.THRESH_BINARY)\n\n def 
setHuMoments(self):\n moments = cv2.moments(self.binaryImage)\n self.huMoments = cv2.HuMoments(moments)\n for i in range(0,7):\n if self.huMoments[i]!=0:\n self.huMoments[i] = abs(round(-1* math.copysign(1.0, self.huMoments[i]) * math.log10(abs(self.huMoments[i]))))\n if self.huMoments[0]<3:\n self.huMoments[0]=2\n if self.huMoments[0]>3:\n self.huMoments[0]=4\n if self.huMoments[1]<7:\n self.huMoments[1]=6\n if self.huMoments[1]>8:\n self.huMoments[1]=9\n if self.huMoments[2]<10:\n self.huMoments[2]=9\n if self.huMoments[2]>19:\n self.huMoments[2]=20\n if self.huMoments[3]<10:\n self.huMoments[3]=9\n if self.huMoments[3]>15:\n self.huMoments[3]=16\n if self.huMoments[4]<21:\n self.huMoments[4]=20\n if self.huMoments[4]>31:\n self.huMoments[4]=32\n if self.huMoments[5]<14:\n self.huMoments[5]=13\n if self.huMoments[5]>19:\n self.huMoments[5]=20\n if self.huMoments[6]<21:\n self.huMoments[6]=20\n if self.huMoments[6]>31:\n self.huMoments[6]=32\n", "repo_name": "Koanne/SZI", "sub_path": "ImageExample.py", "file_name": "ImageExample.py", "file_ext": "py", "file_size_in_byte": 5807, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.getcwd", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 31, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 31, "usage_type": "name"}, {"api_name": "PIL.ImageEnhance.Brightness", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 32, "usage_type": "name"}, {"api_name": "PIL.ImageEnhance.Contrast", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.ImageEnhance", "line_number": 33, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 101, "usage_type": "attribute"}, {"api_name": "cv2.moments", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.HuMoments", "line_number": 105, "usage_type": "call"}, {"api_name": "math.copysign", "line_number": 108, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "24786266446", "text": "from __future__ import annotations\n\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport pytest\n\nimport ibis\nimport ibis.expr.datatypes as dt\n\ndd = pytest.importorskip(\"dask.dataframe\")\n\nfrom dask.dataframe.utils import tm # noqa: E402\n\nfrom ibis.backends.dask.execution import execute # noqa: E402\n\n\n@pytest.fixture(scope=\"module\")\ndef value():\n return OrderedDict([(\"fruit\", \"pear\"), (\"weight\", 0)])\n\n\n@pytest.fixture(scope=\"module\")\ndef struct_client(value, npartitions):\n df = dd.from_pandas(\n pd.DataFrame(\n {\n \"s\": [\n OrderedDict([(\"fruit\", \"apple\"), (\"weight\", None)]),\n value,\n OrderedDict([(\"fruit\", \"pear\"), (\"weight\", 1)]),\n ],\n \"key\": list(\"aab\"),\n \"value\": [1, 2, 3],\n }\n ),\n npartitions=npartitions,\n )\n return ibis.dask.connect({\"t\": df})\n\n\n@pytest.fixture\ndef struct_table(struct_client):\n return struct_client.table(\n \"t\",\n schema={\n \"s\": dt.Struct.from_tuples([(\"fruit\", dt.string), (\"weight\", dt.int8)])\n },\n )\n\n\ndef test_struct_field_literal(value):\n struct = ibis.literal(value)\n 
assert struct.type() == dt.Struct.from_tuples(\n [(\"fruit\", dt.string), (\"weight\", dt.int8)]\n )\n\n expr = struct[\"fruit\"]\n result = execute(expr.op())\n assert result == \"pear\"\n\n expr = struct[\"weight\"]\n result = execute(expr.op())\n assert result == 0\n\n\ndef test_struct_field_series(struct_table):\n t = struct_table\n expr = t.s[\"fruit\"]\n result = expr.compile()\n expected = dd.from_pandas(\n pd.Series([\"apple\", \"pear\", \"pear\"], name=\"fruit\"),\n npartitions=1,\n )\n tm.assert_series_equal(result.compute(), expected.compute(), check_index=False)\n\n\ndef test_struct_field_series_group_by_key(struct_table):\n t = struct_table\n expr = t.group_by(t.s[\"fruit\"]).aggregate(total=t.value.sum())\n result = expr.compile()\n expected = dd.from_pandas(\n pd.DataFrame([(\"apple\", 1), (\"pear\", 5)], columns=[\"fruit\", \"total\"]),\n npartitions=1,\n )\n tm.assert_frame_equal(\n result.compute().reset_index(drop=True),\n expected.compute().reset_index(drop=True),\n )\n\n\ndef test_struct_field_series_group_by_value(struct_table):\n t = struct_table\n expr = t.group_by(t.key).aggregate(total=t.s[\"weight\"].sum())\n result = expr.compile()\n # these are floats because we have a NULL value in the input data\n expected = dd.from_pandas(\n pd.DataFrame([(\"a\", 0.0), (\"b\", 1.0)], columns=[\"key\", \"total\"]),\n npartitions=1,\n )\n tm.assert_frame_equal(\n result.compute().reset_index(drop=True),\n expected.compute().reset_index(drop=True),\n )\n", "repo_name": "ibis-project/ibis", "sub_path": "ibis/backends/dask/tests/execution/test_structs.py", "file_name": "test_structs.py", "file_ext": "py", "file_size_in_byte": 2790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3246, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytest.importorskip", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 20, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 31, "usage_type": "call"}, {"api_name": "ibis.dask.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "ibis.dask", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 23, "usage_type": "call"}, {"api_name": "ibis.expr.datatypes.Struct.from_tuples", "line_number": 47, "usage_type": "call"}, {"api_name": "ibis.expr.datatypes.Struct", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ibis.expr.datatypes", "line_number": 47, "usage_type": "name"}, {"api_name": "ibis.expr.datatypes.string", "line_number": 47, "usage_type": "attribute"}, {"api_name": "ibis.expr.datatypes.int8", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 42, "usage_type": "attribute"}, {"api_name": "ibis.literal", "line_number": 53, "usage_type": "call"}, {"api_name": "ibis.expr.datatypes.Struct.from_tuples", "line_number": 54, "usage_type": "call"}, {"api_name": "ibis.expr.datatypes.Struct", "line_number": 54, "usage_type": "attribute"}, {"api_name": "ibis.expr.datatypes", "line_number": 54, "usage_type": "name"}, {"api_name": "ibis.expr.datatypes.string", "line_number": 55, "usage_type": "attribute"}, {"api_name": "ibis.expr.datatypes", "line_number": 55, "usage_type": "name"}, {"api_name": "ibis.expr.datatypes.int8", 
"line_number": 55, "usage_type": "attribute"}, {"api_name": "ibis.backends.dask.execution.execute", "line_number": 59, "usage_type": "call"}, {"api_name": "ibis.backends.dask.execution.execute", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 72, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm.assert_series_equal", "line_number": 75, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm", "line_number": 75, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm.assert_frame_equal", "line_number": 86, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm", "line_number": 86, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 98, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm.assert_frame_equal", "line_number": 101, "usage_type": "call"}, {"api_name": "dask.dataframe.utils.tm", "line_number": 101, "usage_type": "name"}]} +{"seq_id": "24947176960", "text": "\"\"\"\nDetails the various flask endpoints for processing and retrieving\ncommand details as well as a swagger spec endpoint\n\"\"\"\n\nfrom multiprocessing import Process, Queue\nimport sys\nfrom flask import Flask, request, jsonify\nfrom flask_swagger import swagger\nimport json\nfrom db import session, engine\nfrom base import Base, Command\nfrom command_parser import get_valid_commands, process_command_output\nimport os\napp = Flask(__name__)\n\n\n@app.route('/commands', methods=['GET'])\ndef get_command_output():\n \"\"\"\n Returns as json the command details that have been processed\n ---\n tags: [commands]\n responses:\n 200:\n description: Commands returned OK\n 400:\n description: Commands not found\n \"\"\"\n commands = session.query(Command)\n result=session.execute(commands)\n json_data=[]\n for r in result:\n json_data.append({\n 'id' : r[0],\n 'command_string' : r[1],\n 'length' : r[2],\n 'duration' : r[3],\n 'output' : r[4].decode()\n })\n if not json_data:\n return \"Commands not found\"\n json_data = json.dumps(json_data)\n return jsonify(json.loads(json_data))\n\n\n@app.route('/commands/', methods=['GET'])\ndef get_command_id(command_id):\n \"\"\"\n Returns as json the command details that have been processed\n ---\n tags: [commands]\n responses:\n 200:\n description: Commands returned OK\n 400:\n description: Commands not found\n \"\"\"\n commands = session.query(Command)\n commands=(str(commands))\n commands+=\" where commands.id={0}\".format(command_id)\n result = session.execute(commands)\n json_data = []\n for r in result:\n json_data.append({\n 'id': r[0],\n 'command_string': r[1],\n 'length': r[2],\n 'duration': r[3],\n 'output': r[4].decode()\n })\n if not json_data:\n return \"Commands not found\"\n json_data = json.dumps(json_data)\n return jsonify(json.loads(json_data))\n # json_data=json.dumps([dict(r) for r in result])\n\n\n@app.route('/commands', methods=['POST'])\ndef process_commands():\n \"\"\"\n Processes commmands from a command list\n ---\n tags: [commands]\n parameters:\n - name: filename\n in: formData\n description: filename of the commands text file to parse\n required: true\n type: string\n responses:\n 200:\n description: Processing OK\n \"\"\"\n fi = request.args.get('filename')\n print(fi)\n file_data = request.args.get('file_data')\n print(file_data)\n if file_data is not None:\n fi=\"commands_data.txt\"\n with open(fi,'w') as f:\n file_data=file_data.split(\"\\\\n\")\n print(file_data)\n for data in file_data:\n 
f.write(data+\"\\n\")\n if fi is None:\n return \"Processing Error\"\n queue = Queue()\n get_valid_commands(queue, fi)\n processes = [Process(target=process_command_output, args=(queue,session,))\n for num in range(3)]\n for process in processes:\n process.start()\n #for process in processes:\n # process.join()\n if file_data is not None:\n os.remove(\"commands_data.txt\")\n pass\n return 'Successfully processed commands.'\n\n\n@app.route('/database', methods=['POST'])\ndef make_db():\n \"\"\"\n Creates database schema\n ---\n tags: [db]\n responses:\n 200:\n description: DB Creation OK\n \"\"\"\n Base.metadata.create_all(engine)\n return 'Database creation successful.'\n\n\n@app.route('/database', methods=['DELETE'])\ndef drop_db():\n \"\"\"\n Drops all db tables\n ---\n tags: [db]\n responses:\n 200:\n description: DB table drop OK\n \"\"\"\n Base.metadata.drop_all(engine)\n return 'Database deletion successful.'\n\n\nif __name__ == '__main__':\n \"\"\"\n Starts up the flask server\n \"\"\"\n port = 8080\n use_reloader = True\n\n # provides some configurable options\n for arg in sys.argv[1:]:\n if '--port' in arg:\n port = int(arg.split('=')[1])\n elif '--use_reloader' in arg:\n use_reloader = arg.split('=')[1] == 'true'\n\n app.run(port=port, debug=True, use_reloader=use_reloader)\n\n\n@app.route('/spec')\ndef swagger_spec():\n \"\"\"\n Display the swagger formatted JSON API specification.\n ---\n tags: [docs]\n responses:\n 200:\n description: OK status\n \"\"\"\n spec = swagger(app)\n spec['info']['title'] = \"Nervana cloud challenge API\"\n spec['info']['description'] = (\"Nervana's cloud challenge \" +\n \"for interns and full-time hires\")\n spec['info']['license'] = {\n \"name\": \"Nervana Proprietary License\",\n \"url\": \"http://www.nervanasys.com\",\n }\n spec['info']['contact'] = {\n \"name\": \"Nervana Systems\",\n \"url\": \"http://www.nervanasys.com\",\n \"email\": \"info@nervanasys.com\",\n }\n spec['schemes'] = ['http']\n spec['tags'] = [\n {\"name\": \"db\", \"description\": \"database actions (create, delete)\"},\n {\"name\": \"commands\", \"description\": \"process and retrieve commands\"}\n ]\n return jsonify(spec)\n", "repo_name": "kasirajanss93/intel-cloud-challenge", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5176, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "db.session.query", "line_number": 30, "usage_type": "call"}, {"api_name": "base.Command", "line_number": 30, "usage_type": "argument"}, {"api_name": "db.session", "line_number": 30, "usage_type": "name"}, {"api_name": "db.session.execute", "line_number": 31, "usage_type": "call"}, {"api_name": "db.session", "line_number": 31, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "db.session.query", "line_number": 59, "usage_type": "call"}, {"api_name": "base.Command", "line_number": 59, "usage_type": "argument"}, {"api_name": "db.session", "line_number": 59, "usage_type": "name"}, {"api_name": "db.session.execute", "line_number": 62, "usage_type": "call"}, {"api_name": "db.session", "line_number": 62, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.jsonify", 
"line_number": 75, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "multiprocessing.Queue", "line_number": 108, "usage_type": "call"}, {"api_name": "command_parser.get_valid_commands", "line_number": 109, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 110, "usage_type": "call"}, {"api_name": "command_parser.process_command_output", "line_number": 110, "usage_type": "name"}, {"api_name": "db.session", "line_number": 110, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 117, "usage_type": "call"}, {"api_name": "base.Base.metadata.create_all", "line_number": 132, "usage_type": "call"}, {"api_name": "db.engine", "line_number": 132, "usage_type": "argument"}, {"api_name": "base.Base.metadata", "line_number": 132, "usage_type": "attribute"}, {"api_name": "base.Base", "line_number": 132, "usage_type": "name"}, {"api_name": "base.Base.metadata.drop_all", "line_number": 146, "usage_type": "call"}, {"api_name": "db.engine", "line_number": 146, "usage_type": "argument"}, {"api_name": "base.Base.metadata", "line_number": 146, "usage_type": "attribute"}, {"api_name": "base.Base", "line_number": 146, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask_swagger.swagger", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "9680100789", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom bs4 import BeautifulSoup\nfrom django.shortcuts import render\nimport re\nfrom xml.dom.minidom import parse, parseString\nfrom .models import Bsi, Bsm, Bsds, Bshf, Cci, Ccm, Ccds, Cchf, Chi, Chm, Chds, Chhf, Mi, Mm, Mds, Mhf\n#remember to import models\n\n\ndef index(request):\n \"\"\"\n View function for home page of site.\n \"\"\"\n # Render the HTML template index.html with the data in the context variable\n htmlDoc = open(\"infopage/templates/index.html\")\n soup = BeautifulSoup(htmlDoc, 'lxml')\n findBsi = soup.find(id='bsi')\n findBsm = soup.find(id='bsm')\n findBsds = soup.find(id='bsds')\n findBshf = soup.find(id='bshf')\n findCci = soup.find(id='cci')\n findCcm = soup.find(id='ccm')\n findCcds = soup.find(id='ccds')\n findCchf = soup.find(id='cchf')\n findChi = soup.find(id='chi')\n findChm = soup.find(id='chm')\n findChds = soup.find(id='chds')\n numBsi = Bsi.objects.count()\n numBsm = Bsm.objects.count()\n numBsds = Bsds.objects.count()\n numBshf = Bshf.objects.count()\n numCci = Cci.objects.count()\n numCcm = Ccm.objects.count()\n numCcds = Ccds.objects.count()\n numCchf = Cchf.objects.count()\n numChi = Chi.objects.count()\n numChm = Chm.objects.count()\n numChds = Chds.objects.count()\n print(numCci)\n\n\n if numBsi > 0 and numBsi <= 1:\n addAttBsi = findBsi['style'] = \"top: 4px; left: 50px; width: 15px; height: 15px;\"\n elif numBsi >= 2 and numBsi <=3:\n addAttBsi = findBsi['style'] = \"top: 1px; left: 46px; width: 25px; height: 25px;\"\n elif numBsi > 3:\n addAttBsi = 
findBsi['style'] = \"top: -3px; left: 40px; width: 35px; height: 35px;\"\n else:\n addAttBsi = findBsi['style'] = \"width: 0px; height: 0px;\"\n\n if numBsm > 0 and numBsm <= 1:\n addAttBsm = findBsm['style'] = \"top: 4px; left: 95px; width: 15px; height: 15px;\"\n elif numBsm >= 2 and numBsm <= 3:\n addAttBsm = findBsm['style'] = \"top: 1px; left: 90px; width: 25px; height: 25px;\"\n elif numBsm > 3:\n addAttBsm = findBsm['style'] = \"top: -3px; left: 85px; width: 35px; height: 35px;\"\n else:\n addAttBsm = findBsm['style'] = \"width: 0px; height: 0px;\"\n\n if numBsds > 0 and numBsds <= 1:\n addAttBsds = findBsds['style'] = \"top: 4px; left: 140px; width: 15px; height: 15px;\"\n elif numBsds >= 2 and numBsds <= 3:\n addAttBsds = findBsds['style'] = \"top: 1px; left: 135px; width: 25px; height: 25px;\"\n elif numBsds > 3:\n addAttBsds = findBsds['style'] = \"top: -3px; left: 130px; width: 35px; height: 35px;\"\n else:\n addAttBsds = findBsds['style'] = \"width: 0px; height: 0px;\"\n\n if numBshf > 0 and numBshf <= 1:\n addAttBshf = findBshf['style'] = \"top: 4px; left: 185px; width: 15px; height: 15px;\"\n elif numBshf >= 2 and numBshf <= 3:\n addAttBshf = findBshf['style'] = \"top: 1px; left: 180px; width: 25px; height: 25px;\"\n elif numBshf > 3:\n addAttBshf = findBshf['style'] = \"top: -3px; left: 175px; width: 35px; height: 35px;\"\n else:\n addAttBshf = findBshf['style'] = \"width: 0px; height: 0px;\"\n\n\n if numCci > 0 and numCci <= 1:\n addAttCci = findCci['style'] = \"top: 49px; left: 50px; width: 15px; height: 15px;\"\n elif numCci >= 2 and numCci <= 3:\n addAttCci = findCci['style'] = \"top: 44px; left: 46px; width: 25px; height: 25px;\"\n elif numCci > 3:\n addAttCci = findCci['style'] = \"top: 39px; left: 40px; width: 35px; height: 35px;\"\n else:\n addAttCci = findCci['style'] = \"width: 0px; height: 0px;\"\n\n if numCcm > 0 and numCcm <= 1:\n addAttCcm = findCcm['style'] = \"top: 49px; left: 95px; width: 15px; height: 15px;\"\n elif numCcm >= 2 and numCcm <= 3:\n addAttCcm = findCcm['style'] = \"top: 44px; left: 140px; width: 25px; height: 25px;\"\n elif numCcm > 3:\n addAttCcm = findCcm['style'] = \"top: 39px; left: 185px; width: 35px; height: 35px;\"\n else:\n addAttCcm = findCcm['style'] = \"width: 0px; height: 0px;\"\n\n if numCcds > 0 and numCcds <= 1:\n addAttCcds = findCcds['style'] = \"top: 49px; left: 140px; width: 15px; height: 15px;\"\n elif numCcds >= 2 and numCcds <= 3:\n addAttCcds = findCcds['style'] = \"top: 44px; left: 135px; width: 25px; height: 25px;\"\n elif numCcds > 3:\n addAttCcds = findCcds['style'] = \"top: 39px; left: 130px; width: 35px; height: 35px;\"\n else:\n addAttCcds = findCcds['style'] = \"width: 0px; height: 0px;\"\n\n if numCchf > 0 and numCchf <= 1:\n addAttCchf = findCchf['style'] = \"top: 49px; left: 185px; width: 15px; height: 15px;\"\n elif numCchf >= 2 and numCchf <= 3:\n addAttCchf = findCchf['style'] = \"top: 44px; left: 180px; width: 25px; height: 25px;\"\n elif numCchf > 3:\n addAttCchf = findCchf['style'] = \"top: 39px; left: 175px; width: 35px; height: 35px;\"\n else:\n addAttCchf = findCchf['style'] = \"width: 0px; height: 0px;\"\n\n if numChi > 0 and numChi <= 1:\n addAttChi = findChi['style'] = \"top: 94px; left: 50px; width: 15px; height: 15px;\"\n elif numChi >= 2 and numChi <= 3:\n addAttChi = findChi['style'] = \"top: 139px; left: 46px; width: 25px; height: 25px;\"\n elif numChi > 3:\n addAttChi = findChi['style'] = \"top: 184px; left: 40px; width: 35px; height: 35px;\"\n else:\n addAttChi = findChi['style'] = \"width: 
0px; height: 0px;\"\n\n if numChm > 0 and numChm <= 1:\n addAttChm = findChm['style'] = \"top: 94px; left: 95px; width: 15px; height: 15px;\"\n elif numChm >= 2 and numChm <= 3:\n addAttChm = findChm['style'] = \"top: 139px; left: 135px; width: 25px; height: 25px;\"\n elif numChm > 3:\n addAttChm = findChm['style'] = \"top: 184px; left: 180px; width: 35px; height: 35px;\"\n else:\n addAttChm = findChm['style'] = \"width: 0px; height: 0px;\"\n\n if numChds > 0 and numChds <= 1:\n addAttChds = findChds['style'] = \"top: 94px; left: 95px; width: 15px; height: 15px;\"\n elif numChds >= 2 and numChds <= 3:\n addAttChds = findChds['style'] = \"top: 139px; left: 135px; width: 25px; height: 25px;\"\n elif numChds > 3:\n addAttChds = findChds['style'] = \"top: 184px; left: 180px; width: 35px; height: 35px;\"\n else:\n addAttChds = findChds['style'] = \"width: 0px; height: 0px;\"\n\n\n\n htmlDoc.close()\n html = soup.prettify(\"utf-8\")\n\n with open(\"infopage/templates/index.html\", \"wb\") as output:\n output.write(html)\n\n\n return render(\n request,\n 'index.html',\n context={},\n )\n", "repo_name": "KrystalPhuar/digitalhealth", "sub_path": "Desktop/dhWithYing/digitalhealth/infopage/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Bsi.objects.count", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Bsi.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Bsi", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Bsm.objects.count", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Bsm.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Bsm", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Bsds.objects.count", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Bsds.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Bsds", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Bshf.objects.count", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Bshf.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Bshf", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Cci.objects.count", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Cci.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Cci", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Ccm.objects.count", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Ccm.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.Ccm", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Ccds.objects.count", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Ccds.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.Ccds", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Cchf.objects.count", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Cchf.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Cchf", "line_number": 36, "usage_type": "name"}, {"api_name": "models.Chi.objects.count", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Chi.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Chi", 
"line_number": 37, "usage_type": "name"}, {"api_name": "models.Chm.objects.count", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Chm.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Chm", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Chds.objects.count", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Chds.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Chds", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "42699221417", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rom.models import CharacterBase, CharacterJob\nfrom rom.form_character import CharacterForm, CharacterWGForm\nfrom django.db.models import Q\nfrom guild.models import WAR_TYPE\n\n\nclass CharacterManagement(object):\n def __init__(self, user, base_id=None):\n self.user = user\n if base_id is not None:\n try:\n self.base = self.user.bases.get(pk=base_id)\n except CharacterBase.DoesNotExist:\n raise Exception(\"%s DoesNotExist\" % base_id)\n else:\n self.base = None\n\n def push_base(self, form_json, hash_form):\n ign = form_json['ign']\n base_level = form_json['base_level']\n contribution = form_json['contribution']\n gold_medal = form_json['gold_medal']\n job_ids = form_json['jobs']\n\n self.base = CharacterBase(\n member=self.user,\n ign=ign,\n base_level=base_level,\n contribution=contribution,\n gold_medal=gold_medal,\n )\n\n data = dict()\n data['hash_form'] = hash_form\n self.base.update_data(data_dict=data)\n self.base.save()\n\n self.__push_jobs(job_ids=job_ids)\n return self.base\n\n def update_base(self, form_json, hash_form):\n if self.base is not None:\n ign = form_json['ign']\n base_level = form_json['base_level']\n contribution = form_json['contribution']\n gold_medal = form_json['gold_medal']\n job_ids = form_json['jobs']\n\n self.base.member = self.user\n self.base.ign = ign\n self.base.base_level = base_level\n self.base.contribution = contribution\n self.base.gold_medal = gold_medal\n\n data = dict()\n data['hash_form'] = hash_form\n self.base.update_data(data_dict=data)\n self.base.save()\n\n self.__push_jobs(job_ids=job_ids)\n if self.__guild_exists():\n woe = form_json['woe_job']\n woc = form_json['woc_job']\n zone = form_json['zone_job']\n self.__push_war_job(woe=woe, woc=woc, zone=zone)\n\n return self.base\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def delete_base(self):\n if self.base is not None:\n self.base.delete()\n return 0\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def __push_jobs(self, job_ids):\n job_ids = [int(job_id) for job_id in job_ids]\n if self.base is not None:\n job_ids_old = [[job_ch.job.pk, job_ch] for job_ch in self.base.jobs.all()]\n\n for job_id_old, job_ch in job_ids_old:\n if job_id_old not in job_ids:\n job_ch.delete()\n\n for job_id in job_ids:\n self.__push_job(\n job_id=job_id,\n )\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def __push_job(self, job_id):\n if not self.base.check_job(job_id=job_id):\n job = CharacterJob(\n base=self.base,\n job_id=job_id,\n )\n job.save()\n return job\n\n def __push_war_job(self, woe, woc, zone):\n war_woe = self.base.guild_war_jobs.filter(war=0).first()\n war_woe.job = self.base.jobs.get(job_id=woe).job\n war_woe.save()\n war_woc = self.base.guild_war_jobs.filter(war=1).first()\n war_woc.job = 
self.base.jobs.get(job_id=woc).job\n war_woc.save()\n war_zone = self.base.guild_war_jobs.filter(war=2).first()\n war_zone.job = self.base.jobs.get(job_id=zone).job\n war_zone.save()\n\n def get_base(self):\n if self.base is not None:\n return self.__base_dto(self.base)\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def get_bases(self, filter_list=[]):\n bases = list()\n bases_obj = self.user.bases.all()\n\n for f, v in filter_list:\n if f == 'join_guild':\n bases_obj = bases_obj.filter(Q(waiting__guild=v) | Q(waiting__isnull=True), guild__isnull=True)\n\n for b in bases_obj:\n bases.append(self.__base_dto(b))\n return bases\n\n @staticmethod\n def __base_dto(base):\n base_dto = dict()\n base_dto['id'] = base.pk\n base_dto['ign'] = base.ign\n base_dto['base_level'] = base.base_level\n base_dto['contribution'] = base.contribution\n base_dto['gold_medal'] = base.gold_medal\n\n jobs_ch = base.jobs.all()\n jobs = []\n for j in jobs_ch:\n job = dict()\n job['id'] = j.pk\n job['job_id'] = j.job.pk\n job['job_name'] = j.job.name\n job['job_image'] = j.job.image\n jobs.append(job)\n\n base_dto['jobs'] = jobs\n\n guild_m = base.guild.first()\n guild_obj = None\n if guild_m:\n guild_obj = guild_m.guild\n if guild_obj is not None:\n guild = dict()\n guild['guild_id'] = guild_obj.pk\n guild['guild_name'] = guild_obj.name\n guild['guild_image'] = guild_obj.image\n guild['invite_code'] = guild_obj.invite_code\n guild['guild_data'] = guild_obj.get_data_json()\n for war_id, war in WAR_TYPE:\n if base.guild_war_jobs.filter(war=war_id).first().job:\n guild[war] = base.guild_war_jobs.filter(war=war_id).first().job.pk\n else:\n guild[war] = None\n else:\n guild = None\n base_dto['guild'] = guild\n\n waiting_approve = base.waiting.first()\n w_guild_obj = None\n if waiting_approve:\n w_guild_obj = waiting_approve.guild\n if w_guild_obj is not None:\n waiting = dict()\n waiting['guild_id'] = w_guild_obj.pk\n waiting['guild_name'] = w_guild_obj.name\n waiting['guild_image'] = w_guild_obj.image\n waiting['invite_code'] = w_guild_obj.invite_code\n waiting['guild_data'] = w_guild_obj.get_data_json()\n else:\n waiting = None\n base_dto['waiting'] = waiting\n\n return base_dto\n\n def __guild_exists(self):\n return self.base.guild.all().exists()\n\n def get_request_form(self, request):\n if self.base is not None:\n if self.__guild_exists():\n return CharacterWGForm(request.POST)\n return CharacterForm(request.POST)\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def get_form_base(self):\n if self.base is not None:\n if self.__guild_exists():\n return self.__generate_form_wg()\n return self.__generate_form()\n else:\n raise Exception(\"Please init CharacterManagement with base_id\")\n\n def __generate_form(self):\n base_dto = self.get_base()\n\n initial = dict()\n initial['ign'] = base_dto['ign']\n initial['base_level'] = base_dto['base_level']\n initial['contribution'] = base_dto['contribution']\n initial['gold_medal'] = base_dto['gold_medal']\n job_ids = list()\n for job in base_dto['jobs']:\n job_ids.append(job['job_id'])\n initial['jobs'] = job_ids\n\n form = CharacterForm(initial=initial)\n return form\n\n def __generate_form_wg(self):\n base_dto = self.get_base()\n\n initial = dict()\n initial['ign'] = base_dto['ign']\n initial['base_level'] = base_dto['base_level']\n initial['contribution'] = base_dto['contribution']\n initial['gold_medal'] = base_dto['gold_medal']\n job_ids = list()\n for job in base_dto['jobs']:\n job_ids.append(job['job_id'])\n 
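 # carry over the selected jobs, then prefill the stored guild war-role assignments below\n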
initial['jobs'] = job_ids\n initial['woe_job'] = base_dto['guild']['woe']\n initial['woc_job'] = base_dto['guild']['woc']\n initial['zone_job'] = base_dto['guild']['zone']\n\n form = CharacterWGForm(initial=initial)\n return form\n\n def get_hash_form(self):\n return self.base.get_data_json('hash_form')\n", "repo_name": "iArraylist/jgm", "sub_path": "rom/services/character_management.py", "file_name": "character_management.py", "file_ext": "py", "file_size_in_byte": 8325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rom.models.CharacterBase.DoesNotExist", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rom.models.CharacterBase", "line_number": 16, "usage_type": "name"}, {"api_name": "rom.models.CharacterBase", "line_number": 28, "usage_type": "call"}, {"api_name": "rom.models.CharacterJob", "line_number": 99, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 129, "usage_type": "call"}, {"api_name": "guild.models", "line_number": 161, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 162, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 163, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 164, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 165, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 166, "usage_type": "name"}, {"api_name": "guild.models.WAR_TYPE", "line_number": 167, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 169, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 171, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 173, "usage_type": "name"}, {"api_name": "guild.models", "line_number": 174, "usage_type": "name"}, {"api_name": "rom.form_character.CharacterWGForm", "line_number": 199, "usage_type": "call"}, {"api_name": "rom.form_character.CharacterForm", "line_number": 200, "usage_type": "call"}, {"api_name": "rom.form_character.CharacterForm", "line_number": 225, "usage_type": "call"}, {"api_name": "rom.form_character.CharacterWGForm", "line_number": 244, "usage_type": "call"}]} +{"seq_id": "21437229902", "text": "import glob\nimport pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.lines import Line2D\nfrom tqdm import tqdm\nimport torch\nimport argparse\nimport torch.nn.functional as F\nfrom torch.distributions import Bernoulli\n\nconditions = [\"equal\", \"unequal\"]\nagents = [\"human\", \"raw\", \"fitted\"]\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--agent\", type=str, required=True, choices=agents)\nparser.add_argument(\"--condition\", type=str, required=True, choices=conditions)\nargs = parser.parse_args()\n\nif args.agent == 'raw':\n probs = torch.stack([torch.load('data/loo_llama_fixed_model=65B_fold=' + str(i) + '.pth', map_location='cpu')[1] for i in range(100)], dim=-1)\n splits = torch.load('data/splits.pth')\n action_mat = Bernoulli(probs=probs).sample()\n actions = torch.zeros(probs.numel())\n counter = 0\n for i in range(probs.shape[0]):\n for j in range(probs.shape[1]):\n actions[splits[i, j]] = action_mat[i, j]\n\n\nif args.agent == 'fitted':\n logits = torch.stack([torch.load('data/loo_centaur_fixed_model=65B_fold=' + str(i) + '.pth', map_location='cpu')[2] for i in range(100)], dim=-1)\n splits = torch.load('data/splits.pth')\n action_mat = Bernoulli(logits=logits).sample()\n actions = 
torch.zeros(logits.numel())\n counter = 0\n for i in range(logits.shape[0]):\n for j in range(logits.shape[1]):\n actions[splits[i, j]] = action_mat[i, j]\n\n# preprocess data\ndf = pd.read_csv('data/exp1.csv')\nforced_choices_1 = np.zeros(len(df))\nchoices_raw = np.zeros(len(df))\nchoices_fitted = np.zeros(len(df))\nnum_participants = df.participant.max() + 1\nnum_tasks = df.task.max() + 1\nrow_counter1 = 0\nrow_counter2 = 0\nfor participant in tqdm(range(num_participants)):\n df_participant = df[df['participant'] == participant]\n for task in range(num_tasks):\n df_task = df_participant[df_participant['task'] == task]\n choices_1 = df_task[df_task.trial < 4].choice.sum()\n num_trials = df_task.trial.max() + 1\n row_counter1 = row_counter1 + 4\n for trial in range(4, num_trials):\n forced_choices_1[row_counter1] = choices_1\n if args.agent == 'raw':\n choices_raw[row_counter1] = actions[row_counter2].item()\n if args.agent == 'fitted':\n choices_fitted[row_counter1] = actions[row_counter2].item()\n row_counter1 = row_counter1 + 1\n row_counter2 = row_counter2 + 1\n\ndf['forced_choices_1'] = forced_choices_1.astype('int')\nif args.agent == 'raw':\n df['choice'] = choices_raw\nif args.agent == 'fitted':\n df['choice'] = choices_fitted\nif args.condition == 'equal':\n # extract data\n df = df[(df.trial == 4) & (df.forced_choices_1 == 2)]\n reward_differences = (df.expected_reward0 - df.expected_reward1).to_numpy().astype(float)\n choices = 1 - df.choice.to_numpy()\n horizon = (df[(df.trial == 4) & (df.forced_choices_1 == 2)].horizon == 10).to_numpy().astype(float)\n interaction = horizon * reward_differences\n\n log_reg = sm.Logit(choices, np.stack((reward_differences, horizon, interaction, np.ones(reward_differences.shape)), axis=-1)).fit()\n\nelif args.condition == 'unequal':\n # case: x3 1\n df_31 = df[(df.trial == 4) & (df.forced_choices_1 == 3)]\n reward_differences_31 = (df_31.expected_reward0 - df_31.expected_reward1).to_numpy().astype(float)\n choices_31 = 1 - df_31.choice.to_numpy()\n horizons_31 = (df[(df.trial == 4) & (df.forced_choices_1 == 3)].horizon == 10).to_numpy().astype(float)\n\n # case: x3 0\n df_13 = df[(df.trial == 4) & (df.forced_choices_1 == 1)]\n reward_differences_13 = (df_13.expected_reward1 - df_13.expected_reward0).to_numpy().astype(float)\n choices_13 = df_13.choice.to_numpy()\n horizons_13 = (df[(df.trial == 4) & (df.forced_choices_1 == 1)].horizon == 10).to_numpy().astype(float)\n\n choices = np.concatenate((choices_31, choices_13), axis=0)\n reward_differences = np.concatenate((reward_differences_31, reward_differences_13), axis=0)\n horizon = np.concatenate((horizons_31, horizons_13), axis=0)\n interaction = horizon * reward_differences\n\n log_reg = sm.Logit(choices, np.stack((reward_differences, horizon, interaction, np.ones(reward_differences.shape)), axis=-1)).fit()\n\nx_reward_differences = np.linspace(-30, 30, 1000)\nx_horizon6 = np.ones(1000)\nx_6 = np.stack((x_reward_differences, x_horizon6, x_horizon6 * x_reward_differences, np.ones(1000)), axis=-1)\ny_6 = log_reg.predict(x_6)\n\nx_reward_differences = np.linspace(-30, 30, 1000)\nx_horizon1 = np.zeros(1000)\nx_1 = np.stack((x_reward_differences, x_horizon1, x_horizon1 * x_reward_differences, np.ones(1000)), axis=-1)\ny_1 = log_reg.predict(x_1)\n\n# make plot\nplt.rcParams[\"figure.figsize\"] = (1.8,2.0)\nplt.plot(x_1[:, 0], y_1, color='C0' if args.condition == 'equal' else 'C1')\nplt.plot(x_6[:, 0], y_6, color='C0' if args.condition == 'equal' else 'C1', ls='--')\nsns.despine()\nplt.xlabel('Reward 
difference', size=9)\nif args.condition == 'equal':\n plt.ylabel('p(first option)', size=9)\nelse:\n plt.ylabel('p(more informative)', size=9)\n\nplt.legend(['Horizon 1', 'Horizon 6',], frameon=False, bbox_to_anchor=(0.0, 1.02, 1, 0.2), loc=\"lower left\", borderaxespad=0, handletextpad=0.5, columnspacing=0.6, ncol=1, prop={'size': 9})\nplt.ylim(0, 1)\nplt.xlim(-30, 30)\nplt.yticks(size=9)\nplt.xticks(size=9)\nplt.tight_layout()\nplt.savefig('figures/probs_agent=' + args.agent + '_condition=' + args.condition + '.pdf', bbox_inches='tight')\nplt.show()\n", "repo_name": "marcelbinz/CENTaUR", "sub_path": "HorizonTask/plot_choice_probabilities.py", "file_name": "plot_choice_probabilities.py", "file_ext": "py", "file_size_in_byte": 5560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.distributions.Bernoulli", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.distributions.Bernoulli", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 51, "usage_type": "call"}, {"api_name": "statsmodels.api.Logit", "line_number": 80, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 80, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 97, "usage_type": "call"}, {"api_name": "statsmodels.api.Logit", "line_number": 100, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 113, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", 
"line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "19057957355", "text": "import dataclasses\nimport datetime\nimport typing\n\nimport discord\n\nfrom database.models.model import Model\n\n\n@dataclasses.dataclass()\nclass ReactionRole(Model):\n message_id: int = None\n emoji: str = None\n guild: int = None\n created: datetime.datetime = dataclasses.field(default_factory=datetime.datetime.utcnow)\n author: int = None\n role: int = None\n\n def get_guild(self) -> typing.Optional[discord.Guild]:\n return self.bot.get_guild(self.guild)\n\n def get_role(self) -> typing.Optional[discord.Role]:\n guild = self.get_guild()\n if guild:\n return guild.get_role(self.role)\n\n @classmethod\n def setup_table(cls) -> str:\n return \"\"\"\n CREATE TABLE IF NOT EXISTS reaction_role(\n id bigint primary key ,\n message_id bigint,\n emoji VARCHAR(256),\n guild bigint references guilds(id) on DELETE CASCADE,\n created timestamp,\n author bigint,\n role bigint,\n unique (message_id,emoji)\n )\n \"\"\"\n", "repo_name": "LucasCoderT/iceteabot", "sub_path": "database/models/reaction_role.py", "file_name": "reaction_role.py", "file_ext": "py", "file_size_in_byte": 1024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "database.models.model.Model", "line_number": 11, "usage_type": "name"}, {"api_name": 
"datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "dataclasses.field", "line_number": 15, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "attribute"}, {"api_name": "discord.Guild", "line_number": 19, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 22, "usage_type": "attribute"}, {"api_name": "discord.Role", "line_number": 22, "usage_type": "attribute"}, {"api_name": "dataclasses.dataclass", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "31174357580", "text": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=''),\n path('login', views.ulogin, name='login'),\n path('register', views.uregister, name='register'),\n path('dashboard', views.udashbaord, name='dashboard'),\n path('add-consignment', views.addconsign, name='add-consignment'),\n path('logout', views.ulogout, name='logout'),\n\n path('payment-success//', views.paystatus, name=\"payment-success\"),\n\n\n\n # Url for Vendor Operations and view\n\n path('vendor-login', views.vendorlogin, name='vendor-login'),\n path('vendor-register', views.vendorregister, name='vendor-register'),\n path('vendor', views.vendor, name='vendor'),\n path('add-vehicle', views.addvehicle, name='add-vehicle'),\n path('add-driver', views.add_driver, name='add-driver'),\n path('update-status//', views.update_status, name='update-status'),\n path('add-tvride', views.add_tvride, name='add-tvride'),\n\n\n\n]\n", "repo_name": "Creative-Me-Sanket/SmartTransportManagementSystem", "sub_path": "userapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 996, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "5880658710", "text": "from flask import render_template, request, redirect, url_for, flash, send_file, safe_join\nfrom app import app, Users, db, login_manager, create_project, allowed_file, classification, Projects, csv_writer\nfrom flask_login import LoginManager, UserMixin, login_required, login_user, current_user, logout_user\nimport os\nfrom werkzeug.utils import secure_filename\n\nlist_project = []\n\n@app.route('/', methods=['POST', 'GET'])\n@login_required\ndef index():\n\tlist_project = os.listdir('project/'+current_user.user_name)\n\tif request.method == \"POST\":\n\t\tquery = Projects.query.filter_by(title_project=request.form['title_card'], 
project_username=current_user.user_name).all()\n\t\treturn redirect(f\"/create_project/{current_user.user_name}/{query[0].title_project}/upload_files/add_classes/data_classification/{query[0].file_name}\")\n\n\treturn render_template(\"base.html\", list=list_project)\n\n@app.route('/login/register', methods=[\"POST\", \"GET\"])\ndef register():\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\tsearch_username = Users.query.filter_by(user_name = request.form['user_name']).all()\n\t\t\tsearch_email = Users.query.filter_by(email = request.form['email']).all()\n\t\t\tif request.form['user_name'] != \"\" and request.form['email'] != \"\" and request.form['psw'] != \"\":\n\t\t\t\tif search_username == []:\n\t\t\t\t\tif search_email == []:\n\t\t\t\t\t\tu = Users(user_name=request.form['user_name'],email=request.form['email'])\n\t\t\t\t\t\tu.set_password(request.form['psw'])\n\t\t\t\t\t\tdb.session.add(u)\n\t\t\t\t\t\tdb.session.flush()\n\t\t\t\t\t\tdb.session.commit()\n\t\t\t\t\t\tos.mkdir('project/'+str(request.form['user_name']))\n\t\t\t\t\t\treturn redirect(url_for('login'))\n\t\t\t\t\telse:\n\t\t\t\t\t\tflash(\"Email is already in use\")\n\t\t\t\telse:\n\t\t\t\t\tflash(\"Username is already in use\")\n\t\t\telse:\n\t\t\t\tflash(\"Fill in all the fields\")\n\n\t\texcept Exception:\n\t\t\tdb.session.rollback()\n\t\t\tprint(\"Error\")\n\n\treturn render_template(\"register.html\")\n\n@app.route('/login', methods=['POST', \"GET\"])\ndef login():\n\tif request.method == \"POST\":\n\t\tuser = db.session.query(Users).filter(Users.user_name == request.form['user_name']).first()\n\t\tif user and user.check_password(request.form['psw']):\n\t\t\trm = True if request.form.get('cb_remember') else False\n\t\t\tlogin_user(user, remember=rm)\n\t\t\treturn redirect(\"/\")\n\t\telse:\n\t\t\tflash(\"Invalid username/password\", 'error')\n\t\t\treturn redirect(url_for('login'))\n\treturn render_template(\"login.html\")\n\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n flash(\"You have been logged out.\")\n return redirect(url_for('login'))\n\n@app.route('/create_project/', methods=['POST', \"GET\"])\n@login_required\ndef project():\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\tcreate_project(current_user.user_name, request.form['title_project'])\n\t\t\treturn redirect(f\"/create_project/{current_user.user_name}/{request.form['title_project']}/upload_files/\")\n\t\texcept FileExistsError:\n\t\t\tflash(\"A project with this name already exists\", 'error')\n\n\treturn render_template('create_project.html', name=current_user.user_name)\n\n@app.route('/create_project/<name_user>/<name_project>/upload_files/', methods=['POST', \"GET\"])\n@login_required\ndef upload_files(name_user, name_project):\n\tif request.method == \"POST\":\n\t\tif \"file\" not in request.files:\n\t\t\tflash('No file part')\n\t\t\treturn redirect(request.url)\n\t\tfile = request.files[\"file\"]\n\t\tif file.filename == '':\n\t\t\tflash('Not selected file')\n\t\t\treturn redirect(request.url)\n\t\tif file and allowed_file(file.filename):\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\tfile.save(os.path.join('project/'+current_user.user_name+'/'+name_project, filename))\n\t\t\treturn redirect(f'add_classes/{filename}')\n\t\telse:\n\t\t\tflash('Invalid file extension')\n\n\treturn render_template('add_files.html')\n\n@app.route('/create_project/<name_user>/<name_project>/upload_files/add_classes/<filename>', methods=['POST', \"GET\"])\n@login_required\ndef add_classes(name_user, name_project, filename):\n\tu = Users.query.filter_by(id = 1).all()\n\tp = 
Projects(title_project=name_project, file_name=filename, project_username=current_user.user_name, u_id=u[0].id, class_list=\"\")\n\tsearch_project = Projects.query.filter_by(title_project=name_project, project_username=current_user.user_name).all()\n\tif search_project != []:\n\t\tif request.method == \"POST\":\n\t\t\tif '+' in request.form:\n\t\t\t\tif request.form['title_class'] == \"\":\n\t\t\t\t\tflash(\"The class name cannot be empty\")\n\t\t\t\telse:\n\t\t\t\t\tquery = Projects.query.filter_by(title_project=name_project, project_username=current_user.user_name).all()\n\t\t\t\t\tlist_=query[0].class_list.split()\n\t\t\t\t\tlist_.append(request.form['title_class'])\n\t\t\t\t\tl = \" \".join(list_)\n\t\t\t\t\tProjects.query.filter_by(title_project=name_project, project_username=current_user.user_name).update({\"class_list\": l})\n\t\t\t\t\tdb.session.commit()\n\t\t\tif 'continue' in request.form:\n\t\t\t\treturn redirect(f\"data_classification/{filename}\")\n\telse:\n\t\tdb.session.add(p)\n\t\tdb.session.commit()\n\n\tquery = Projects.query.filter_by(title_project=name_project, project_username=current_user.user_name).all()\n\tlocal_list = query[0].class_list.split()\n\treturn render_template('add_classes.html', classes=local_list)\n\n@app.route('/create_project/<name_user>/<name_project>/upload_files/add_classes/data_classification/<filename>', methods=['POST', \"GET\"])\n@login_required\ndef data_classification(name_user, name_project, filename):\n\tpath = f\"project/{current_user.user_name}/{name_project}/{filename}\"\n\tsorted_path=\"sorted.txt\"\n\tdata_csv, column_names = classification(path)\n\tdata_csv_changed = []\n\tfor item in data_csv:\n\t\tr = \" \".join(item)\n\t\tdata_csv_changed.append(r)\n\tquery = Projects.query.filter_by(title_project=name_project, project_username=current_user.user_name).all()\n\tlocal_list = query[0].class_list.split()\n\n\tif request.method == \"POST\":\n\t\tclass_map = {}\n\t\tk = []\n\t\tsorted_list = []\n\t\tfor item in data_csv_changed:\n\t\t\td1 = {str(item): str(request.form.get(item))}\n\t\t\tclass_map.update(d1)\n\t\tfor i in sorted(class_map.items(), key=lambda para: para[1]):\n\t\t\tk.append(data_csv_changed.index(i[0]))\n\t\tfor i in k:\n\t\t\tsorted_list.append(data_csv[i])\n\t\tcsv_writer(sorted_list, current_user.user_name, name_project, column_names)\n\t\tdownload_path=f\"project/{name_user}/{name_project}/sorted.csv\"\n\t\ttry:\n\t\t\treturn send_file(download_path, as_attachment=True)\n\t\texcept Exception as e:\n\t\t\treturn str(e)\n\n\treturn render_template(\"data_classification.html\", data = data_csv_changed, classes=local_list, path=sorted_path)", "repo_name": "mDmitriy2001/project_practice", "sub_path": "view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 6401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "app.Projects.query.filter_by", "line_number": 14, "usage_type": "call"}, {"api_name": "app.Projects.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", 
"line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 9, "usage_type": "call"}, {"api_name": "app.app", "line_number": 9, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "app.Users.query.filter_by", "line_number": 23, "usage_type": "call"}, {"api_name": "app.Users.query", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.Users", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "app.Users.query.filter_by", "line_number": 24, "usage_type": "call"}, {"api_name": "app.Users.query", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.Users", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "app.Users", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 30, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 30, "usage_type": "name"}, {"api_name": "app.db.session.flush", "line_number": 31, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 31, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 32, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 32, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 40, "usage_type": "call"}, 
{"api_name": "app.db.session.rollback", "line_number": 43, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 43, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 19, "usage_type": "call"}, {"api_name": "app.app", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "app.db.session.query", "line_number": 51, "usage_type": "call"}, {"api_name": "app.Users", "line_number": 51, "usage_type": "argument"}, {"api_name": "app.db.session", "line_number": 51, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 51, "usage_type": "name"}, {"api_name": "app.Users.user_name", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 48, "usage_type": "call"}, {"api_name": "app.app", "line_number": 48, "usage_type": "name"}, {"api_name": "flask_login.logout_user", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 66, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 61, "usage_type": "call"}, {"api_name": "app.app", "line_number": 61, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "app.create_project", "line_number": 73, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 78, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 78, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 68, "usage_type": "call"}, {"api_name": "app.app", "line_number": 68, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 90, "usage_type": "name"}, {"api_name": "app.allowed_file", "line_number": 91, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.user_name", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 98, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 80, "usage_type": "call"}, {"api_name": "app.app", "line_number": 80, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 81, "usage_type": "name"}, {"api_name": "app.Users.query.filter_by", "line_number": 103, "usage_type": "call"}, {"api_name": "app.Users.query", "line_number": 103, "usage_type": "attribute"}, {"api_name": "app.Users", "line_number": 103, "usage_type": "name"}, {"api_name": "app.Projects", "line_number": 104, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 104, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 104, "usage_type": "name"}, {"api_name": "app.Projects.query.filter_by", "line_number": 105, "usage_type": "call"}, {"api_name": "app.Projects.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 105, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 105, "usage_type": "name"}, {"api_name": 
"flask.request.method", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 110, "usage_type": "call"}, {"api_name": "app.Projects.query.filter_by", "line_number": 112, "usage_type": "call"}, {"api_name": "app.Projects.query", "line_number": 112, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 112, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "app.Projects.query.filter_by", "line_number": 116, "usage_type": "call"}, {"api_name": "app.Projects.query", "line_number": 116, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 116, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 116, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 117, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 117, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 117, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 118, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 119, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 121, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 121, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 121, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 122, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 122, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 122, "usage_type": "name"}, {"api_name": "app.Projects.query.filter_by", "line_number": 124, "usage_type": "call"}, {"api_name": "app.Projects.query", "line_number": 124, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 124, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 124, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 124, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 126, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 100, "usage_type": "call"}, {"api_name": "app.app", "line_number": 100, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 101, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 131, "usage_type": "name"}, {"api_name": "app.classification", "line_number": 133, "usage_type": "call"}, {"api_name": "app.Projects.query.filter_by", "line_number": 138, "usage_type": "call"}, {"api_name": 
"app.Projects.query", "line_number": 138, "usage_type": "attribute"}, {"api_name": "app.Projects", "line_number": 138, "usage_type": "name"}, {"api_name": "flask_login.current_user.user_name", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 146, "usage_type": "name"}, {"api_name": "app.csv_writer", "line_number": 152, "usage_type": "call"}, {"api_name": "flask_login.current_user.user_name", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 152, "usage_type": "name"}, {"api_name": "flask.send_file", "line_number": 155, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 159, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 128, "usage_type": "call"}, {"api_name": "app.app", "line_number": 128, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 129, "usage_type": "name"}]} +{"seq_id": "44012901654", "text": "# -*- coding:utf-8 -*-\n\nimport pika\nimport time\n\nusername = \"faith\"\npwd = \"qq2921481\"\nuser_pwd = pika.PlainCredentials(username, pwd)\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='172.16.54.130', credentials=user_pwd))\nchannel = connection.channel()\n\n# 声明 queue,因为不确定生产者先执行还是消费者先执行\nchannel.queue_declare(queue='hello', durable=True)\n\n\ndef callback(ch, method, properties, body):\n \"\"\" 回调函数,有点像事件驱动模型 \"\"\"\n print(ch, method, properties) # ch -》 channel\n time.sleep(10)\n print(\"收到消息\", body)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\n#\nchannel.basic_qos(prefetch_count=1) # 只要有一个队列在等待就不给你发了\n\nchannel.basic_consume(callback,\n queue='hello') # 不确认no_ack=True,默认是确认的no_ack=False,默认客户端不确认,就不删队列,保证消息被完整消费,全部挂了重启还会重新消费未消费完毕的队列\n\nchannel.start_consuming()\n\nprint(\"生产者等待消息中,阻塞\") # 默认是轮询机制,开启多个server 轮询接受\n\n\"\"\" \n rabbitmqctl stop \n rabbitmq-server start & \n rabbitmqctl list_queues\n rabbitmq-plugins enable rabbitmq_management # 开启web管理界面 \n Exchange 定义的时候是有类型的,以决定到底是哪些Queue符合条件,可以接受信息\n fanout 所有bind到此exchange的queue都可以接收消息\n direct 通过routingkey 和 exchange 决定的哪个唯一的queue可以接受消息\n topic:所有符合routingkey(此时可以是一个表达式)的routeKey 所bind得queue可以接受到消息\n headers: 通过headers决定发给谁\n\n\"\"\"\n", "repo_name": "qq453388937/calc_mmd", "sub_path": "rbmq/02_rb_server_consumer.py", "file_name": "02_rb_server_consumer.py", "file_ext": "py", "file_size_in_byte": 1750, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pika.PlainCredentials", "line_number": 8, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 10, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "26531503570", "text": "# Run in terminal after setting up virtual envr - \n#python3 -m pip install schedule\n#python3 -m pip install requests\n\nimport schedule # pip3 install schedule\nimport requests # pip3 install request\nimport time\nimport json\n\n#having issues importing schedule 
and requests\n # Fixed, only had to run 'pip3 install request' in terminal\n\ndef pull_currency_data(site_url, apikey, file_name):\n \"\"\"\n :param site_url: URL to which requests get should hit\n :param apikey: Key from your account\n :param file_name: location and file name where output should be saved/appended\n \"\"\"\n global data\n data = requests.get(url=site_url+apikey)\n file = open(file_name, \"a\")\n currency = json.loads(data.content)\n output = data.headers['Date'] + '|' + '1' + '|' + str(currency['USD']) + '|' + str(currency['JPY']) + '|' + \\\n str(currency['EUR']) + '|' + str(currency['NGN']) + '\\n'\n file.write(output)\n file.close()\n\nkey = '2447285030a725a86d4ef48ea1336edb9c67189caa0750fb8ab4697799861dc8'\nurl = 'https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD,JPY,EUR,NGN&api_key='\nfile_name = './currency_extract.csv'\n\n# Refresh rate is every 60seconds\nschedule.every(60).seconds.do(pull_currency_data, url, key, file_name)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)", "repo_name": "KushimoKola/Python_Ingestion", "sub_path": "exchange_data.py", "file_name": "exchange_data.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 33, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "1626100910", "text": "import os\n\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\nfrom tqdm import tqdm\n\n\nclass ETLProcess(object):\n def __init__(self, read_db, write_db, write_table_name):\n self.read_db = read_db\n self.write_db = write_db\n self.write_table_name = write_table_name\n self._reset()\n\n def __unicode__(self):\n return \"ETLProcess: (table: {})\".format(self.write_table_name)\n\n def extract(self, sql, write_pk_field=None, types=None):\n self.types = types\n if sql.endswith('.sql'):\n with open(sql, 'r') as f:\n sql = f.read()\n if write_pk_field:\n self.extract_method = (self._format_sql, (sql, write_pk_field))\n else:\n self.extract_method = (self.read_db.query, (sql,))\n\n def transform(self, *fields):\n self.transform_pipeline.fields = fields\n return self.transform_pipeline\n\n def load(self, upsert_fields=None, ensure=None, safe=False):\n method, args = self.extract_method\n if os.getenv(\"VERBOSE\"):\n print(\"Querying data...\")\n results = method(*args)\n results = self._apply_middleware(results)\n if results:\n if os.getenv(\"VERBOSE\"):\n print(\"Loading {}\".format(self.write_table_name))\n results = tqdm(results)\n\n table = self.write_db[self.write_table_name]\n self._write_rows(table, results, upsert_fields, ensure, safe)\n self._reset()\n\n def extract_override(self, f):\n self.extract_method = (f, tuple())\n\n def link(self, field, target, table_name, child_field, name=None):\n self.links.append(\n (field, target, table_name, child_field, {\"name\": name}))\n\n def link_closest(self, field, target, table_name, child_field, name=None,\n method=\">=\"):\n self.links.append((field, target, table_name, child_field,\n {'closest_method': method, 'name': name}))\n\n def middleware(self, f):\n self._middleware.append(f)\n\n def ignore(self, *args):\n self._ignored += args\n\n def 
_reset(self):\n self.transform_pipeline = TransformPipeline()\n self.transform_pipeline._reset()\n self.links = []\n self._middleware = []\n self._ignored = []\n self.types = None\n\n def _apply_middleware(self, results):\n for middleware in self._middleware:\n results = middleware(results)\n return results\n\n def _format_sql(self, sql, write_pk_field):\n last_pk = 0\n try:\n rows = self.write_db.query(\"SELECT MAX({}) max FROM {};\".format(\n write_pk_field, self.write_table_name\n ))\n except (OperationalError, ProgrammingError):\n pass\n else:\n last_pk = next(rows)['max'] or last_pk\n return self.read_db.query(sql.format(last_pk))\n\n def _write_rows(self, table, rows, upsert_fields, ensure=None, safe=False):\n dropped = False\n for row in rows:\n row_data = self._update_row(row)\n if upsert_fields:\n table.upsert(row_data, upsert_fields, ensure=None)\n else:\n table.insert(row_data, ensure=ensure)\n if not dropped and not safe:\n self._drop_old_columns(table, row_data.keys())\n dropped = True\n\n def _update_row(self, row):\n row_data = self._type_format(row)\n row_data = self.transform_pipeline.transform(row)\n row_data = self._make_links(row_data)\n row_data = self._remove_ignored(row_data)\n return row_data\n\n def _remove_ignored(self, row):\n for field in self._ignored:\n row.pop(field)\n return row\n\n def _drop_old_columns(self, table, current_columns):\n current_columns += ['id']\n for column in table.columns:\n if column not in current_columns:\n table.drop_column(column)\n\n def _make_links(self, row_data):\n for field, target, table_name, child_field, options in self.links:\n closest_method = options.get('closest_method')\n if closest_method:\n query = \"SELECT id FROM {0} WHERE {1} \" + closest_method + \" \\\n {2} ORDER BY {1};\"\n if closest_method.startswith(\"<\"):\n query = query[:-1] + \" DESC;\"\n else:\n query = \"SELECT id FROM {0} WHERE {1} = {2};\"\n query = query.format(table_name, child_field, row_data[target])\n res = self.write_db.query(query)\n try:\n id = next(res)['id']\n except StopIteration:\n id = None\n row_data[options.get(\"name\", field)] = id\n return row_data\n\n def _type_format(self, row):\n if self.types:\n for k, type_ in self.types.items():\n row[k] = type_(row[k])\n return row\n\n\ndef default(data):\n def inner(default_value):\n return data or default_value\n return inner\n\n\ndef func(data):\n def inner(f):\n return f(data)\n return inner\n\n\nclass TransformPipeline(object):\n builtin_methods = {\n 'default': default,\n 'func': func,\n }\n\n def __init__(self):\n self.fields = []\n self.pipeline = {}\n\n def __unicode__(self):\n return \"TransformPipeline (fields: {})\".format(self.fields)\n\n def __getattr__(self, method, *args, **kwargs):\n def inner(*args, **kwargs):\n for field in self.fields:\n if field not in self.pipeline:\n self.pipeline[field] = []\n self.pipeline[field].append((method, args, kwargs))\n return self\n return inner\n\n def transform(self, row):\n for field, methods in self.pipeline.items():\n row[field] = self._update(row[field], methods)\n return row\n\n def _update(self, data, methods):\n for method, args, kwargs in methods:\n try:\n f = getattr(data, method)\n except AttributeError:\n f = self.builtin_methods[method](data)\n data = f(*args, **kwargs)\n return data\n\n def _reset(self):\n self.fields = []\n self.pipeline = {}\n", "repo_name": "exit99/easy-etl", "sub_path": "easy_etl/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 6285, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.OperationalError", "line_number": 83, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.ProgrammingError", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "42297270676", "text": "import torch\nimport torch.nn as nn\n\n\nclass Discriminator(nn.Module):\n def __init__(self, in_channels=1, img_size=32):\n super(Discriminator, self).__init__()\n \n self.model = nn.Sequential(\n *self._block(in_channels, 16, normalize=False),\n *self._block(16, 32),\n *self._block(32, 64),\n *self._block(64, 128)\n )\n \n self.fc = nn.Linear(128*(img_size//2**4)**2, 1)\n \n self.sigmoid = nn.Sigmoid()\n \n def _block(self, in_channels, out_channels, normalize=True):\n layers = [\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.2)\n ]\n \n if normalize:\n layers.append(nn.BatchNorm2d(out_channels))\n \n return layers\n \n def forward(self, img):\n x = self.model(img)\n x = x.view(x.shape[0], -1)\n x = self.fc(x)\n x = self.sigmoid(x)\n return x\n \n \nclass Generator(nn.Module):\n def __init__(self, in_features=100, img_channels=1, img_size=32):\n super(Generator, self).__init__()\n self.img_size = img_size\n \n self.fc = nn.Linear(in_features, 128*((img_size//4)**2))\n \n self.model = nn.Sequential(\n nn.BatchNorm2d(128),\n *self._block(128, 128),\n *self._block(128, 64),\n nn.Conv2d(64, img_channels, kernel_size=3, padding=1),\n nn.Tanh()\n )\n \n def _block(self, in_channels, out_channels):\n layers = [\n nn.Upsample(scale_factor=2),\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.LeakyReLU(0.2, inplace=True)\n ]\n \n return layers\n \n def forward(self, z):\n z = self.fc(z)\n z = z.view(z.shape[0], 128, self.img_size//4, self.img_size//4)\n z = self.model(z)\n return z\n \n \nif __name__ == '__main__':\n img = torch.randn(1, 1, 32, 32)\n z = torch.randn(1, 100)\n \n generator = Generator()\n discriminator = Discriminator()\n \n assert generator(z).shape == img.shape\n assert discriminator(img).shape == (1, 1)", "repo_name": "onebottlekick/bhban_ai_pytorch", "sub_path": "CNN/gan/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 2310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Dropout2d", "line_number": 
24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Tanh", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Upsample", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.randn", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "4953846105", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='reading',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Bookname', models.CharField(max_length=60, verbose_name='\\u4e66\\u540d')),\n ('Author', models.CharField(max_length=60, verbose_name='\\u4f5c\\u8005')),\n ('ISBN', models.CharField(max_length=60, verbose_name='ISBN')),\n ('Category', models.CharField(max_length=60, null=True, verbose_name='\\u7c7b\\u522b', blank=True)),\n ('Picture', models.ImageField(null=True, upload_to=b'pictures', blank=True)),\n ('Grade', models.IntegerField(default=0, verbose_name='\\u8bc4\\u5206')),\n ('Ranking', models.DateTimeField(auto_now_add=True, verbose_name='\\u4e0a\\u699c\\u65f6\\u95f4', null=True)),\n ('Price', models.CharField(default=0, max_length=60, verbose_name='\\u4ef7\\u683c')),\n ('Postage', models.CharField(default=0, max_length=60, verbose_name='\\u90ae\\u8d39')),\n ('Abstract', models.CharField(max_length=130, verbose_name='\\u7b80\\u4ecb')),\n ('Comment', models.CharField(max_length=130, null=True, verbose_name='\\u4e66\\u8bc4', blank=True)),\n ('Quantity', models.IntegerField(default=0, null=True, verbose_name='\\u6570\\u91cf')),\n ('createTime', models.DateTimeField(auto_now_add=True, verbose_name='\\u521b\\u5efa\\u65f6\\u95f4', null=True)),\n ('website', models.URLField()),\n ],\n ),\n ]\n", "repo_name": "RomanceSky/DoubanBak", "sub_path": "Read/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", 
"file_size_in_byte": 1789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "12521203376", "text": "import RPi.GPIO as GPIO\nimport os,time,threading,sys,signal\nfrom Biblio.pn532 import *\nfrom Biblio.lcd import Lcd\nfrom Biblio.Button import Button\nfrom datetime import datetime\n\n#Thread qui va récuperer la valeur de la carte\nclass CardThread(threading.Thread):\n\n\tdef __init__(self,C1,C2):\n\t\tthreading.Thread.__init__(self)\n\t\tself.valCard = '' #La valeur de la carte sera mise ici\n\t\tself.C1 = C1 #code de la carte 1 \n\t\tself.C2 = C2 #code de la carte 2\n\n\tdef run(self):\n\t\tcmd = \"sudo python3 DetectCard.py \"+self.C1+\" \"+self.C2 # prépare la commande pour récupèrer la valeur de la 
carte\n\t\tprint(cmd) \n\t\tres = os.popen(cmd) #lance la commande dans un autre processus\n\t\tself.valCard = res.read().split(\"\\n\") #Attends de recevoir la réponse\n\n#Classe utilisant le composant lecteur NFC\nclass NFC:\n\tdef __init__(self,hexaCarte1,hexaCarte2):\n\t\tself.C1 = hexaCarte1 #code de la carte 1\n\t\tself.C2 = hexaCarte2 #code de la carte 2\n\n\t#Fonction permettant de lancer le thread et de modifier l'affichage du LCD en fonction du temps passé à attendre\n\t#Elle renvoie la valeur de la carte mise sur le lecteur, ou -2 si le temps est dépassé \n\tdef ProgDetectCard(self):\n\t\tself.th = CardThread(str(self.C1),str(self.C2)) #Initialise le Thread\n\t\tself.th.start() #Le lance (sa fonction run())\n\t\tlcd = Lcd(1)\n\t\tbutCache = Button(5)\n\t\testAppuie = False\n\t\tafficheTemps = False\n\t\ttempsAttente = datetime.now()\n\t\tlcd.setText(\"En attente de carte ...\")\n\t\tlcd.setColor(\"bleu\")\n\t\twhile self.th.valCard == \"\": #Attends qu'il récupère sa valeur \n\t\t\tif butCache.estAppuie() and not estAppuie:\n\t\t\t\testAppuie = True\n\t\t\telif not butCache.estAppuie() and estAppuie:\n\t\t\t\treturn -2\n\t\t\t#Si ça fait déjà 4 secondes que le joueur n'a pas mit la carte on annonce que c'est bientôt fini\n\t\t\telif not afficheTemps and 4 == int((datetime.now() - tempsAttente).total_seconds()):\n\t\t\t\tlcd.setText(\"Bientot fini...\")\n\t\t\t\tlcd.setColor(\"rouge\")\n\t\t\t\tafficheTemps = True\n\t\tprint(\"test : \",self.th.valCard[2])\n\t\tif len(self.th.valCard[2]) > 2 : # Pas de carte au bout de 10 seconde\n\t\t\treturn -2\n\t\telse :\n\t\t\treturn self.th.valCard[2] #La retourne\n\n\t#Fonction qui attend que le joueur place une carte\n\t#Si au bout de 7 secondes, la fonction n'a toujours rien reçu, on arrête la fonction grâce au signal alarm.\n\t#Sinon affiche le numéro de carte (1 ou 2) placé par le joueur. 
\n\tdef DetectCard(self):\n\t\ttry:\n\t\t\tpn532 = PN532_I2C(debug=False, reset=20, req=16)\n\t\t\tic, ver, rev, support = pn532.get_firmware_version()\n\t\t\tprint('Found PN532 with firmware version: {0}.{1}'.format(ver, rev))\n\n\t\t\t# Configure PN532 to communicate with MiFare cards\n\t\t\tpn532.SAM_configuration()\n\t\t\t#Fonction du signal\n\t\t\tdef fin_signal_ALRM(sig,ignore):\n\t\t\t\tprint(\"ça fait 7 secondes, on kill\")\n\t\t\t\tsys.exit()\n\n\t\t\tsignal.signal(signal.SIGALRM,fin_signal_ALRM)\n\t\t\tsignal.alarm(7) #Lance le signal alarm sur 7 secondes\n\n\t\t\tprint('Waiting for RFID/NFC card...')\n\t\t\t# Check if a card is available to read\n\t\t\twhile True :\n\t\t\t\tuid = pn532.read_passive_target(timeout=0.5) #Attend que le joueur place une carte\n\t\t\t\n\t\t\t\tif uid is None:\n \t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ttabHexa = [hex(i) for i in uid]\n\t\t\n\t\t\t\tcarte1 = self.C1[1:-1].split(\",\") # Transforme en tableau\t\t\t\t\n\t\t\t\tcarte2 = self.C2[1:-1].split(\",\") # Transforme en tableau\n\t\t\t\testC1 = True\t\n\t\t\t\testC2 = True\n\t\n\t\t\t\tfor i in range(len(tabHexa)):\n\t\t\t\t\tif tabHexa[i] != carte1[i]:\t\n\t\t\t\t\t\testC1 = False\n\t\t\t\t\tif tabHexa[i] != carte2[i]:\n\t\t\t\t\t\testC2 = False\n\t\t\t\t\n\t\t\t\tif(estC1):\n\t\t\t\t\tprint(\"1\")\n\t\t\t\t\treturn 1\n\t\t\t\telif(estC2):\n\t\t\t\t\tprint(\"2\")\n\t\t\t\t\treturn 2\n\t\t\t\telse:\n\t\t\t\t\tprint(\"-1\")\n\t\t\t\t\treturn -1\t\t\n\t\texcept KeyboardInterrupt: #Ctrl C\n\t\t\tprint(\"stop\")\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\tfinally:\n\t\t\tGPIO.cleanup()\n", "repo_name": "Sebastien-Gineste/Raspberry", "sub_path": "Biblio/NFC.py", "file_name": "NFC.py", "file_ext": "py", "file_size_in_byte": 3696, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "threading.Thread", "line_number": 9, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 12, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.popen", "line_number": 20, "usage_type": "call"}, {"api_name": "Biblio.lcd.Lcd", "line_number": 34, "usage_type": "call"}, {"api_name": "Biblio.Button.Button", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 73, "usage_type": "call"}, {"api_name": "signal.SIGALRM", "line_number": 73, "usage_type": "attribute"}, {"api_name": "signal.alarm", "line_number": 74, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 111, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 111, "usage_type": "name"}]} +{"seq_id": "22081581832", "text": "import sys\nimport re\nimport abc\nimport six\nfrom decimal import Decimal\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\n\nif sys.version_info[0] == 3:\n from urllib.request import urlopen\nelif sys.version_info[0] == 2:\n from urllib import urlopen\nelse:\n raise Exception('Python version 2 or 3 required')\n\n\ndef make_soup(url, parser=\"html.parser\"):\n response = urlopen(url)\n soup = BeautifulSoup(response, 
parser)\n return soup\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass SecurityPage(object):\n\n @classmethod\n def from_url(cls, url):\n if '/uk/funds/snapshot/snapshot' in url:\n return FundsPage(url)\n elif '/uk/stockreport/' in url:\n return StockPage(url)\n elif '/uk/etf/' in url:\n return ETFPage(url)\n\n def __init__(self, url):\n self.url = url\n cls_name = self.__class__.__name__\n security_type = cls_name[:cls_name.find(\"Page\")]\n self.data_ = {\"type\": security_type, \"url\": self.url}\n\n def get_data(self):\n soup = make_soup(self.url)\n self._update_data(soup)\n return self.data_\n\n @abc.abstractmethod\n def _update_data(self, soup):\n \"\"\"\"\"\"\n\n\nclass FundsPage(SecurityPage):\n \"\"\"\n http://www.morningstar.co.uk/uk/funds/snapshot/snapshot.aspx?id=F00000NGEH\n \"\"\"\n def _update_data(self, soup):\n text = soup.find_all('div', class_='snapshotTitleBox')[0].h1.text\n self.data_[\"name\"] = str(text)\n table = soup.find_all('table', class_='overviewKeyStatsTable')[0]\n for tr in table.find_all('tr'):\n tds = tr.find_all('td')\n if len(tds) != 3:\n continue\n if tds[0].text.startswith('NAV'):\n date = tds[0].span.text\n (currency, value) = tds[2].text.split()\n if tds[0].text.startswith('Day Change'):\n change = tds[2].text.strip()\n if tds[0].text.startswith('ISIN'):\n isin = tds[2].text.strip()\n result = {\n 'value': Decimal(value),\n 'currency': currency,\n 'change': change,\n 'date': datetime.strptime(date, '%d/%m/%Y').date(),\n 'ISIN': isin\n }\n self.data_.update(result)\n\n\nclass StockPage(SecurityPage):\n def _update_data(self, soup):\n title = soup.find_all('span', class_='securityName')[0].text\n value = soup.find_all('span', id='Col0Price')[0].text\n change = soup.find_all('span', id='Col0PriceDetail')[0].text\n change = change.split('|')[1].strip()\n date = soup.find_all('p', id='Col0PriceTime')[0].text[6:16]\n currency = soup.find_all('p', id='Col0PriceTime')[0].text\n currency = re.search(r'\\|\\s([A-Z]{3,4})\\b', currency).group(1)\n isin = soup.find_all('td', id='Col0Isin')[0].text\n return {\n 'name': title,\n 'value': Decimal(value),\n 'currency': currency,\n 'change': change,\n 'date': datetime.strptime(date, '%d/%m/%Y').date(),\n 'ISIN': isin\n }\n\n\nclass ETFPage(SecurityPage):\n def _update_data(self, soup):\n text = soup.find_all('div', class_='snapshotTitleBox')[0].h1.text\n self.data_[\"name\"] = text.split('|')[0].strip()\n self.data_[\"ticker\"] = text.split('|')[1].strip()\n for keyword in [\"Exchange\", \"ISIN\"]:\n line = soup.find(text=keyword)\n if line is None:\n continue\n text = line.parent.nextSibling.nextSibling.text\n self.data_[keyword] = str(text)\n line = soup.find(text=\"Closing Price\")\n if line is not None:\n self.data_[\"currency\"] = \\\n line.parent.nextSibling.nextSibling.text[:3]\n", "repo_name": "chrpinedo/MorningScraper", "sub_path": "morningscraper/security.py", "file_name": "security.py", "file_ext": "py", "file_size_in_byte": 3748, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.version_info", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 12, "usage_type": "attribute"}, {"api_name": "urllib.urlopen", "line_number": 19, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 47, "usage_type": "attribute"}, {"api_name": "six.add_metaclass", "line_number": 24, "usage_type": "call"}, {"api_name": 
"abc.ABCMeta", "line_number": 24, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "re.search", "line_number": 89, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "38091732836", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nEPSILON = 1e-12\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, **kwargs):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return F.relu(x, inplace=True)\n\nclass Fusion(nn.Module):\n def __init__(self, feature_dim):\n super(Fusion, self).__init__()\n\n self.linear = nn.Linear(8*2048, feature_dim, bias=False)\n self.bn = nn.BatchNorm1d(feature_dim)\n self.pool = nn.AdaptiveMaxPool2d(1)\n self.M = 8\n self.attentions = BasicConv2d(2048, self.M, kernel_size=1)\n self.linear.weight.data.normal_(0, 0.001)\n init.normal_(self.bn.weight.data, 1.0, 0.02)\n init.constant_(self.bn.bias.data, 0.0)\n\n def forward(self, feat, feat2):\n feat2_att = self.attentions(feat2)\n\n B, C, H, W = feat.size()\n _, M, AH, AW = feat2_att.size()\n\n x = (torch.einsum('imjk,injk->imn', (feat2_att, feat)) / float(H * W)).view(B, -1)\n x = torch.sign(x) * torch.sqrt(torch.abs(x) + EPSILON)\n x = F.normalize(x, dim=-1)\n x = self.linear(x)\n x = self.bn(x)\n\n return x", "repo_name": "bar371/ReFace", "sub_path": "ReIDModules/AIM_CCReID/models/Fusion.py", "file_name": "Fusion.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", 
"line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.einsum", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.sign", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.functional.normalize", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "13658462749", "text": "'''\nAuthor: Cao Shixin\nDate: 2023-04-06 09:34:01\nLastEditors: Cao Shixin\nLastEditTime: 2023-04-14 16:11:09\nDescription: \n'''\nfrom django.urls import path, re_path\nfrom app2 import views\n\nurlpatterns = [\n path('app2/index/',view=views.index),\n # 动态路由配置(路由参数)\n path('app2/show//',view= views.show),\n # 参数数据类型,int(匹配0和正整数)、str(任意非空字符串,不包含”/“,默认类型)、slug(匹配任何ASCII字符串、连接线和下划线)、uuid(uuid字符串,必须包含“-”,所有字母必须小写)\n path('app2/article//',view=views.show_uuid,name='show_uuid'),\n path('app2/article//',view=views.show_slug,name='show_slug'),\n \n # 灵活性更高的re_path()\n re_path('app2/list/(?P\\d{4})/',view= views.article_list),\n re_path('app2/list/(?P\\d+)&key=(?P\\w+)',view=views.article_page,name='article_page'),\n \n # 反向解析路由,即使用name字段,(项目中最好后面也是name不变,只调整path的route即可)\n # name规则:应用名+配置项名称\n path('app2/url_reverse/',view=views.url_reverse,name='app2_url_reverse'),\n \n path('app2/test_get/',view=views.test_get,name='test_get'),\n path('app2/hello_world/',view=views.hello_world,name='app2_hello_world'),\n \n path('app2/test_post/',view=views.test_post,name='app2_test_post'),\n path('app2/test_response/',view=views.test_response,name='app2_test_response'),\n \n path('app2/test_render/',view=views.test_render,name='app2_test_render'),\n path('app2/test_redirect_model//',view=views.test_redirect_model,name='app2_test_redirect_model'),\n path('app2/userinfo/',view=views.userinfo,name='app2_userinfo'),\n]\n", "repo_name": "KirstenDunst/CSXPythonFile", "sub_path": "Django/myshop-test/app2/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1788, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "app2.views.index", "line_number": 12, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "app2.views.show", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "app2.views.show_uuid", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "app2.views.show_slug", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 20, "usage_type": "call"}, {"api_name": "app2.views.article_list", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 20, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 21, "usage_type": "call"}, {"api_name": "app2.views.article_page", 
"line_number": 21, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 21, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "app2.views.url_reverse", "line_number": 25, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "app2.views.test_get", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "app2.views.hello_world", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "app2.views.test_post", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "app2.views.test_response", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 31, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "app2.views.test_render", "line_number": 33, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "app2.views.test_redirect_model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "app2.views.userinfo", "line_number": 35, "usage_type": "attribute"}, {"api_name": "app2.views", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "29442737154", "text": "import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import models\nfrom util import *\n\n\"\"\"\nOur modified resnet18 model without transfer learning\n\"\"\"\nclass ResNet18(nn.Module):\n def __init__(self):\n super(ResNet18, self).__init__()\n self.features = models.resnet18()\n self.features.layer3 = nn.Sequential()\n self.features.layer4 = nn.Sequential()\n self.features.fc = nn.Linear(128, 10)\n self.features.relu = nn.LeakyReLU(inplace=True)\n self.features = self.features\n def forward(self, x):\n x = self.features(x)\n return x\n \n \n\"\"\"\nOur modified resnet18 model with transfer learning.\n\"\"\"\nclass ResNet18_tl(nn.Module):\n def __init__(self):\n super(ResNet18_tl, self).__init__()\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, True)\n self.features = nn.Sequential(\n *list(model_ft.children())[:6]\n )\n self.conv1 = nn.Conv2d(in_channels = 128, out_channels = 256, kernel_size = 2, padding = 1)\n self.conv2 = nn.Conv2d(in_channels = 256, out_channels = 256, kernel_size = 3, padding = 1)\n self.bh1 = nn.BatchNorm2d(num_features = 256)\n self.pool3 = nn.AvgPool2d(kernel_size = (2,2))\n \n self.lin1 = nn.Linear(in_features = 1024, out_features =500)\n self.lin2 = nn.Linear(in_features = 500, out_features = 10)\n\n def forward(self, x):\n x = self.features(x)\n x = self.conv1(x)\n x = self.conv2(x)\n x = F.relu(self.bh1(x))\n x = self.pool3(x)\n x = torch.flatten(x, start_dim = 1)\n x = 
self.lin1(x)\n x = self.lin2(x) \n return x\n", "repo_name": "michael4706/resnet18_model", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1762, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 15, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torchvision.models.resnet18", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.flatten", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "28756787330", "text": "import openai\r\nimport config\r\nimport typer\r\nfrom rich import print\r\nfrom rich.table import Table\r\n\r\n\r\n\r\ndef main():\r\n openai.api_key = config.api_key\r\n\r\n #la parte que se mostrara primero del programa\r\n print(\"[bold green]ChatGPT con python[/bold green]\")\r\n\r\n #tabla de opciones \r\n table = Table(\"Comando\", \"Descripcion\")\r\n table.add_row(\"break\", \"salir del chat\")\r\n print(table)\r\n \r\n while True:\r\n text = input(\"Introduce algo: \")\r\n \r\n if text == \"break\":\r\n break\r\n \r\n # crear la solicitud de completado utilizando el estado actual del modelo\r\n completion = openai.Completion.create(\r\n engine=\"text-davinci-003\",\r\n prompt=text,\r\n max_tokens=2048\r\n )\r\n \r\n # imprimir la respuesta del modelo de IA\r\n message 
= completion.choices[0].text\r\n print(message)\r\n\r\n\r\n \r\n \r\nif __name__ == \"__main__\":\r\n typer.run(main)", "repo_name": "eduardoLOEZ/chat-gpt", "sub_path": "gpt.py", "file_name": "gpt.py", "file_ext": "py", "file_size_in_byte": 965, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openai.api_key", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.api_key", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rich.print", "line_number": 13, "usage_type": "call"}, {"api_name": "rich.table.Table", "line_number": 16, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 18, "usage_type": "call"}, {"api_name": "openai.Completion.create", "line_number": 27, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rich.print", "line_number": 35, "usage_type": "call"}, {"api_name": "typer.run", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "32688636501", "text": "from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Requests the raw HTML from the site\nwebpage = requests.get('https://content.codecademy.com/courses/beautifulsoup/cacao/index.html')\n\n\n# Creates BeautifulSoup object that will traverse the HTML\nsoup = BeautifulSoup(webpage.content, \"html.parser\")\n\n# -- START Making a historgram of the ratings data--\n# Pull all ratings into a list\nratings = soup.find_all(attrs = {\n \"class\" : \"Rating\"\n})\n\n# Pull all ratings into a list\nratings_list = []\n\n#append to ratings_list each rating from ratings and convert the values to float and start at element 1 of the list since it has the word \"Rating\"\nfor rating in ratings[1:]:\n ratings_list.append(float(rating.get_text()))\n\n# print(ratings_list)\n# print(ratings)\n\n#use Matplotlib to create a histogram of ratings_list\nplt.hist(ratings_list)\nplt.show()\n\n# END -- Making a historgram of the ratings data--\n\n#Find top 10 highest rated cholatier companies\n# alt method:\n# company_tags = soup.find_all(attrs = {\n# \"class\": \"Company\"\n# })\n\ncompany_tags = soup.select(\".Company\")\ncompany_names = []\nfor company in company_tags[1:]:\n company_names.append(company.get_text())\n\n# print(company_tags)\n# print(company_names)\n\n# #creates dictionary \n# company_rating = {\"Company\": company_names, \"Rating\": ratings_list}\n# #creates dataframe \n# company_rating_df = pd.DataFrame.from_dict(company_rating)\n# # print(company_rating_df)\n\n# #creates dataframe of top 10\n# mean_ratings = company_rating_df.groupby(\"Company\").Rating.mean()\n# top_ten = mean_ratings.nlargest(10)\n# # print(top_ten)\n\n#scrape cocoa perecent ratings\ncocoa_percent_tags = soup.select(\".CocoaPercent\")\ncocoa_percents = []\n\nfor cocoa_percentage in cocoa_percent_tags[1:]:\n cocoa_percents.append(float(cocoa_percentage.get_text().strip(\"%\")))\n# print(cocoa_percents)\n\n#create add columns to dataframe\ncompany_rating = {\"Company\": company_names, \"Rating\": ratings_list, \"CocoaPercentage\": cocoa_percents}\ncompany_rating_df = pd.DataFrame.from_dict(company_rating)\n# print(company_rating_df)\n\n#make scatterplot of ratings vs cocoa percentage\nplt.scatter(company_rating_df.CocoaPercentage, company_rating_df.Rating)\n\nz = np.polyfit(company_rating_df.CocoaPercentage, company_rating_df.Rating, 1)\nline_function = 
np.poly1d(z)\nplt.plot(company_rating_df.CocoaPercentage, line_function(company_rating_df.CocoaPercentage), \"r--\")\n\nplt.show()\nplt.clf()\n", "repo_name": "ughdeeb/beautifulsoup-codecademy-project", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2465, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 71, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "880290250", "text": "import requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse\nfrom urllib3.exceptions import InsecureRequestWarning\n\n# Disable insecure request warnings\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass Request:\n\n # Constructor of Request\n def __init__(self, url, head):\n self.url = url\n self.header = head\n self.content = None\n self.status_code = None\n self.reason = ''\n\n # Function that obtains html code from url - Request\n def getContent(self, printer):\n try:\n target = checkUrl(self.url)\n req = requests.get(target, headers=self.header, timeout=2, verify=False,)\n self.status_code = req.status_code\n self.reason = req.reason\n if req.status_code == 200:\n html = req.content\n soup = BeautifulSoup(html,'lxml')\n self.content = soup\n except Exception:\n printer.messageError('[ERROR] Domain not found --> {}'.format(target))\n\n # Function that obtains the content of image to download\n def getImage(self, printer):\n try:\n target = checkUrl(self.url)\n req = requests.get(target, headers=self.header, verify=False)\n self.status_code = req.status_code\n self.reason = req.reason\n if req.status_code == 200:\n self.content = req.content\n except Exception:\n printer.check_status_code(target, req.status_code, req.reason)\n\n # Function that obtains the content of file to download\n def getFile(self, printer):\n try:\n target = checkUrl(self.url)\n req = requests.get(target, headers=self.header, verify=False, stream=True)\n self.status_code = req.status_code\n self.reason = req.reason\n if req.status_code == 200:\n self.content = req\n except Exception:\n printer.check_status_code(target, req.status_code, req.reason)\n\n # 
Function that obtains links to pages to be scanned\n def getLinksHrefs(self):\n links = list()\n if self.content != None:\n elements = self.content.findAll('a')\n for a in elements:\n href = a.get('href')\n links.append(href)\n return links\n\n # Function that obtains links to img to be downloaded\n def getLinksImg(self):\n links = list()\n if self.content != None:\n elements = self.content.findAll('img')\n for img in elements:\n src = img.get('src')\n links.append(src)\n return links\n\n# Function that checks the syntax 'http' of a url [origin]\ndef checkUrl(origin):\n parser = urlparse(origin)\n if parser.scheme == '':\n url = 'http://'+parser.path\n return url\n else:\n return origin\n", "repo_name": "goldcod3/Arachnida", "sub_path": "spider/utils/Request.py", "file_name": "Request.py", "file_ext": "py", "file_size_in_byte": 2881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.packages.urllib3.disable_warnings", "line_number": 7, "usage_type": "call"}, {"api_name": "urllib3.exceptions.InsecureRequestWarning", "line_number": 7, "usage_type": "argument"}, {"api_name": "requests.packages", "line_number": 7, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "94492834", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport logging\nimport argparse\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nfrom collections import OrderedDict\nfrom matplotlib import pyplot as plt\n\nLOG = logging.getLogger(__name__)\n\n__version__ = \"1.0.0\"\n__author__ = (\"Xingguo Zhang\",)\n__email__ = \"invicoun@foxmail.com\"\n__all__ = []\n\n\ndef read_tsv(file):\n\n for line in open(file):\n line = line.strip()\n\n if not line or line.startswith(\"#\"):\n continue\n line = line.replace(\",\", \"\")\n\n yield line.split(\"\\t\")\n\n\ndef deal_with(files, types=\"all\", model=\"length\"):\n\n r = {}\n if types==\"all\":\n types = [\"MITE\", \"LTR\", \"SINE\", \"TER\", \"HELITRON\"]\n\n if model== \"length\":\n model = 4\n elif model== \"number\":\n model = 2\n else:\n model = -1\n\n for file in files:\n for line in read_tsv(file):\n if line[0] not in r:\n r[line[0]] = [float(line[5]), []]\n if line[1] not in types:\n continue\n r[line[0]][1].append((line[1], float(line[model])))\n\n pr = OrderedDict()\n sample = []\n for name, values in sorted(r.items(), key=lambda d:d[1][0], reverse=True):\n sample.append(name.replace(\"_\", \" \").capitalize())\n for types, dv in values[1]:\n if types not in pr:\n pr[types] = []\n pr[types].append(dv)\n return sample, pr\n\n\ndef plot_transposon(files, types=\"all\", model=\"length\"):\n\n sample, pr = deal_with(files, types, model)\n if model == \"length\":\n yl = \"Transposon length(pb)\"\n elif model == \"number\":\n yl = \"Transposon number\"\n else:\n yl = \"% transposon\"\n #PCOLOR = [\"#E6FA3C\", \"#E6AD67\", \"#FA2883\", \"#524CE6\", \"#6AFFE0\"]\n #PCOLOR = [\"#8975FA\", \"#3BA5F0\", \"#91FAD7\", \"#83E673\", \"#FFFA4D\"]\n #PCOLOR = [\"#FA2843\", \"#62F0A3\", \"#FA7D5A\", \"#5AEAE3\", \"#FF8619\"]\n #PCOLOR = [\"#0780cf\", \"#765005\", \"#fa6d1d\", \"#0e2c82\", 
\"#b6b51f\"]\n PCOLOR = [\"#015699\", \"#FAC00F\", \"#F3764A\", \"#5FC6C9\", \"#4F596D\"]\n\n fig = plt.figure(figsize=[10,8.5])\n ax = fig.add_subplot(1,1,1,) \n \n ax.spines['top'].set_visible(False)#去掉上边边框\n ax.spines['bottom'].set_visible(False)#去掉下方边边框\n ax.spines['right'].set_visible(False)#去掉右边边框\n ax.spines['left'].set_visible(False)#去掉左边边框\n ax.grid(True, 'major', 'y', ls='--', lw=.5, c='black', alpha=.3)\n ax.xaxis.set_major_formatter(plt.FuncFormatter(''.format)) #X轴不显示刻度\n ax.xaxis.set_minor_formatter(plt.FuncFormatter(''.format))\n\n plt.tick_params(bottom=False, top=False, left=False, right=False, labelsize=12)\n plt.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.25)\n n = 0\n x = np.array(sample)\n\n for i in pr:\n y = np.array(pr[i])\n if n == 0:\n ax.bar(x, y, label=i, color=PCOLOR[n])\n h = y\n else:\n ax.bar(x, y, bottom=h, label=i, color=PCOLOR[n])\n h = h+y\n n += 1\n\n for a,b in zip(x, sample):\n ax.text(a, -2, b, ha='right', va='top', fontsize=12, rotation=45)\n\n plt.legend(loc=\"upper right\", frameon=False)\n plt.ylabel(yl, fontsize=14)\n plt.savefig(\"%s_transposon.png\" % types, dpi=500)\n plt.savefig(\"%s_transposon.pdf\" % types)\n\n return 0\n\n\ndef add_args(parser):\n\n parser.add_argument(\"input\", nargs='+', metavar='FILE', type=str,\n help='Input the statistical results of each sample transposon, stat_transposon.tsv.')\n parser.add_argument('-t', '--types', choices=[\"MITE\", \"LTR\", \"SINE\", \"TER\", \"HELITRON\", \"all\"], default=\"all\",\n help='Set the type of transposon displayed, default=all.')\n parser.add_argument('-m', '--model', choices=[\"length\", \"number\", \"percentage\"], default=\"length\",\n help='Set the type of data displayed, default=length.')\n\n return parser\n\n\ndef main():\n\n logging.basicConfig(\n stream=sys.stderr,\n level=logging.INFO,\n format=\"[%(levelname)s] %(message)s\"\n )\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nname:\n plot_transposon.py: Draw a picture of the transposon\nattention:\n plot_transposon.py *.stat_transposon.tsv\nversion: %s\ncontact: %s <%s>\\\n \"\"\" % (__version__, \" \".join(__author__), __email__))\n\n parser = add_args(parser)\n args = parser.parse_args()\n\n plot_transposon(args.input, args.types, args.model)\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "zxgsy520/grandTE", "sub_path": "scripts/plot_transposon.py", "file_name": "plot_transposon.py", "file_ext": "py", "file_size_in_byte": 4547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.FuncFormatter", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.FuncFormatter", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplots_adjust", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 135, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 136, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 140, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "5041397542", "text": "import data_handlers\nimport cnn_model\nimport torch\nimport torch.nn as nn\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport time\nfrom scipy.special import softmax\nimport os\nslide = 120\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = torch.load(\"model.pt\").to(device)\nmodel.eval()\n\ndef find_bobber(img, slide = 120):\n result = np.zeros((((img.shape[0] - 150) // slide + 1), ((img.shape[1] - 150) // slide + 1)))\n for i in range(0, img.shape[0] - 150, slide):\n batch = np.zeros((((img.shape[1] - 150) // slide + 1), 3, 150, 150))\n for j in range(0, img.shape[1] - 150, slide):\n window = img[i : 150 + i, j : 150 + j]\n batch[j // slide, 0] = window[:,:,0]\n batch[j // slide, 1] = window[:,:,1]\n batch[j // slide, 2] = window[:,:,2]\n out = model(torch.Tensor(batch).float().to(device))\n temp = out.cpu().detach().numpy()\n result[i // slide] = softmax(temp, axis = 1)[:, 0]\n return result\n\ndef find_clicking_point(img, base_size = 25, starti = None, startj = None, endi = None, endj = None):\n if starti == None:\n starti, startj = 0,0\n endi, endj = img.shape[:2]\n if endi - starti < base_size and endj - startj < base_size:\n return starti, endi, startj, endj\n img2 = np.zeros(img.shape)\n nninput = np.empty((1, 3, 150, 150))\n while base_size < endi - starti or base_size < endj - startj:\n if endj - startj == endi - starti:\n midi = (endi + starti) // 2\n midj = (endj + startj) // 2\n tests = [(starti, midi, startj, endj), (midi, endi, startj, endj), (starti, endi, startj, midj), (starti, endi, midj, endj), ((starti + endi) // 4, (starti + endi) // 4 + (endi - starti) // 2, startj, endj)]\n elif endj - startj > endi - starti:\n width = endi - starti\n tests = [(starti, endi, startj, startj + width), (starti, endi, endj - width, endj), (starti, endi, (startj + endj) // 4, (startj + endj) // 4 + width)]\n else:\n width = endj - startj\n tests = [(starti, starti + width, startj, endj), (endi - width, endi, startj, endj), ((starti + endi) // 4, (starti + endi) // 4 + width, startj, endj)]\n grades = []\n for i1, i2, j1, j2 in tests:\n img2[i1:i2, j1:j2] = img[i1:i2, 
j1:j2]\n nninput[0, 0] = img2[:,:,0]\n nninput[0, 1] = img2[:,:,1]\n nninput[0, 2] = img2[:,:,2]\n out = model(torch.Tensor(nninput).float().to(device))\n temp = out.cpu().detach().numpy()[0]\n grades.append(softmax(temp)[0])\n img2[i1:i2, j1:j2] = 0\n starti, endi, startj, endj = tests[np.argmax(grades)]\n img2[starti:endi,startj:endj] = img[starti:endi,startj:endj]\n return (starti + endi) // 2, (startj + endj) // 2\n \n \n\ndef find_bobber_pos(img):\n result = find_bobber(img, 120)\n f = np.argmax(result)\n columns = ((img.shape[1] - 150) // slide + 1)\n i,j = f // columns, f % columns\n i1, j1 = find_clicking_point(img[i * 120 : i * 120 + 150, j * 120 : j * 120 + 150])\n return i * 120 + i1, j * 120 + j1\n\n \nif __name__ == \"__main__\":\n count = 0\n for i in os.listdir(\"images\"):\n count += 1\n img = plt.imread(\"images/{}\".format(i))[:,:,:3]\n plt.imshow(img)\n plt.show()\n result = find_bobber(img, 120)\n plt.imshow(result, cmap='gray')\n plt.show()\n f = np.argmax(result)\n columns = ((img.shape[1] - 150) // slide + 1)\n i,j = f // columns, f % columns\n find_clicking_point(img[i * 120 : i * 120 + 150, j * 120 : j * 120 + 150])\n #plt.imshow(img[i * 120 : i * 120 + 150, j * 120 : j * 120 + 150])\n #plt.show()\n\n \n", "repo_name": "Dspil/fishing_bot", "sub_path": "find_bobber.py", "file_name": "find_bobber.py", "file_ext": "py", "file_size_in_byte": 3769, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 25, "usage_type": "call"}, {"api_name": "scipy.special.softmax", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.special.softmax", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 67, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "1483964003", "text": "from flask_restful import Resource\nfrom flask_security 
import current_user\nfrom ..utils.response_format import build_data_response\n\nclass UserGetView(Resource):\n def get(self):\n if current_user.is_anonymous:\n response = build_data_response({\"user_id\": None, \"username\": None, \"email\": None}, 200)\n else:\n response = build_data_response({\n \"user_id\": current_user.id,\n \"username\": current_user.username,\n \"email\": current_user.email,\n },\n 200,)\n return response\n\n", "repo_name": "xemedo/flask-angular-security", "sub_path": "app/views/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 575, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask_restful.Resource", "line_number": 5, "usage_type": "name"}, {"api_name": "flask_security.current_user.is_anonymous", "line_number": 7, "usage_type": "attribute"}, {"api_name": "flask_security.current_user", "line_number": 7, "usage_type": "name"}, {"api_name": "utils.response_format.build_data_response", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.response_format.build_data_response", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_security.current_user.id", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask_security.current_user", "line_number": 11, "usage_type": "name"}, {"api_name": "flask_security.current_user.username", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask_security.current_user", "line_number": 12, "usage_type": "name"}, {"api_name": "flask_security.current_user.email", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask_security.current_user", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "9622585813", "text": "\"\"\"Search related views.\"\"\"\nfrom django import http\nfrom django import views\n\nfrom core.search import search_api\nfrom core import data_api\n\n\nclass SearchView(views.View):\n \"\"\"Searches for works similar to a given set.\"\"\"\n\n MAX_QUERY_WORKS = 200\n\n def get(self, request):\n work_info_dicts = []\n work_ids = self._extract_work_ids(request)\n if work_ids:\n similar_work_ids, trope_id_to_weight = (\n search_api.get_similar_books(\n work_ids))\n if similar_work_ids:\n work_info_dicts = data_api.get_work_info_dicts_by_id(\n similar_work_ids,\n allowed_trope_id_to_weight=trope_id_to_weight)\n return http.JsonResponse(\n {'results': work_info_dicts})\n\n def _extract_work_ids(self, request):\n \"\"\"Turns query parameter into a set of work ids.\"\"\"\n work_ids = set([])\n if request.GET.getlist('works'):\n work_ids = work_ids.union(set(\n data_api.get_work_ids_by_name(\n request.GET.getlist('works'))))\n query = request.GET.get('query')\n if query:\n work_id = search_api.get_work_id_for_search_query(query)\n if work_id is not None:\n work_ids.add(work_id)\n return set(list(work_ids)[:SearchView.MAX_QUERY_WORKS])\n\n\nclass AutocompleteView(views.View):\n \"\"\"Autocompleter for work names.\"\"\"\n\n def get(self, request):\n query = request.GET.get('query')\n if query:\n suggestions = search_api.get_autocomplete_suggestions(query)\n else:\n suggestions = []\n return http.JsonResponse(\n {'suggestions': suggestions})\n", "repo_name": "romack77/bookslikethis", "sub_path": "bookslikethis/core/views/search_views.py", "file_name": "search_views.py", "file_ext": "py", "file_size_in_byte": 1732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.View", "line_number": 
9, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 9, "usage_type": "name"}, {"api_name": "core.search.search_api.get_similar_books", "line_number": 19, "usage_type": "call"}, {"api_name": "core.search.search_api", "line_number": 19, "usage_type": "name"}, {"api_name": "core.data_api.get_work_info_dicts_by_id", "line_number": 22, "usage_type": "call"}, {"api_name": "core.data_api", "line_number": 22, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http", "line_number": 25, "usage_type": "name"}, {"api_name": "core.data_api.get_work_ids_by_name", "line_number": 33, "usage_type": "call"}, {"api_name": "core.data_api", "line_number": 33, "usage_type": "name"}, {"api_name": "core.search.search_api.get_work_id_for_search_query", "line_number": 37, "usage_type": "call"}, {"api_name": "core.search.search_api", "line_number": 37, "usage_type": "name"}, {"api_name": "django.views.View", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.views", "line_number": 43, "usage_type": "name"}, {"api_name": "core.search.search_api.get_autocomplete_suggestions", "line_number": 49, "usage_type": "call"}, {"api_name": "core.search.search_api", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "7315827594", "text": "import requests\nfrom ratelimit import limits, sleep_and_retry\nfrom bs4 import BeautifulSoup\nfrom retry import retry\nimport urllib.parse\n \nREQUEST_WAIT_SECONDS = 10\n\n@retry(requests.exceptions.RequestException, tries=3) # robustness\ndef get_page(link):\n \"\"\"Get HTML page at given link.\n Raises PageDoesNotExistError if it doesn't exist.\n Raises generic Exception if unexpected HTTP codes are encountered.\"\"\"\n response = requests.get(link)\n if response.status_code == 200:\n return response\n if response.status_code == 404:\n raise PageDoesNotExistError(\"{} does not exist\".format(link))\n raise Exception(f\"Failed to get page {link}, got HTTP status code {response.status_code}.\")\n\nclass GoogleLyricsScraper:\n @sleep_and_retry\n @limits(calls=1, period=REQUEST_WAIT_SECONDS)\n def scrape_lyrics(self, artist, name):\n pg = get_page(\"https://www.google.com/search?q=\"\n + urllib.parse.quote(\" \".join((artist, name, \"lyrics\"))))\n soup = BeautifulSoup(pg.content, 'html.parser')\n lyric_blocks = []\n is_first = True\n for e in soup.select(\"div.BNeawe.tAd8D.AP7Wnd\"):\n # Lyrics appear to be nested in 2 layers of divs with\n # these class tags. 
And they're repeated, for some reason.\n for lyric_e in e.select(\"div.BNeawe.tAd8D.AP7Wnd\"):\n if is_first:\n is_first = False\n else:\n lyric_blocks.append(lyric_e.get_text())\n if not lyric_blocks:\n raise LyricsNotFoundError(\"Couldn't find lyrics for song {}, by {}\".format(name, artist))\n return \"\\n\".join(lyric_blocks)\n\nclass GeniusScraper:\n def scrape(self, link):\n pg = get_page(link)\n soup = BeautifulSoup(pg.content, 'html.parser')\n divs = soup.select(\"div.lyrics\")\n if not divs:\n raise LyricsNotFoundError(\"Couldn't find lyrics div on genius.com page.\")\n return divs[0].get_text()\n\nclass Top40dbScraper:\n def scrape(self, link):\n pg = get_page(link)\n soup = BeautifulSoup(pg.content, 'html.parser')\n for e in soup.find_all([\"small\", \"script\"]):\n e.decompose() # remove the log-in message and ad shit\n divs = soup.select(\"div#divTOP40DB_LYRICS\")\n if not divs:\n raise LyricsNotFoundError(\"Couldn't find lyrics div on top40db.net page.\")\n return divs[0].get_text()\n\nclass LyricsFreakScraper:\n def scrape(self, link):\n pg = get_page(link)\n soup = BeautifulSoup(pg.content, 'html.parser')\n divs = soup.select(\"div#content\")\n if not divs:\n raise LyricsNotFoundError(\"Couldn't find lyrics div on lyricsfreak.com page.\")\n return divs[0].get_text()\n\nclass AzLyricsScraper:\n @sleep_and_retry\n @limits(calls=1, period=REQUEST_WAIT_SECONDS)\n def scrape_lyrics(self, song_link):\n \"\"\"Get lyrics from azlyrics.com at the given link.\n\n Raises PageDoesNotExist exception if the page doesn't exist.\n \"\"\"\n lyrics_page = get_page(song_link)\n soup = BeautifulSoup(lyrics_page.content, 'html.parser')\n lyrics_div = soup.find(\"div\", class_=\"ringtone\").find_next_sibling(\"div\")\n return lyrics_div.text.strip()\n\nclass GenericLyricsScraper:\n def __init__(self):\n \"\"\"Unlike the Google scraper, doesn't scrape directly from\n the lyrics displayed by Google. Looks through search results\n and matches them with the appropriate scraper. Eg if there's\n a link for genius.com, passes that to the genius.com scraper.\"\"\"\n self._scrapers = [\n (\"genius.com\", GeniusScraper()),\n (\"top40db.net\", Top40dbScraper()),\n (\"lyricsfreak.com\", LyricsFreakScraper()),\n (\"azlyrics.com\", AzLyricsScraper())\n ]\n\n @sleep_and_retry\n @limits(calls=1, period=REQUEST_WAIT_SECONDS)\n def scrape_lyrics(self, artist, name):\n pg = get_page(\"https://www.google.com/search?q=\"\n + urllib.parse.quote(\" \".join((artist, name, \"lyrics\"))))\n soup = BeautifulSoup(pg.content, 'html.parser')\n for element in soup.select(\"a\"):\n print(\"========TRYING LINK=========\")\n link = element.get(\"href\")\n try:\n print(\"Processing link:\", link)\n # The parent div should contain the title of the page -- which, for\n # all of the sites we're using, should in turn contain the name of\n # the song.\n if not fuzzy_matches(name, element.parent.get_text()):\n # skip this link to ensure that we're not just getting the lyrics of\n # a random song by the same artist. 
Don't bother to check that the\n # artist is the same; in all likelihood, if the artist is\n # different, it's a different version of the same song.\n print(\"song name does not match:\", element.parent.get_text())\n continue\n for site_name, scraper in self._scrapers:\n if site_name in link:\n print(\"Matches with scraper for\", site_name)\n if link.startswith(\"/\"):\n link = \"https://www.google.com\" + link\n print(\"Trying to scrape from link\", link)\n return scraper.scrape(link)\n except Exception as e:\n continue # try the next link\n raise LyricsNotFoundError(\"Could not scrape lyrics from any website in first page of results.\")\n\ndef fuzzy_matches(important_s, s):\n important_tokens = set([token.lower() for token in important_s.strip().split()])\n tokens = set([token.lower() for token in s.strip().split()])\n # threshold chosen arbitrarily, pretty much\n return len(important_tokens.intersection(tokens)) / len(important_tokens) >= 0.49\n\nclass LyricsNotFoundError(Exception):\n def __init__(self, msg):\n super().__init__(msg)\n\nclass PageDoesNotExistError(Exception):\n def __init__(self, msg):\n super().__init__(msg)\n", "repo_name": "Kevinpgalligan/BeatlesAndYou", "sub_path": "scraping.py", "file_name": "scraping.py", "file_ext": "py", "file_size_in_byte": 6133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "retry.retry", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 9, "usage_type": "attribute"}, {"api_name": "urllib.parse.parse.quote", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 26, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 26, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}, {"api_name": "ratelimit.sleep_and_retry", "line_number": 22, "usage_type": "name"}, {"api_name": "ratelimit.limits", "line_number": 23, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 54, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 65, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 80, "usage_type": "call"}, {"api_name": "ratelimit.sleep_and_retry", "line_number": 72, "usage_type": "name"}, {"api_name": "ratelimit.limits", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.parse.parse.quote", "line_number": 101, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 101, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 101, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 102, "usage_type": "call"}, {"api_name": "ratelimit.sleep_and_retry", "line_number": 97, "usage_type": "name"}, {"api_name": "ratelimit.limits", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "13218306209", "text": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Article, Person\nfrom . 
import forms\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import User\nfrom django.core.paginator import Paginator\n\n\ndef articles_list(request):\n    articles = Article.nodes.order_by('-date').all()\n    article_list = []\n    for article in articles:\n        author = article.author.single()\n        article_data = {\n            'title': article.title,\n            'body': article.body,\n            'slug': article.slug,\n            'date': article.date,\n            'author': author.name,\n            'snippet': article.snippet()\n        }\n        article_list.append(article_data)\n    args = {'articles': article_list}\n    return render(request, 'articles/articleslist.html', args)\n\n\ndef article_detail(request, slug):\n    # return HttpResponse(slug)\n    article = Article.nodes.get(slug=slug)\n    author = article.author.single()\n    return render(request, 'articles/article_detail.html', {'article': article, 'author': author})\n\n\n@login_required(login_url='/accounts/login')\ndef create_article(request):\n    if request.method == 'POST':\n        form = forms.CreateArticle(request.POST, request.FILES)\n        if form.is_valid():\n            instance = form.save()\n            account = Person.nodes.get(name=request.user.username)\n            instance.author.connect(account)\n            instance.save()\n            return redirect('articles:list')\n    else:\n        form = forms.CreateArticle()\n    return render(request, 'articles/create_article.html', {'form': form})\n", "repo_name": "mamtech-A/djangoBlogneo", "sub_path": "articles/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Article.nodes.order_by", "line_number": 10, "usage_type": "call"}, {"api_name": "models.Article.nodes", "line_number": 10, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Article.nodes.get", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Article.nodes", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Person.nodes.get", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Person.nodes", "line_number": 40, "usage_type": "attribute"}, {"api_name": "models.Person", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "11719435611", "text": "import numpy as np\n\nfrom problems.toy_biobjective import circle_points, concave_fun_eval, create_pf\nfrom solvers import epo_search, pareto_mtl_search, linscalar, moo_mtl_search\n\nimport matplotlib.pyplot as plt\nfrom latex_utils import latexify\n\nif __name__ == '__main__':\n    K = 4  # Number of trajectories\n    n = 20  # dim of solution space\n    m = 2  # dim of objective space\n    rs = circle_points(K)  # preference\n\n    pmtl_K = 5\n    pmtl_refs = circle_points(pmtl_K, 0, np.pi / 2)\n    methods = ['EPO', 'PMTL', 'MOOMTL', 'LinScalar']\n    latexify(fig_width=2., fig_height=1.55)\n    ss, mi = 0.1, 100\n    pf = create_pf()\n    for method in methods:\n        fig, ax = plt.subplots()\n        fig.subplots_adjust(left=.12, bottom=.12, right=.97, top=.97)\n        ax.plot(pf[:, 
0], pf[:, 1], lw=2, c='k', label='Pareto Front')\n last_ls = []\n for k, r in enumerate(rs):\n r_inv = 1. / r\n ep_ray = 1.1 * r_inv / np.linalg.norm(r_inv)\n ep_ray_line = np.stack([np.zeros(m), ep_ray])\n label = r'$r^{-1}$ ray' if k == 0 else ''\n ax.plot(ep_ray_line[:, 0], ep_ray_line[:, 1], color='k',\n lw=1, ls='--', dashes=(15, 5), label=label)\n ax.arrow(.95 * ep_ray[0], .95 * ep_ray[1],\n .05 * ep_ray[0], .05 * ep_ray[1],\n color='k', lw=1, head_width=.02)\n # x0 = np.random.randn(n) * 0.4\n x0 = np.zeros(n)\n x0[range(0, n, 2)] = 0.3\n x0[range(1, n, 2)] = -.3\n x0 += 0.1 * np.random.randn(n)\n x0 = np.random.uniform(-0.6, 0.6, n) if method in [\"MOOMTL\", \"LinScalar\"] else x0\n if method == 'EPO':\n _, res = epo_search(concave_fun_eval, r=r, x=x0,\n step_size=ss, max_iters=100)\n if method == 'PMTL':\n _, res = pareto_mtl_search(concave_fun_eval,\n ref_vecs=pmtl_refs, r=r_inv, x=x0,\n step_size=0.2, max_iters=150)\n if method == 'LinScalar':\n _, res = linscalar(concave_fun_eval, r=r, x=x0,\n step_size=ss, max_iters=mi)\n if method == 'MOOMTL':\n _, res = moo_mtl_search(concave_fun_eval, x=x0,\n step_size=0.2, max_iters=150)\n last_ls.append(res['ls'][-1])\n last_ls = np.stack(last_ls)\n ax.scatter(last_ls[:, 0], last_ls[:, 1], s=40, c='b', alpha=1)\n ax.set_xlabel(r'$l_1$')\n ax.set_ylabel(r'$l_2$')\n ax.xaxis.set_label_coords(1.015, -0.03)\n ax.yaxis.set_label_coords(-0.01, 1.01)\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n fig.savefig('figures/' + method + '.pdf')\n\n plt.show()\n", "repo_name": "dbmptr/EPOSearch", "sub_path": "toy_experiments/compare_solvers.py", "file_name": "compare_solvers.py", "file_ext": "py", "file_size_in_byte": 2878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "52", "api": [{"api_name": "problems.toy_biobjective.circle_points", "line_number": 13, "usage_type": "call"}, {"api_name": "problems.toy_biobjective.circle_points", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 16, "usage_type": "attribute"}, {"api_name": "latex_utils.latexify", "line_number": 18, "usage_type": "call"}, {"api_name": "problems.toy_biobjective.create_pf", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "solvers.epo_search", "line_number": 43, "usage_type": "call"}, {"api_name": "problems.toy_biobjective.concave_fun_eval", "line_number": 43, "usage_type": "argument"}, {"api_name": "solvers.pareto_mtl_search", "line_number": 46, "usage_type": "call"}, {"api_name": "problems.toy_biobjective.concave_fun_eval", "line_number": 46, "usage_type": "argument"}, {"api_name": "solvers.linscalar", "line_number": 50, "usage_type": "call"}, 
{"api_name": "problems.toy_biobjective.concave_fun_eval", "line_number": 50, "usage_type": "argument"}, {"api_name": "solvers.moo_mtl_search", "line_number": 53, "usage_type": "call"}, {"api_name": "problems.toy_biobjective.concave_fun_eval", "line_number": 53, "usage_type": "argument"}, {"api_name": "numpy.stack", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "27214941116", "text": "import pymysql\nimport threading\nimport configparser\nimport sys\nimport os\nimport os.path\n\ndef runOnWorker(worker, keyPath, command):\n print(\"########\"+worker['ipAddress']+\"########\")\n os.system(\"ssh -oStrictHostKeyChecking=no -i \\\"\"+keyPath+\"\\\" ubuntu@\"+worker['ipAddress']+\" '\"+command+\"'\")\n print(\"########\"+worker['ipAddress']+\"########\")\n\nparser = configparser.ConfigParser()\nparser.read(\"../halite.ini\")\nDB_CONFIG = parser[\"database\"]\n\nkeyPath = os.path.join(\"../\", parser[\"aws\"][\"keyfilepath\"])\n\ndb = pymysql.connect(host=DB_CONFIG[\"hostname\"], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)\ncursor = db.cursor()\n\ncursor.execute(\"select * from Worker\")\nworkers = cursor.fetchall()\n\ncommand = sys.argv[1]\n\nisAsync = False if len(sys.argv) < 3 else int(sys.argv[2]) == 1\n\nif isAsync:\n threads = []\n for worker in workers:\n t = threading.Thread(target=runOnWorker, args = (worker, keyPath, command))\n t.daemon = True\n t.start()\n threads.append(t)\n\n for t in threads:\n t.join()\nelse:\n for worker in workers:\n runOnWorker(worker, keyPath, command)\n", "repo_name": "HaliteChallenge/Halite", "sub_path": "admin/commandRunner.py", "file_name": "commandRunner.py", "file_ext": "py", "file_size_in_byte": 1184, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 188, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.system", "line_number": 10, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pymysql.connect", "line_number": 19, "usage_type": "call"}, {"api_name": "pymysql.cursors", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 27, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "13995086662", "text": "import json\nfrom path import Path as path\nimport pandas as pd\nfrom collections import defaultdict, Counter\n\ndef convert_fname(fname):\n output = fname\n output = output.replace(\":\",\"\")\n output = output.replace(\"_\",\"__\")\n output = output.replace(\"/\",\"_\")\n output = output.replace(\"\\\\\",\"_\")\n output = str(path(output).stem) + \".jpg\"\n return output\n\ndef txt_to_list(fname):\n with open(fname,'r', encoding='utf-8') as f:\n return f.read().split(\"\\n\")\n \ndef list_to_txt(mylist, fname):\n with open(fname,'w', encoding='utf-8') as f:\n f.write(\"\\n\".join(mylist))\n \ndef read_json(jsonfile):\n with open(jsonfile, 'r', encoding='utf-8') as f:\n return json.loads(f.read())\n \ndef write_json(h, jsonfile):\n with open(jsonfile,'w', encoding='utf-8') as f:\n f.write(json.dumps(h))\n \ndef 
folder_report(folder, prefix, n):\n    counts = defaultdict(int)\n    files = [f for f in path(folder).files() if f.name.startswith(prefix)]\n    print(len(files))\n    for f in files:\n        key = f.split(\"_\")[n]\n        counts[key] += 1\n    keys = list(counts)\n    values = [counts[k] for k in keys]\n    df = pd.DataFrame()\n    df['keys'] = keys\n    df['values'] = values\n    df.sort_values('values', axis=0, ascending= False, inplace = True)\n    return df\n\ndef find_and_add_substrings(s, t):\n    result = []\n    start = 0\n    while True:\n        index = s.find(t, start)\n        if index == -1:\n            break\n        result.append(s[:index + len(t)])\n        start = index + 1\n    return result\n\ndef get_most_common_folders(file_paths, N):\n    folder_counts = Counter()\n    \n    # Count occurrences of each folder\n    for file_path in file_paths:\n        for folder in find_and_add_substrings(file_path, \"/\"):\n            folder_counts[folder] += 1\n    \n    # Sort folders by counts in descending order\n    sorted_folders = sorted(folder_counts.items(), key=lambda x: x[1], reverse=True)\n    \n    # Select top N folders\n    most_common_folders = sorted_folders[:N]\n    \n    return most_common_folders", "repo_name": "xjdeng/sd_tools", "sub_path": "filetools.py", "file_name": "filetools.py", "file_ext": "py", "file_size_in_byte": 2049, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "path.Path", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 32, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 40, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "74187643354", "text": "import csv\r\nimport schedule\r\nimport time\r\n\r\n# def record(name, age):\r\n#     with open('catalog.csv', 'a+') as f:\r\n#         writer = csv.writer(f, delimiter='/')\r\n#         writer.writerow((name, age))\r\n# con = input('con? ')\r\n# while con == 'yes':\r\n#     record(input(), int(input()))\r\n\r\n\r\n\r\ndef write_csv():\r\n    name = input('Enter name: ')\r\n    age = input('Enter age: ')\r\n    with open('users.csv', 'a') as f:\r\n        writer = csv.writer(f, delimiter='/')\r\n        writer.writerow(\r\n            (name, age)\r\n        )\r\n    answ = input('Continue? y or n : ')\r\n    if answ == 'y':\r\n        write_csv()\r\n    else:\r\n        print('stop!')\r\n\r\nwrite_csv()\r\n\r\ndef mailing():\r\n    with open('users.csv', 'r') as f:\r\n        data = f.readlines()\r\n        names = [i.replace('\\n','') for i in data]\r\n        for i in names:\r\n            name = i.split('/')\r\n            if int(name[-1]) >= 18:\r\n                print(f'Discounts today! 
{name[0]}')\r\n\r\nschedule.every(3).seconds.do(mailing)\r\n\r\nwhile True:\r\n    schedule.run_pending()\r\n    time.sleep(1)\r\n\r\n", "repo_name": "dasmatoxx/schedule_profect", "sub_path": "git.py", "file_name": "git.py", "file_ext": "py", "file_size_in_byte": 1014, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "csv.writer", "line_number": 19, "usage_type": "call"}, {"api_name": "schedule.every", "line_number": 40, "usage_type": "call"}, {"api_name": "schedule.run_pending", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "27325634607", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 29 18:33:29 2021\n\n@author: cijzendoornvan\n\"\"\"\n\n######################\n# PACKAGES\n######################\nimport yaml\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport matplotlib.gridspec as gridspec\nfrom JAT.Jarkus_Analysis_Toolbox import Transects, Extraction\n\n######################\n# LOAD SETTINGS\n######################\nconfig = yaml.safe_load(open(r\"C:\\Users\\cijzendoornvan\\OneDrive - Delft University of Technology\\Documents\\DuneForce\\JARKUS\\JAT\\Examples\\03_extract_all\\jarkus_03.yml\"))\nlocation_filter = yaml.safe_load(open(config['inputdir'] + config['data locations']['LocFilter']))\nplot_titles = yaml.safe_load(open(config['inputdir'] + config['data locations']['Titles'])) \n \nDirFigures = config['outputdir'] + config['save locations']['DirFig']\nif os.path.isdir(DirFigures) == False:\n    os.mkdir(DirFigures)\n\n# Load jarkus dataset\ndata = Transects(config)\nconversion_alongshore2ids, conversion_ids2alongshore = data.get_conversion_dicts()\n\ndef get_distribution_plot(variable, dimension, figure_title, colorbar_label, start_yr, end_yr, Dir):\n    \n    # Create an array with locations and an array with labels of the ticks\n    ticks_x = [350, 1100, 1700]\n    labels_x = ['Wadden Coast', 'Holland Coast', 'Delta Coast']\n    \n    years_requested = list(range(start_yr, end_yr+1))\n    ticks_y = range(0, len(years_requested))[0::5]\n    labels_y = [str(yr) for yr in years_requested][0::5]\n    \n    dimension.rename(columns = conversion_ids2alongshore, inplace = True)\n    dimension = dimension.sort_index(axis=1) # Flip transects based on alongshore order instead of ids order.\n    # dimension.rename(columns = conversion_alongshore2ids, inplace = True)\n    \n    # Calculate overall average and stddev, used for range of colorbar\n    average = np.nanmean(dimension.values)\n    stddev = np.nanstd(dimension.values, ddof=1)\n    range_value = 2*stddev\n    vmin = average - range_value\n    vmax = average + range_value\n    \n    average_through_space = dimension.mean(axis=0)\n    average_through_time = dimension.mean(axis=1)\n    vmin_avg = average_through_time.min()\n    vmax_avg = average_through_time.max()\n    \n    # Set-up of figure\n    fig = plt.figure(figsize=(20,10)) \n    fig.suptitle(figure_title, fontsize=24)\n    gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1], height_ratios=[3,2]) \n    \n    # PLOT TEMPORAL AND SPATIAL DISTRIBUTION OF VARIABLE\n    ax1 = fig.add_subplot(gs[0])\n    cmap = plt.cm.get_cmap('Greens') # Define the colormap used for the colorbar\n    colorplot = plt.pcolor(dimension.loc[start_yr:end_yr], vmin=vmin, vmax=vmax, cmap=cmap)\n    # Set labels and ticks of x and y axis\n    plt.yticks(ticks_y, labels_y)\n    plt.tick_params(axis='y', which='both',length=5, labelsize = 16)\n    plt.xticks(ticks_x, labels_x) #rotation='vertical')\n    plt.tick_params(axis='x', which='both',length=0, 
labelsize = 20)\n    # plot boundaries between coastal regions\n    plt.axvline(x=686, color='r') # Boundary kustvak 6 and 7, Den Helder, trsct 7000000, decameter 23417\n    plt.axvline(x=1507, color='r')# Boundary kustvak 9 and 10, Hoek van Holland, trsct 10000000, decameter 35908\n    \n    # PLOT YEARLY AVERAGE OF VARIABLE\n    ax2 = fig.add_subplot(gs[1])\n    plt.plot(average_through_time, average_through_time.index, color='green')\n    #plt.scatter(average_through_time, average_through_time.index, c=average_through_time, cmap=cmap, vmin=vmin, vmax=vmax)\n    # Set labels and ticks of x and y axis\n    ticks_y = average_through_time.index[0::5]\n    plt.xlabel(colorbar_label)\n    plt.yticks(ticks_y, labels_y)\n    plt.tick_params(axis='y', which='both',length=5, labelsize = 16)\n    plt.xlim([vmin_avg-0.75*stddev, vmax_avg+0.75*stddev])\n    plt.tick_params(axis='x', which='both',length=5, labelsize = 16)\n    \n    # PLOT SPATIAL AVERAGES OF VARIABLE\n    ax3 = fig.add_subplot(gs[2])\n    plt.scatter(range(0, len(average_through_space)), average_through_space, c=average_through_space, cmap=cmap, vmin=vmin, vmax=vmax)\n    # Set labels and ticks of x and y axis\n    plt.xlim([0, len(average_through_space)])\n    plt.xticks(ticks_x, labels_x) \n    plt.ylabel(colorbar_label)\n    plt.tick_params(axis='x', which='both',length=0, labelsize = 20)\n    plt.ylim([vmin, vmax])\n    plt.tick_params(axis='y', which='both',length=5, labelsize = 16)\n    plt.axvline(x=686, color='r') # Boundary kustvak 6 and 7, Den Helder, trsct 7000000, decameter 23417\n    plt.axvline(x=1507, color='r')# Boundary kustvak 9 and 10, Hoek van Holland, trsct 10000000, decameter 35908\n\n    # Add colorbar\n    fig.subplots_adjust(right=0.88)\n    cbar_ax = fig.add_axes([0.90, 0.10, 0.02, 0.78])\n    cbar = fig.colorbar(colorplot, cax=cbar_ax)\n    cbar.set_label(colorbar_label,size=18, labelpad = 20)\n    cbar.ax.tick_params(labelsize=16) \n    \n    plt.tight_layout\n    \n    manager = plt.get_current_fig_manager()\n    manager.window.showMaximized()\n    plt.show()\n\n    plt.savefig(Dir + variable + '_distribution' + '_plot.png')\n\n    plt.close()\n\n#%%##############################\n#### EXECUTE ####\n#################################\nstart_yr = 1965 \nend_yr = 2021\n\nextract = Extraction(data, config) # initialize the Extraction class \nvariables = extract.get_requested_variables() # get all variables that were requested (based on jarkus.yml file)\n\nfor variable in variables:\n    print(variable)\n    # Get title and label from yml file\n    figure_title = plot_titles[variable][0]\n    colorbar_label = plot_titles[variable][1]\n    \n    # Create distribution plots for unfiltered dataframes\n    dimension_unfilt = pickle.load(open(config['outputdir'] + config['save locations']['DirD'] + variable + '_dataframe.pickle','rb')) \n    DirFig_unfilt = config['outputdir'] + config['save locations']['DirFig'] + config['save locations']['DirD']\n    if os.path.isdir(DirFig_unfilt) == False:\n        os.mkdir(DirFig_unfilt)\n    get_distribution_plot(variable, dimension_unfilt, figure_title, colorbar_label, start_yr, end_yr, DirFig_unfilt)\n\n    # Create distribution plots for filtered dataframes\n    dimension_filt = pickle.load(open(config['outputdir'] + config['save locations']['DirE'] + variable + '_filtered_dataframe.pickle','rb')) \n    DirFig_filt = config['outputdir'] + config['save locations']['DirFig'] + config['save locations']['DirE']\n    if os.path.isdir(DirFig_filt) == False:\n        os.mkdir(DirFig_filt)\n    get_distribution_plot(variable, dimension_filt, figure_title, colorbar_label, start_yr, end_yr, DirFig_filt)\n", "repo_name": "christavanijzendoorn/JAT", "sub_path": 
"Examples/03_extract_all/Distribution_plots.py", "file_name": "Distribution_plots.py", "file_ext": "py", "file_size_in_byte": 6560, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.safe_load", "line_number": 22, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 23, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 28, "usage_type": "call"}, {"api_name": "JAT.Jarkus_Analysis_Toolbox.Transects", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.nanstd", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 67, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolor", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 110, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "JAT.Jarkus_Analysis_Toolbox.Extraction", "line_number": 126, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 139, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "35873857396", "text": "import requests as re\r\nfrom bs4 import BeautifulSoup as bs\r\nfrom fake_useragent import UserAgent\r\nimport os\r\nfrom retrying import retry\r\nimport math\r\n\r\ntables_url = 'https://www.fsc.gov.tw/ch/home.jsp?id=3&parentpath=0&contentid=128&mcustomize=lawnew_list.jsp&pagesize=100&page='\r\ncontent_path = 'content/'\r\n\r\n\r\ndef request_data_num():\r\n # 取得共有幾筆資料\r\n soup = request_fun(tables_url + str(1))\r\n return int(soup.select('.red')[0].text)\r\n\r\n\r\ndef request_all_table():\r\n request_data_num()\r\n # 建立資料夾\r\n if not 
os.path.exists(content_path):\r\n os.mkdir(content_path)\r\n\r\n total_data = '
    '\r\n\r\n    # Divide the total record count by the records per page to get the number of pages\r\n    page_num = math.ceil(request_data_num() / 100)\r\n\r\n    for i in range(1, page_num + 1):\r\n        print('start page', i)\r\n        table_soup = request_fun(tables_url + str(i))\r\n\r\n        # Get every entry in the list\r\n        table = table_soup.select('.newslist')[0].select('li')\r\n        for j in range(0, len(table)):\r\n            # The first entry on each page holds the column names; ver1 only keeps it once\r\n            if i != 1 and j == 0:\r\n                continue\r\n\r\n            # Append the entry to our own generated list\r\n            total_data += str(table[j])\r\n\r\n            if table[j].select('a'):\r\n                # The id is the dataserno of the linked content\r\n                content_url_tmp = table[j].select('a')[0]['href']\r\n                content_url = 'https://www.fsc.gov.tw/ch/' + content_url_tmp\r\n                item_id = content_url_tmp[content_url_tmp.find('dataserno=')\r\n                                          + len('dataserno='):content_url_tmp.find('&dtable=')]\r\n                print('start content', j)\r\n                request_each_content(content_url, item_id)\r\n                print('done content', j)\r\n\r\n        print('done page', i)\r\n\r\n    total_data += '
'\r\n\r\n    # Save all the data into this html; there will be only one list\r\n    file = open('total_table.html', 'w', encoding='utf-8')\r\n    file.write(str(total_data))\r\n    file.close()\r\n\r\n\r\ndef request_each_content(content_url, item_id):\r\n    # Fetch the content linked from each entry\r\n    content_soup = request_fun(content_url)\r\n    try:\r\n        content = content_soup.select('.maincontent')[0]\r\n    except:\r\n        print('not web page or no content')\r\n        return\r\n    file = open(content_path + '/' + str(item_id) + '.html', 'w', encoding='utf-8')\r\n    file.write(str(content))\r\n    file.close()\r\n\r\n\r\n@retry(stop_max_attempt_number=3)\r\ndef request_fun(url):\r\n    # All requests are issued from here\r\n    fake_header_can = UserAgent().random\r\n    fake_header = {'user-agent': fake_header_can}\r\n    request = re.get(url, headers=fake_header)\r\n    return bs(request.text, 'html.parser')\r\n\r\n\r\nif __name__ == '__main__':\r\n    request_all_table()\r\n", "repo_name": "kimpao207/request_for_law", "sub_path": "financial_supervisory_commission_ver1.py", "file_name": "financial_supervisory_commission_ver1.py", "file_ext": "py", "file_size_in_byte": 2881, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 27, "usage_type": "call"}, {"api_name": "fake_useragent.UserAgent", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 81, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 82, "usage_type": "call"}, {"api_name": "retrying.retry", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "17936337647", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport simplejson\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom management.models import WorkRecord, Service\nfrom services.utils import get_resident, new_year_day\nfrom ehr.forms import BodyExamForm\nfrom ehr.models import BodyExam\n\nfrom .forms import AftercareForm\nfrom .models import Aftercare\n\ndebug = logging.getLogger('debug')\n\n\ndef aftercare_page(request):\n    return render(request, 'diabetes/aftercare_page.html')\n\n\ndef aftercare_review(request):\n    resident = get_resident(request)\n\n    context = {'aftercare_1': None, 'aftercare_2': None,\n               'aftercare_3': None, 'aftercare_4': None}\n\n    for aftercare in context:\n        service_item = Service.items.get(alias=aftercare, service_type__alias='diabetes')\n        try:\n            record = WorkRecord.objects.get(resident=resident, service_item=service_item)\n        except WorkRecord.DoesNotExist:\n            pass\n        else:\n            context[aftercare] = Aftercare.objects.get(id=record.item_id)\n    context['resident'] = resident\n    return render(request, 'diabetes/aftercare_review.html', context)\n\n\ndef aftercare_form(request):\n    aftercare = request.POST.get('aftercare')\n    form = AftercareForm()\n    resident = get_resident(request)\n    template = 'diabetes/aftercare_%s_form_content.html' % aftercare\n\n    # The height from the general physical examination is needed here to compute the body mass index\n    record = WorkRecord.objects.filter(resident=resident, model_name='BodyExam',\n                                       submit_time__gte=new_year_day()).first()\n    if record:\n        body_exam = BodyExam.objects.get(id=record.item_id)\n        sign_height = body_exam.height\n    else:\n        sign_height = 0.0\n    return render(request, template, {'form': form, 'resident': resident, 'sign_height': sign_height})\n\n\ndef aftercare_submit(request):\n    success = False\n    resident = 
get_resident(request)\n    item_alias = request.POST.get('aftercare')\n    service_item = Service.items.get(alias=item_alias, service_type__alias='diabetes')\n    form = AftercareForm(request.POST)\n    if form.is_valid():\n        result = form.save()\n        record = WorkRecord(provider=request.user, resident=resident, service_item=service_item,\n                            app_label='diabetes', model_name='Aftercare',\n                            item_id=result.id, service_item_alias=service_item.alias)\n        record.save()\n        success = True\n    else:\n        debug.info(form.errors.as_data())\n\n    return HttpResponse(simplejson.dumps({'success': success}),\n                        content_type='text/html; charset=UTF-8')\n\n\ndef aftercare_supplement_page(request):\n    return render(request, 'diabetes/aftercare_supplement_page.html')\n\n\ndef aftercare_supplement_review(request):\n    resident = get_resident(request)\n    context = {'aftercare_5': None, 'aftercare_6': None}\n    for aftercare in context:\n        service_item = Service.items.get(alias=aftercare, service_type__alias='diabetes')\n        try:\n            record = WorkRecord.objects.get(resident=resident, service_item=service_item)\n        except WorkRecord.DoesNotExist:\n            pass\n        else:\n            context[aftercare] = Aftercare.objects.get(id=record.item_id)\n    context['resident'] = resident\n\n    return render(request, 'diabetes/aftercare_supplement_review.html', context)\n\n\ndef body_exam_page(request):\n    return render(request, 'diabetes/body_exam_page.html')\n\n\ndef body_exam_form(request):\n    resident = get_resident(request)\n    records = WorkRecord.objects.filter(resident=resident,\n                                        model_name='BodyExam',\n                                        submit_time__gte=new_year_day())\n    if records.count():\n        result = BodyExam.objects.get(id=records[0].item_id)\n        form = BodyExamForm(instance=result)\n    else:\n        form = BodyExamForm()\n\n    return render(request, 'ehr/body_exam_form.html', {'form': form, 'resident': resident,\n                                                       'type_alias': 'diabetes'})\n\n\ndef body_exam_submit(request):\n    submit_data = request.POST.copy()\n    if 'csrfmiddlewaretoken' in submit_data:\n        submit_data.pop('csrfmiddlewaretoken')\n\n    if submit_data:\n        resident = get_resident(request)\n        record = WorkRecord.objects.filter(resident=resident, model_name='BodyExam',\n                                           submit_time__gte=new_year_day()).first()\n        if record:\n            result, created = BodyExam.objects.update_or_create(id=record.item_id, defaults=submit_data)\n            success = True\n        else:\n            form = BodyExamForm(submit_data)\n            if form.is_valid():\n                result = form.save(commit=False)\n                result.resident = resident\n                result.save()\n                success = True\n            else:\n                success, message = False, u'Failed to save the data to the database'\n        if success:\n            service_item = Service.items.get(alias='physical_examination',\n                                             service_type__alias='diabetes')\n            WorkRecord.objects.create(provider=request.user, resident=resident, service_item=service_item,\n                                      app_label='diabetes', model_name='BodyExam', item_id=result.id,\n                                      service_item_alias=service_item.alias)\n            message = u'Record saved successfully'\n    else:\n        success, message = False, u'No data was submitted'\n\n    return HttpResponse(simplejson.dumps({'success': success, 'message': message}),\n                        content_type='text/html; charset=UTF-8')\n", "repo_name": "1330018801/public_health", "sub_path": "diabetes/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 24, "usage_type": "call"}, {"api_name": 
"management.models.Service.items.get", "line_number": 30, "usage_type": "call"}, {"api_name": "management.models.Service.items", "line_number": 30, "usage_type": "attribute"}, {"api_name": "management.models.Service", "line_number": 30, "usage_type": "name"}, {"api_name": "management.models.WorkRecord.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 32, "usage_type": "name"}, {"api_name": "management.models.WorkRecord.DoesNotExist", "line_number": 33, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Aftercare.objects.get", "line_number": 36, "usage_type": "call"}, {"api_name": "models.Aftercare.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.Aftercare", "line_number": 36, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "forms.AftercareForm", "line_number": 43, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 44, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 48, "usage_type": "name"}, {"api_name": "services.utils.new_year_day", "line_number": 49, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects.get", "line_number": 51, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ehr.models.BodyExam", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 60, "usage_type": "call"}, {"api_name": "management.models.Service.items.get", "line_number": 62, "usage_type": "call"}, {"api_name": "management.models.Service.items", "line_number": 62, "usage_type": "attribute"}, {"api_name": "management.models.Service", "line_number": 62, "usage_type": "name"}, {"api_name": "forms.AftercareForm", "line_number": 63, "usage_type": "call"}, {"api_name": "management.models.WorkRecord", "line_number": 66, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 79, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 83, "usage_type": "call"}, {"api_name": "management.models.Service.items.get", "line_number": 86, "usage_type": "call"}, {"api_name": "management.models.Service.items", "line_number": 86, "usage_type": "attribute"}, {"api_name": "management.models.Service", "line_number": 86, "usage_type": "name"}, {"api_name": "management.models.WorkRecord.objects.get", "line_number": 88, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 88, "usage_type": "name"}, {"api_name": "management.models.WorkRecord.DoesNotExist", "line_number": 89, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 89, "usage_type": "name"}, 
{"api_name": "models.Aftercare.objects.get", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Aftercare.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "models.Aftercare", "line_number": 92, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 103, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 104, "usage_type": "name"}, {"api_name": "services.utils.new_year_day", "line_number": 106, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects.get", "line_number": 108, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects", "line_number": 108, "usage_type": "attribute"}, {"api_name": "ehr.models.BodyExam", "line_number": 108, "usage_type": "name"}, {"api_name": "ehr.forms.BodyExamForm", "line_number": 109, "usage_type": "call"}, {"api_name": "ehr.forms.BodyExamForm", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 113, "usage_type": "call"}, {"api_name": "services.utils.get_resident", "line_number": 123, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects.filter", "line_number": 124, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 124, "usage_type": "name"}, {"api_name": "services.utils.new_year_day", "line_number": 125, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects.update_or_create", "line_number": 127, "usage_type": "call"}, {"api_name": "ehr.models.BodyExam.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "ehr.models.BodyExam", "line_number": 127, "usage_type": "name"}, {"api_name": "ehr.forms.BodyExamForm", "line_number": 130, "usage_type": "call"}, {"api_name": "management.models.Service.items.get", "line_number": 139, "usage_type": "call"}, {"api_name": "management.models.Service.items", "line_number": 139, "usage_type": "attribute"}, {"api_name": "management.models.Service", "line_number": 139, "usage_type": "name"}, {"api_name": "management.models.WorkRecord.objects.create", "line_number": 141, "usage_type": "call"}, {"api_name": "management.models.WorkRecord.objects", "line_number": 141, "usage_type": "attribute"}, {"api_name": "management.models.WorkRecord", "line_number": 141, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 148, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "23704765400", "text": "\"\"\"\n=============================================================\n~/tfat/tests/api/test_encounter_filters.py\nCreated: Jul-05-2021 15:32\nDESCRIPTION:\n\n The tests in this file ensure that the filters available for\n tag Encounter endpoint function as expected.\n\n Filters:\n\n + year\n + year__gte\n + year__lte\n + year__gt\n + year__lt\n + lake\n + lake__not\n + spc\n + spc__not\n + tagid\n + tagid__like\n + tagdoc\n + tagdoc__not\n + tagdoc__like\n + tag_origin\n + tag_origin__not\n + tag_position\n + tag_position__not\n + tag_type\n + tag_type__not\n + 
tag_colour\n + tag_colour__not\n + tagstat\n + tagstat__not\n + tlen\n + tlen__gte\n + tlen__lte\n + tlen__gt\n + tlen__lt\n + flen\n + flen__gte\n + flen__lte\n + flen__gt\n + flen__lt\n + rwt\n + rwt__null\n + rwt__gte\n + rwt__lte\n + rwt__gt\n + rwt__lt\n + sex\n + sex__not\n + sex__null\n + clipc\n + clipc__not\n + clipc__null\n + clipc__like\n + clipc__not_like\n + fate\n + fate__not\n\n\n\nA. Cottrill\n=============================================================\n\"\"\"\n\n\nfrom datetime import datetime\n\nimport pytest\nimport pytz\nfrom django.urls import reverse\nfrom rest_framework import status\n\nfrom ..factories import EncounterFactory, LakeFactory, ProjectFactory, SpeciesFactory\n\n\n@pytest.fixture\ndef encounters():\n\n superior = LakeFactory(abbrev=\"SU\", lake_name=\"Lake Superior\")\n huron = LakeFactory(abbrev=\"HU\", lake_name=\"Lake Huron\")\n ontario = LakeFactory(abbrev=\"ON\", lake_name=\"Lake Ontario\")\n\n walleye = SpeciesFactory(spc=\"334\", spc_nmco=\"walleye\")\n lake_trout = SpeciesFactory(spc=\"081\", spc_nmco=\"lake trout\")\n perch = SpeciesFactory(spc=\"331\", spc_nmco=\"yellow perch\")\n\n project0 = ProjectFactory(\n year=\"2012\", prj_cd=\"LSA_IS12_123\", prj_nm=\"Superior Index\", lake=superior\n )\n\n project1 = ProjectFactory(\n year=\"2015\",\n prj_cd=\"LHA_IA15_ABC\",\n prj_nm=\"Huron Community Index Netting\",\n lake=huron,\n )\n\n project2 = ProjectFactory(\n year=\"2017\",\n prj_cd=\"LOA_SC17_XYZ\",\n prj_nm=\"Lake Ontario Sport Creel\",\n lake=ontario,\n )\n\n fish0 = EncounterFactory(\n species=walleye,\n project=project0,\n observation_date=datetime(2013, 11, 11).replace(tzinfo=pytz.UTC),\n flen=225,\n tlen=250,\n rwt=None,\n clipc=\"0\",\n sex=9,\n fate=\"K\",\n tagstat=\"C\",\n tagid=\"1111\",\n tagdoc=\"25012\",\n # _tag_type=\"2\",\n # _tag_position=\"5\",\n # _tag_origin=\"01\",\n # _tag_colour=\"2\",\n )\n fish0.save()\n\n fish1 = EncounterFactory(\n species=perch,\n project=project0,\n observation_date=datetime(2013, 11, 11).replace(tzinfo=pytz.UTC),\n flen=275,\n tlen=300,\n rwt=500,\n clipc=\"2\",\n sex=1,\n fate=\"K\",\n tagstat=\"C\",\n tagid=\"2222\",\n tagdoc=\"45042\",\n # _tag_type=\"4\",\n # _tag_position=\"5\",\n # _tag_origin=\"04\",\n # _tag_colour=\"2\",\n )\n fish1.save()\n\n fish2 = EncounterFactory(\n species=walleye,\n project=project2,\n observation_date=datetime(2017, 11, 11).replace(tzinfo=pytz.UTC),\n flen=375,\n tlen=400,\n rwt=1000,\n clipc=\"5\",\n sex=1,\n fate=\"K\",\n tagstat=\"C\",\n tagid=\"3333\",\n tagdoc=\"34013\",\n # _tag_type=\"3\",\n # _tag_position=\"4\",\n # _tag_origin=\"01\",\n # _tag_colour=\"3\",\n )\n fish2.save()\n\n fish3 = EncounterFactory(\n species=lake_trout,\n project=project1,\n observation_date=datetime(2015, 11, 11).replace(tzinfo=pytz.UTC),\n flen=375,\n tlen=400,\n rwt=1100,\n clipc=\"0\",\n sex=2,\n fate=\"R\",\n tagstat=\"C\",\n tagid=\"4444\",\n tagdoc=\"43043\",\n # _tag_type=\"4\",\n # _tag_position=\"3\",\n # _tag_origin=\"04\",\n # _tag_colour=\"3\",\n )\n fish3.save()\n\n fish4 = EncounterFactory(\n species=lake_trout,\n project=project2,\n observation_date=datetime(2017, 11, 11).replace(tzinfo=pytz.UTC),\n flen=325,\n tlen=350,\n rwt=800,\n clipc=\"23\",\n sex=None,\n fate=\"R\",\n tagstat=\"A\",\n tagid=\"4444\",\n tagdoc=\"43994\",\n # _tag_type=\"4\",\n # _tag_position=\"3\",\n # _tag_origin=\"99\",\n # _tag_colour=\"4\",\n )\n fish4.save()\n\n fish5 = EncounterFactory(\n species=perch,\n project=project1,\n observation_date=datetime(2015, 11, 
11).replace(tzinfo=pytz.UTC),\n flen=225,\n tlen=250,\n rwt=500,\n clipc=None,\n sex=None,\n fate=\"R\",\n tagstat=\"A\",\n tagid=\"5511\",\n tagdoc=\"25044\",\n # _tag_type=\"2\",\n # _tag_position=\"5\",\n # _tag_origin=\"04\",\n # _tag_colour=\"4\",\n )\n fish5.save()\n\n return [fish0, fish1, fish2, fish3, fish4, fish5]\n\n\nfilter_args = [\n # where, when, what and how:\n ({\"lake\": \"ON\"}, [2, 4]),\n ({\"lake__not\": \"ON\"}, [0, 1, 3, 5]),\n ({\"lake\": \"ON,HU\"}, [2, 3, 4, 5]),\n ({\"lake__not\": \"ON,HU\"}, [0, 1]),\n ({\"year\": \"2015\"}, [3, 5]),\n ({\"year__gte\": \"2015\"}, [2, 3, 4, 5]),\n ({\"year__lte\": \"2015\"}, [0, 1, 3, 5]),\n ({\"year__gt\": \"2015\"}, [2, 4]),\n ({\"year__lt\": \"2015\"}, [0, 1]),\n ({\"spc\": \"081\"}, [3, 4]),\n ({\"spc_not\": \"081\"}, [0, 1, 2, 5]),\n ({\"spc\": \"081,334\"}, [0, 2, 3, 4]),\n ({\"spc_not\": \"081,334\"}, [1, 5]),\n # fish attributes\n ({\"tlen\": 350}, [4]),\n ({\"tlen__gte\": 350}, [2, 3, 4]),\n ({\"tlen__lte\": 350}, [0, 1, 4, 5]),\n ({\"tlen__gt\": 350}, [2, 3]),\n ({\"tlen__lt\": 350}, [0, 1, 5]),\n ({\"flen\": 325}, [4]),\n ({\"flen__gte\": 325}, [2, 3, 4]),\n ({\"flen__lte\": 325}, [0, 1, 4, 5]),\n ({\"flen__gt\": 325}, [2, 3]),\n ({\"flen__lt\": 325}, [0, 1, 5]),\n ({\"rwt\": 800}, [4]),\n ({\"rwt__null\": True}, [0]),\n ({\"rwt__null\": False}, [1, 2, 3, 4, 5]),\n ({\"rwt__gte\": 800}, [2, 3, 4]),\n ({\"rwt__lte\": 800}, [1, 4, 5]),\n ({\"rwt__gt\": 800}, [2, 3]),\n ({\"rwt__lt\": 800}, [1, 5]),\n ({\"sex\": 1}, [1, 2]),\n ({\"sex\": \"1,9\"}, [0, 1, 2]),\n ({\"sex__not\": 1}, [0, 3, 4, 5]),\n ({\"sex__not\": \"1,9\"}, [3, 4, 5]),\n ({\"sex__null\": True}, [4, 5]),\n ({\"sex__null\": False}, [0, 1, 2, 3]),\n ({\"clipc\": 0}, [0, 3]),\n ({\"clipc\": \"2,5\"}, [1, 2]),\n ({\"clipc__not\": 0}, [1, 2, 4, 5]),\n ({\"clipc__not\": \"2,5\"}, [0, 3, 4, 5]),\n ({\"clipc__null\": True}, [5]),\n ({\"clipc__null\": False}, [0, 1, 2, 3, 4]),\n ({\"clipc__like\": 2}, [1, 4]),\n ({\"clipc__not_like\": 2}, [0, 2, 3, 5]),\n ({\"fate\": \"K\"}, [0, 1, 2]),\n ({\"fate__not\": \"K\"}, [3, 4, 5]),\n # tag attributes:\n ({\"tagid\": \"1111\"}, [0]),\n ({\"tagid__like\": \"11\"}, [0, 5]),\n ({\"tagdoc\": \"25012\"}, [0]),\n ({\"tagdoc__not\": \"25012\"}, [1, 2, 3, 4, 5]),\n ({\"tagdoc__like\": \"250\"}, [0, 5]),\n # TODO: refactor encounter model to include separate fields for tag attributes:\n # ({\"tag_origin\": \"01\"}, [0, 2]),\n # ({\"tag_origin__not\": \"01\"}, [1, 3, 4, 5]),\n # ({\"tag_origin\": \"01,99\"}, [0, 2, 4]),\n # ({\"tag_origin__not\": \"01,99\"}, [1, 3, 5]),\n # ({\"tag_position\": \"3\"}, [3, 4]),\n # ({\"tag_position\": \"3,4\"}, [2, 3, 4]),\n # ({\"tag_position__not\": \"3\"}, [0, 1, 2, 5]),\n # ({\"tag_position__not\": \"3,4\"}, [0, 1, 5]),\n # ({\"tag_type\": \"4\"}, [1, 3, 4]),\n # ({\"tag_type__not\": \"4\"}, [0, 2, 5]),\n # ({\"tag_type\": \"3,4\"}, [1, 2, 3, 4]),\n # ({\"tag_type__not\": \"3,4\"}, [0, 5]),\n # ({\"tag_colour\": \"2\"}, [0, 1]),\n # ({\"tag_colour__not\": \"2\"}, [2, 3, 4, 5]),\n # ({\"tag_colour\": \"2,3\"}, [0, 1, 2, 3]),\n # ({\"tag_colour__not\": \"2,3\"}, [4, 5]),\n ({\"tagstat\": \"A\"}, [4, 5]),\n ({\"tagstat__not\": \"A\"}, [0, 1, 2, 3]),\n ({\"tagstat\": \"C\"}, [0, 1, 2, 3]),\n ({\"tagstat\": \"A,C\"}, [0, 1, 2, 3, 4, 5]),\n]\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize(\"filter,expected\", filter_args)\ndef test_Encounter_filters(client, encounters, filter, expected):\n \"\"\"The readonly api endpoint for tag encounters accepts filters that are\n attributes of the project 
(lake, project code, project name, year) the\n encountered fish (species, size, sex, etc.) as well as the tag attributes.\n\n This test is parameterized to accept a list of two-element tuples: the first\n element is the filter to apply, the second is the list of indices that\n correspond to the encounter records that should be returned in the response.\n The indices are used to extract the id from the fixture and compare those to\n the encounter ids returned by the response.\n\n \"\"\"\n\n ids = []\n for i, x in enumerate(encounters):\n if i in expected:\n ids.append(x.id)\n\n url = reverse(\"tfat_api:encounter-list\")\n response = client.get(url, filter)\n assert response.status_code == status.HTTP_200_OK\n\n # pull out the ids from the response:\n payload = response.data[\"results\"]\n observed_ids = {x[\"id\"] for x in payload}\n\n assert len(payload) == len(expected)\n assert set(ids) == observed_ids\n", "repo_name": "AdamCottrill/TFAT", "sub_path": "tfat/tests/api/test_encounter_filters.py", "file_name": "test_encounter_filters.py", "file_ext": "py", "file_size_in_byte": 9213, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "factories.LakeFactory", "line_number": 83, "usage_type": "call"}, {"api_name": "factories.LakeFactory", "line_number": 84, "usage_type": "call"}, {"api_name": "factories.LakeFactory", "line_number": 85, "usage_type": "call"}, {"api_name": "factories.SpeciesFactory", "line_number": 87, "usage_type": "call"}, {"api_name": "factories.SpeciesFactory", "line_number": 88, "usage_type": "call"}, {"api_name": "factories.SpeciesFactory", "line_number": 89, "usage_type": "call"}, {"api_name": "factories.ProjectFactory", "line_number": 91, "usage_type": "call"}, {"api_name": "factories.ProjectFactory", "line_number": 95, "usage_type": "call"}, {"api_name": "factories.ProjectFactory", "line_number": 102, "usage_type": "call"}, {"api_name": "factories.EncounterFactory", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 112, "usage_type": "attribute"}, {"api_name": "factories.EncounterFactory", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 132, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 132, "usage_type": "attribute"}, {"api_name": "factories.EncounterFactory", "line_number": 149, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 152, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 152, "usage_type": "attribute"}, {"api_name": "factories.EncounterFactory", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 172, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 172, "usage_type": "attribute"}, {"api_name": "factories.EncounterFactory", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 192, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 192, "usage_type": "attribute"}, {"api_name": "factories.EncounterFactory", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 212, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.urls.reverse", "line_number": 331, "usage_type": "call"}, {"api_name":
"rest_framework.status.HTTP_200_OK", "line_number": 333, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 333, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 311, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 312, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 312, "usage_type": "attribute"}]} +{"seq_id": "34186597939", "text": "#!/usr/bin/env python\n\"\"\"\nA one-off script to migrate documents from the ES5 metasearch index to ES6.\n\nThis is necessary because trying to republish all best bets from search-admin frequently fails due to transient network issues.\n\nRequires two Elasticsearch clients: client5 and client6. These use different versions of the python client.\n\nclient5 fetches a page of docs from the old index, and client6 POSTs them to the new index. Simple!\n\"\"\"\nfrom elasticsearch5 import Elasticsearch as Elasticsearch5, TransportError as TransportError5\nfrom elasticsearch6 import Elasticsearch as Elasticsearch6, TransportError as TransportError6\nfrom elasticsearch6.helpers import bulk\nfrom datetime import datetime\nimport os\n\n# Using the example indices and doc types from GOV.UK's search API\n# https://github.com/alphagov/rummager/tree/master/config/schema/indexes\n\nINDEX = 'metasearch'\nGENERIC_DOC_TYPE = 'generic-document'\n\nES5_HOST_PORT = os.getenv('ES5_ORIGIN_HOST', 'http://elasticsearch5:80')\nES6_TARGET_PORT = os.getenv('ES6_TARGET_HOST', 'http://elasticsearch6:80')\n\nes_client5 = Elasticsearch5([ES5_HOST_PORT])\nes_client6 = Elasticsearch6([ES6_TARGET_PORT])\n\n\ndef _prepare_docs_for_bulk_insert(docs):\n for doc in docs:\n yield {\n \"_id\": doc['_id'],\n \"_source\": doc['_source'],\n }\n\ndef bulk_index_documents_to_es6(documents):\n try:\n bulk(\n es_client6,\n _prepare_docs_for_bulk_insert(documents),\n index=INDEX,\n doc_type=GENERIC_DOC_TYPE,\n chunk_size=100\n )\n except TransportError6 as e:\n print(\"Failed to index documents: %s\", str(e))\n\n\ndef fetch_documents(from_=0, page_size=100, scroll_id=None):\n try:\n if scroll_id is None:\n results = es_client5.search(INDEX, GENERIC_DOC_TYPE, from_=from_, size=page_size, scroll='2m')\n scroll_id = results['_scroll_id']\n else:\n results = es_client5.scroll(scroll_id=scroll_id, scroll='2m')\n docs = results['hits']['hits']\n return (scroll_id, docs)\n except TransportError5 as e:\n print(\"Failed to fetch documents: %s\", str(e))\n return str(e), e.status_code\n\n\nif __name__ == '__main__':\n start = datetime.now()\n\n dcount = es_client5.count(index=INDEX, doc_type=GENERIC_DOC_TYPE)['count']\n\n print('Preparing to index {} document(s) from ES5'.format(dcount))\n\n offset = 0\n page_size = 250\n scroll_id = None\n while offset <= dcount:\n scroll_id, docs = fetch_documents(from_=offset, page_size=page_size, scroll_id=scroll_id)\n\n print('Indexing documents {} to {} into ES6'.format(offset, offset+page_size))\n bulk_index_documents_to_es6(docs)\n\n offset += page_size\n\n print('Finished in {} seconds'.format(datetime.now() - start))\n", "repo_name": "alphagov-mirror/elasticsearch-migration-helpers", "sub_path": "scripts/copy_es5_metaseach_to_es6.py", "file_name": "copy_es5_metaseach_to_es6.py", "file_ext": "py", "file_size_in_byte": 2809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 24, "usage_type": "call"}, {"api_name": 
"elasticsearch5.Elasticsearch", "line_number": 26, "usage_type": "call"}, {"api_name": "elasticsearch6.Elasticsearch", "line_number": 27, "usage_type": "call"}, {"api_name": "elasticsearch6.helpers.bulk", "line_number": 39, "usage_type": "call"}, {"api_name": "elasticsearch6.TransportError", "line_number": 46, "usage_type": "name"}, {"api_name": "elasticsearch5.TransportError", "line_number": 59, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 82, "usage_type": "name"}]} +{"seq_id": "22930435431", "text": "#!/usr/bin/env python\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\n# Static network weights of Braitenberg vehicle controller\n# Fig. 5.5\n\nweights_l = np.array(\t[[250., 250., 250., 500., 0., 0., 0., 0.],\n\t\t\t\t\t\t[250., 250., 500., 1000., 0., 0., 0., 0.],\n\t\t\t\t\t\t[250., 500., 1000., 1500., 0., 0., 0., 0.],\n\t\t\t\t\t\t[500., 1000., 1500., 2000., 0., 0., 0., 0.]])\n\nweights_r = np.array(\t[[0., 0., 0., 0., 500., 250., 250., 250.],\n\t\t\t\t\t\t[0., 0., 0., 0., 1000., 500., 250., 250.],\n\t\t\t\t\t\t[0., 0., 0., 0., 1500., 1000., 500., 250.],\n\t\t\t\t\t\t[0., 0., 0., 0., 2000., 1500., 1000., 500.]])\n\nfig = plt.figure(figsize=(6,6))\n\nax1 = plt.subplot(211)\nplt.title('Left Weights')\nplt.imshow(weights_l, alpha=0.5)\nplt.axis('off')\nfor (j,i),label in np.ndenumerate(weights_l):\n\tax1.text(i,j,int(label),ha='center',va='center')\n\nax2 = plt.subplot(212)\nplt.title('Right Weights')\nplt.imshow(weights_r, alpha=0.5)\nplt.axis('off')\nfor (j,i),label in np.ndenumerate(weights_r):\n\tax2.text(i,j,int(label),ha='center',va='center')\n\nfig.tight_layout()\nplt.show()", "repo_name": "clamesc/Training-Neural-Networks-for-Event-Based-End-to-End-Robot-Control", "sub_path": "Controller/plot_braitenberg_weights.py", "file_name": "plot_braitenberg_weights.py", "file_ext": "py", "file_size_in_byte": 1045, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 49, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.ndenumerate", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.ndenumerate", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "27964049508", "text": "from flask import Flask, request\nfrom flask import render_template\nfrom morse import MorseCodeTranslator\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/morse_decoding\", methods=[\"POST\", \"GET\"])\ndef morse_decoding():\n if request.method == \"POST\":\n inputMorse = request.form.get(\"inputMorse\")\n # 코드를 넣어주세요.\n morse_translator = MorseCodeTranslator() \n \n inputMorset_M = set(inputMorse) # 부호 확인용\n if inputMorset_M.issubset(['-','.',' ']): \n english_text = morse_translator.morse_to_english(inputMorse)\n print(\"Morse -> English\",english_text)\n program_result = english_text\n elif inputMorse.isalpha():\n morse_text = morse_translator.english_to_morse(inputMorse)\n print(\"Englist -> Morse\",morse_text)\n program_result = morse_text\n else:\n program_result='ERROR'\n #english_text = morse_translator.morse_to_english(inputMorse)\n #english_text = inputMorse\n\n return render_template(\"index.html\", result=program_result)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5100)\n", "repo_name": "tlagmlwns/Finish_etc", "sub_path": "web_morse/morse/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 1219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "morse.MorseCodeTranslator", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "6732880384", "text": "import os\nfrom pathlib import Path\nfrom shutil import copy, copytree\nimport plistlib\nimport requests\nfrom urllib.parse import urlparse\nimport zipfile\nimport sys\nimport subprocess\nimport tempfile\nimport platform\nimport argparse\nfrom glob import glob\n\nfrom utils.copy import Copy\nfrom utils.downloader import DpkgDeb, Ldid\nfrom utils.hash import LdidHash\n\n\"\"\" Functions \"\"\"\ndef cmd_in_path(args, cmd):\n if args.debug:\n print(f\"[DEBUG] Checking if command {cmd} is in PATH...\")\n \n if cmd == \"ldid\":\n if is_ios():\n if args.debug:\n print(f\"[DEBUG] Checking for ldid on iOS\")\n \n if is_dpkg_installed(\"ldid\"):\n if args.debug:\n print(f\"[DEBUG] ldid is installed via dpkg\")\n \n return True\n else:\n print(\"[-] ldid is required on iOS, but it is not installed. 
Please install it from Procursus.\")\n exit(1)\n \n try:\n if args.debug:\n print(f\"[DEBUG] Checking ldid output...\")\n \n ldid_out = subprocess.check_output([\"ldid\"], stderr=subprocess.STDOUT)\n if \"procursus\" not in ldid_out.decode(\"utf-8\"):\n if args.debug:\n print(f\"[DEBUG] ldid installed is not from Procursus... skipping.\")\n \n return False\n else:\n if args.debug:\n print(f\"[DEBUG] ldid installed is from Procursus!\")\n \n return True\n except(Exception,):\n return False\n \n if args.debug:\n print(f\"[DEBUG] ldid is not in PATH... skipping.\")\n \n try:\n which_cmd = subprocess.check_output([\"which\", f\"{cmd}\"], stderr=subprocess.STDOUT)\n except:\n return False\n \n return True\n \n # if not f\"{cmd} not found\" in subprocess.run(f\"which {cmd}\".split(), capture_output=True, text=True).stdout:\n # return True\n \n #return which(cmd) is not None\n \ndef is_macos():\n if platform.machine().startswith(\"i\"):\n return False\n \n return sys.platform == \"darwin\"\n\ndef is_linux():\n return sys.platform == \"linux\"\n\ndef is_ios():\n if not sys.platform == \"darwin\":\n return False\n \n return platform.machine().startswith(\"i\")\n\ndef is_dpkg_installed(pkg):\n return (os.system(\"dpkg -s \" + pkg + \"> /dev/null 2>&1\")) == 0\n \n\n\"\"\" Main Function \"\"\"\ndef main(args):\n print(f\"IPA Permasigner | Version {subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip()}-{subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()}\")\n print(\"Program created by Nebula | Original scripts created by zhuowei | CoreTrust bypass by Linus Henze\")\n print()\n \n # Check if script is running on Windows, if so, fail\n if sys.platform == \"win32\":\n print(\"[-] Script must be run on macOS or Linux.\")\n exit(1)\n \n # Check if codesign is added on Linux\n if args.codesign:\n if is_linux():\n print(\"[-] You cannot use codesign on Linux, remove the argument and it'll use ldid instead.\")\n exit(1)\n \n # Auto download ldid\n if not cmd_in_path(args, \"ldid\"):\n if Path(f\"{os.getcwd()}/ldid\").exists():\n if is_linux() and platform.machine() == \"x86_64\":\n if args.debug:\n print(f\"[DEBUG] On Linux x86_64, ldid not found...\")\n \n if not LdidHash.check_linux_64(args):\n print(\"[*] ldid is outdated or malformed, downloading latest version...\")\n os.remove(f\"{os.getcwd()}/ldid\")\n Ldid.download_linux_64()\n elif is_linux() and platform.machine() == \"aarch64\":\n if args.debug:\n print(f\"[DEBUG] On Linux aarch64, ldid not found...\")\n \n if not LdidHash.check_linux_arm64(args):\n print(\"[*] ldid is outdated or malformed, downloading latest version...\")\n os.remove(f\"{os.getcwd()}/ldid\")\n Ldid.download_linux_arm64()\n elif is_macos() and platform.machine() == \"x86_64\":\n if args.debug:\n print(f\"[DEBUG] On macOS x86_64, ldid not found...\")\n \n if not LdidHash.check_macos_64(args):\n print(\"[*] ldid is outdated or malformed, downloading latest version...\")\n os.remove(f\"{os.getcwd()}/ldid\")\n Ldid.download_macos_64()\n elif is_macos() and platform.machine() == \"arm64\":\n if args.debug:\n print(f\"[DEBUG] On macOS arm64, ldid not found...\")\n \n if not LdidHash.check_macos_arm64(args):\n print(\"[*] ldid is outdated or malformed, downloading latest version...\")\n os.remove(f\"{os.getcwd()}/ldid\")\n Ldid.download_macos_arm64()\n else:\n print(\"[*] ldid not found, downloading.\")\n if is_linux() and platform.machine() == \"x86_64\":\n Ldid.download_linux_64(args)\n elif is_linux() and
platform.machine() == \"aarch64\":\n Ldid.download_linux_arm64(args)\n elif is_macos() and platform.machine() == \"x86_64\":\n Ldid.download_macos_64(args)\n elif is_macos() and platform.machine() == \"arm64\":\n Ldid.download_macos_arm64(args)\n \n # Auto download dpkg-deb on Linux\n if not cmd_in_path(args, \"dpkg-deb\") and is_linux():\n if not Path(f\"{os.getcwd()}/dpkg-deb\").exists():\n if platform.machine() == \"x86_64\":\n if args.debug:\n print(f\"[DEBUG] On Linux x86_64, dpkg-deb not found...\")\n \n print(\"[*] dpkg-deb not found, downloading.\")\n DpkgDeb.download_linux_64(args)\n print()\n elif platform.machine() == \"aarch64\":\n if args.debug:\n print(f\"[DEBUG] On Linux aarch64, dpkg-deb not found...\")\n \n print(\"[*] dpkg-deb not found, downloading.\")\n DpkgDeb.download_linux_arm64(args)\n print()\n \n if is_macos():\n try:\n which_dpkg = subprocess.check_output([\"which\", \"dpkg\"], stderr=subprocess.STDOUT)\n except:\n if args.debug:\n print(f\"[DEBUG] On macOS x86_64, dpkg-deb not found...\")\n print(\"[-] dpkg is not installed and is required on macOS. Install it though brew or Procursus to continue.\")\n exit(1)\n \n # Prompt the user if they'd like to use an external IPA or a local IPA\n if not (args.url or args.path):\n option = input(\"[?] Would you like to use an IPA stored on the web, or on your system? [external, local] \")\n option = option.lower()\n\n global out_deb_name\n \n with tempfile.TemporaryDirectory() as tmpfolder:\n print(\"[*] Created temporary directory.\")\n print()\n \n # If the user's choice is external, download an IPA\n # Otherwise, copy the IPA to the temporary directory\n if args.url:\n url = args.url\n \n if not os.path.splitext(urlparse(url).path)[1] == \".ipa\":\n print(\"[-] URL provided is not an IPA, make sure to provide a direct link.\")\n exit(1)\n \n res = requests.get(url, stream=True)\n \n try:\n if res.status_code == 200:\n print(f\"[*] Downloading file...\")\n \n with open(f\"{tmpfolder}/app.ipa\", \"wb\") as f:\n f.write(res.content)\n else:\n print(f\"[-] URL provided is not reachable. Status code: {res.status_code}\")\n exit(1)\n except requests.exceptions.RequestException as err:\n print(f\"[-] URL provided is not reachable. Error: {err}\")\n exit(1)\n elif args.path:\n path = args.path\n \n if path.strip()[-1] == \" \":\n path = path.strip()[:-1]\n \n if Path(path).exists():\n copy(path, f\"{tmpfolder}/app.ipa\")\n else:\n print(\"[-] That file does not exist! Make sure you're using a direct path to the IPA file.\")\n exit(1)\n elif option == \"external\":\n url = input(\"[?] Paste in the *direct* path to an IPA online: \")\n \n if not os.path.splitext(urlparse(url).path)[1] == \".ipa\":\n print(\"[-] URL provided is not an IPA, make sure to provide a direct link.\")\n exit(1)\n \n res = requests.get(url, stream=True)\n \n try:\n if res.status_code == 200:\n print(f\"[*] Downloading file...\")\n \n with open(f\"{tmpfolder}/app.ipa\", \"wb\") as f:\n f.write(res.content)\n else:\n print(f\"[-] URL provided is not reachable. Status code: {res.status_code}\")\n exit(1)\n except requests.exceptions.RequestException as err:\n print(f\"[-] URL provided is not reachable. Error: {err}\")\n exit(1) \n elif option == \"local\":\n path = input(\"[?] Paste in the path to an IPA in your file system: \")\n \n if path.strip()[-1] == \" \":\n path = path.strip()[:-1]\n \n if Path(path).exists():\n copy(path, f\"{tmpfolder}/app.ipa\")\n else:\n print(\"[-] That file does not exist! 
Make sure you're using a direct path to the IPA file.\")\n exit(1)\n else:\n print(\"[-] That is not a valid option!\")\n exit(1)\n print()\n \n # Unzip the IPA file\n print(\"[*] Unzipping IPA...\")\n with zipfile.ZipFile(f\"{tmpfolder}/app.ipa\", 'r') as f:\n os.makedirs(f\"{tmpfolder}/app\", exist_ok=False)\n f.extractall(f\"{tmpfolder}/app\")\n print()\n \n # Read data from the plist\n print(\"[*] Reading plist...\")\n global folder, app_name, app_bundle, app_version, app_min_ios, app_author, app_executable\n \n if Path(f\"{tmpfolder}/app/Payload\").exists():\n for fname in os.listdir(path=f\"{tmpfolder}/app/Payload\"):\n if fname.endswith(\".app\"):\n folder = fname\n print(\"Found plist!\")\n else:\n print(\"[-] IPA is not valid!\")\n exit(1)\n \n if Path(f\"{tmpfolder}/app/Payload/{folder}/Info.plist\").exists():\n with open(f\"{tmpfolder}/app/Payload/{folder}/Info.plist\", 'rb') as f:\n info = plistlib.load(f)\n app_name = info[\"CFBundleName\"]\n app_bundle = info[\"CFBundleIdentifier\"]\n app_version = info[\"CFBundleShortVersionString\"]\n app_min_ios = info[\"MinimumOSVersion\"]\n app_author = app_bundle.split(\".\")[1]\n if info[\"CFBundleExecutable\"]:\n app_executable = info[\"CFBundleExecutable\"]\n print(\"Executable found.\")\n else:\n app_executable = None\n print(\"No executable found.\")\n print(\"Found information about the app!\")\n print()\n \n # Get the deb file ready\n print(\"[*] Preparing deb file...\")\n print(\"Making directories...\")\n os.makedirs(f\"{tmpfolder}/deb/Applications\", exist_ok=False)\n os.makedirs(f\"{tmpfolder}/deb/DEBIAN\", exist_ok=False)\n print(\"Copying deb file scripts and control...\")\n Copy.copy_postrm(f\"{tmpfolder}/deb/DEBIAN/postrm\", app_name)\n Copy.copy_postinst(f\"{tmpfolder}/deb/DEBIAN/postinst\", app_name)\n Copy.copy_control(f\"{tmpfolder}/deb/DEBIAN/control\", app_name, app_bundle, app_version, app_min_ios, app_author)\n print(\"Copying app files...\")\n copytree(f\"{tmpfolder}/app/Payload/{folder}\", f\"{tmpfolder}/deb/Applications/{folder}\")\n print(\"Changing deb file scripts permissions...\")\n subprocess.run(f\"chmod 0755 {tmpfolder}/deb/DEBIAN/postrm\".split(), stdout=subprocess.DEVNULL)\n subprocess.run(f\"chmod 0755 {tmpfolder}/deb/DEBIAN/postinst\".split(), stdout=subprocess.DEVNULL)\n if app_executable is not None:\n print(\"Changing app executable permissions...\")\n full_path = f\"'{tmpfolder}/deb/Applications/{folder}/{app_executable}'\"\n os.system(\"chmod 0755 \" + full_path)\n print()\n \n # Sign the app\n print(\"[*] Signing app...\")\n Copy.copy_entitlements(f\"{tmpfolder}/entitlements.plist\", app_bundle)\n if args.codesign:\n print(\"Signing with codesign as it was specified...\")\n subprocess.run(f\"security import ./dev_certificate.p12 -P password -A\".split(), stdout=subprocess.DEVNULL)\n full_path = f\"'{tmpfolder}/deb/Applications/{folder}'\"\n frameworks_path = f\"{tmpfolder}/deb/Applications/{folder}/Frameworks\"\n \n os.system(f\"codesign -s 'We Do A Little Trolling iPhone OS Application Signing' --force --deep --preserve-metadata=entitlements '{full_path}'\")\n \n if Path(frameworks_path).exists():\n if args.debug:\n print(\"[DEBUG] Frameworks path exists\")\n \n for file in os.listdir(frameworks_path):\n if file.endswith(\".dylib\"):\n print(f\"Signing dylib {file}...\")\n os.system(f\"codesign -s 'We Do A Little Trolling iPhone OS Application Signing' --force --deep '{frameworks_path}/{file}'\")\n \n for fpath in glob(frameworks_path + '/*.framework'):\n fname = os.path.basename(fpath)\n if 
Path(f\"{fpath}/Info.plist\").exists():\n with open(f\"{fpath}/Info.plist\", 'rb') as f:\n info = plistlib.load(f)\n if info[\"CFBundleExecutable\"]:\n f_executable = info[\"CFBundleExecutable\"]\n if args.debug:\n print(f\"[DEBUG] Executable found in the {fname}\")\n else:\n f_executable = None\n if args.debug:\n print(f\"[DEBUG] No executable found in the {fname}\")\n if f_executable is not None:\n print(f\"Signing executable in {fname}\")\n exec_path = os.path.join(fpath, f_executable)\n if args.debug:\n print(f\"[DEBUG] Running command: codesign -s 'We Do A Little Trolling iPhone OS Application Signing' --force --deep {exec_path}\")\n subprocess.run(f\"codesign -s 'We Do A Little Trolling iPhone OS Application Signing' --force --deep '{exec_path}'\", shell=True)\n else:\n print(\"Signing with ldid...\")\n full_path = f\"'{tmpfolder}/deb/Applications/{folder}'\"\n frameworks_path = f\"{tmpfolder}/deb/Applications/{folder}/Frameworks\"\n if cmd_in_path(args, \"ldid\"):\n if args.debug:\n print(f\"[DEBUG] Running command: ldid -S{tmpfolder}/entitlements.plist -M -Kdev_certificate.p12 '{full_path}'\")\n \n os.system(f\"ldid -S{tmpfolder}/entitlements.plist -M -Kdev_certificate.p12 '{full_path}'\")\n else:\n subprocess.run(\"chmod +x ldid\".split(), stdout=subprocess.DEVNULL)\n if args.debug:\n print(f\"[DEBUG] Running command: ./ldid -S{tmpfolder}/entitlements.plist -M -Kdev_certificate.p12 '{full_path}'\")\n \n os.system(f\"./ldid -S{tmpfolder}/entitlements.plist -M -Kdev_certificate.p12 '{full_path}'\")\n \n if Path(frameworks_path).exists():\n if args.debug:\n print(\"[DEBUG] Frameworks path exists\")\n \n for file in os.listdir(frameworks_path):\n if file.endswith(\".dylib\"):\n print(f\"Signing dylib {file}...\")\n if cmd_in_path(args, \"ldid\"):\n if args.debug:\n print(f\"[DEBUG] Running command: ldid -Kdev_certificate.p12 '{frameworks_path}/{file}'\")\n \n os.system(f\"ldid -Kdev_certificate.p12 '{frameworks_path}/{file}'\")\n else:\n if args.debug:\n print(f\"[DEBUG] Running command: ./ldid -Kdev_certificate.p12 '{frameworks_path}/{file}'\")\n \n os.system(f\"./ldid -Kdev_certificate.p12 '{frameworks_path}/{file}'\")\n \n for fpath in glob(frameworks_path + '/*.framework'):\n fname = os.path.basename(fpath)\n if Path(f\"{fpath}/Info.plist\").exists():\n with open(f\"{fpath}/Info.plist\", 'rb') as f:\n info = plistlib.load(f)\n if info[\"CFBundleExecutable\"]:\n f_executable = info[\"CFBundleExecutable\"]\n if args.debug:\n print(f\"[DEBUG] Executable found in the {fname}\")\n else:\n f_executable = None\n if args.debug:\n print(f\"[DEBUG] No executable found in the {fname}\")\n if f_executable is not None:\n print(f\"Signing executable in {fname}\")\n exec_path = os.path.join(fpath, f_executable)\n if cmd_in_path(args, \"ldid\"):\n if args.debug:\n print(f\"[DEBUG] Running command: ldid -Kdev_certificate.p12 {exec_path}\")\n subprocess.run(f\"ldid -Kdev_certificate.p12 '{exec_path}'\", shell=True)\n else:\n if args.debug:\n print(f\"[DEBUG] Running command: ./ldid -Kdev_certificate.p12 {exec_path}\")\n subprocess.run(f\"./ldid -Kdev_certificate.p12 '{exec_path}'\", shell=True)\n print()\n\n # Package the deb file\n print(\"[*] Packaging the deb file...\")\n os.makedirs(\"output\", exist_ok=True)\n if Path(f\"output/{app_name}.deb\").exists():\n os.remove(f\"output/{app_name}.deb\")\n \n out_deb_name = app_name.replace(' ', '')\n\n if cmd_in_path(args, \"dpkg-deb\"):\n if args.debug:\n print(f\"[DEBUG] Running command: dpkg-deb -Zxz --root-owner-group -b {tmpfolder}/deb 
output/{app_name.replace(' ', '')}.deb\")\n \n subprocess.run(f\"dpkg-deb -Zxz --root-owner-group -b {tmpfolder}/deb output/{app_name.replace(' ', '')}.deb\".split(), stdout=subprocess.DEVNULL)\n else:\n if args.debug:\n print(f\"[DEBUG] Running command: ./dpkg-deb -Zxz --root-owner-group -b {tmpfolder}/deb output/{app_name.replace(' ', '')}.deb\")\n \n subprocess.run(f\"./dpkg-deb -Zxz --root-owner-group -b {tmpfolder}/deb output/{app_name.replace(' ', '')}.deb\".split(), stdout=subprocess.DEVNULL)\n\n # Done!!!\n print()\n print(\"[*] We are finished!\")\n print(\"[*] Copy the newly created deb from the output folder to your jailbroken iDevice and install it!\")\n print(\"[*] The app will continue to work when rebooted to stock.\")\n print(f\"[*] Output file: output/{out_deb_name}.deb\")\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--codesign', action='store_true', help=\"uses codesign instead of ldid.\")\n parser.add_argument('-d', '--debug', action='store_true', help=\"shows some debug info, only useful for testing.\")\n parser.add_argument('-u', '--url', type=str, help=\"the direct URL of the IPA to be signed.\")\n parser.add_argument('-p', '--path', type=str, help=\"the direct local path of the IPA to be signed.\")\n args = parser.parse_args()\n \n main(args)\n\n", "repo_name": "CrackerCat/permasigner", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 21058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.check_output", "line_number": 42, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 42, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 60, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 60, "usage_type": "attribute"}, {"api_name": "platform.machine", "line_number": 72, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 75, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sys.platform", "line_number": 81, "usage_type": "attribute"}, {"api_name": "platform.machine", "line_number": 84, "usage_type": "call"}, {"api_name": "os.system", "line_number": 87, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 109, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 109, "usage_type": "call"}, {"api_name": "platform.machine", "line_number": 110, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash.check_linux_64", "line_number": 114, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash", "line_number": 114, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 116, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 116, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_linux_64", "line_number": 117, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 117, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 118, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash.check_linux_arm64", "line_number": 122, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash", "line_number": 122, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 124, "usage_type": "call"}, {"api_name": 
"os.getcwd", "line_number": 124, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_linux_arm64", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 125, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 126, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash.check_macos_64", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash", "line_number": 130, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 132, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_macos_64", "line_number": 133, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 133, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 134, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash.check_macos_arm64", "line_number": 138, "usage_type": "call"}, {"api_name": "utils.hash.LdidHash", "line_number": 138, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 140, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_macos_arm64", "line_number": 141, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 141, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 144, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_linux_64", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 145, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_linux_arm64", "line_number": 147, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 147, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 148, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_macos_64", "line_number": 149, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 149, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid.download_macos_arm64", "line_number": 151, "usage_type": "call"}, {"api_name": "utils.downloader.Ldid", "line_number": 151, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 155, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 155, "usage_type": "call"}, {"api_name": "platform.machine", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.downloader.DpkgDeb.download_linux_64", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.downloader.DpkgDeb", "line_number": 161, "usage_type": "name"}, {"api_name": "platform.machine", "line_number": 163, "usage_type": "call"}, {"api_name": "utils.downloader.DpkgDeb.download_linux_arm64", "line_number": 168, "usage_type": "call"}, {"api_name": "utils.downloader.DpkgDeb", "line_number": 168, "usage_type": "name"}, {"api_name": "subprocess.check_output", "line_number": 173, "usage_type": "call"}, {"api_name": "subprocess.STDOUT", "line_number": 173, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 196, "usage_type": "call"}, {"api_name": "os.path", "line_number": 196, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 196, "usage_type": 
"call"}, {"api_name": "requests.get", "line_number": 200, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 220, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path", "line_number": 228, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 228, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 232, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 243, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 252, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 253, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 264, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 265, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 273, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 274, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 282, "usage_type": "call"}, {"api_name": "plistlib.load", "line_number": 284, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 302, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 303, "usage_type": "call"}, {"api_name": "utils.copy.Copy.copy_postrm", "line_number": 305, "usage_type": "call"}, {"api_name": "utils.copy.Copy", "line_number": 305, "usage_type": "name"}, {"api_name": "utils.copy.Copy.copy_postinst", "line_number": 306, "usage_type": "call"}, {"api_name": "utils.copy.Copy", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.copy.Copy.copy_control", "line_number": 307, "usage_type": "call"}, {"api_name": "utils.copy.Copy", "line_number": 307, "usage_type": "name"}, {"api_name": "shutil.copytree", "line_number": 309, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 311, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 311, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 312, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 312, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 316, "usage_type": "call"}, {"api_name": "utils.copy.Copy.copy_entitlements", "line_number": 321, "usage_type": "call"}, {"api_name": "utils.copy.Copy", "line_number": 321, "usage_type": "name"}, {"api_name": "subprocess.run", "line_number": 324, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 324, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 328, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 330, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 334, "usage_type": "call"}, {"api_name": "os.system", "line_number": 337, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 341, "usage_type": "call"}, {"api_name": "plistlib.load", "line_number": 343, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path", "line_number": 354, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 357, "usage_type": "call"}, 
{"api_name": "os.system", "line_number": 366, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 368, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 368, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 372, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 374, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 378, "usage_type": "call"}, {"api_name": "os.system", "line_number": 385, "usage_type": "call"}, {"api_name": "os.system", "line_number": 390, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 392, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path", "line_number": 393, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 394, "usage_type": "call"}, {"api_name": "plistlib.load", "line_number": 396, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 407, "usage_type": "call"}, {"api_name": "os.path", "line_number": 407, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 411, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 415, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 420, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 421, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 422, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 430, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 430, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 435, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 435, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 445, "usage_type": "call"}]} +{"seq_id": "74101819365", "text": "# coding: utf-8\n\n# In[1]:\n\n\n# ----------------------------------拟合圆,得到圆心和半径------------------------------------------#\nfrom numpy import * # 导入numpy的库函数\n\n\n\ndef circleLeastFit(points):\n center_x = 0.0\n center_y = 0.0\n radius = 0.0\n\n sum_x = sum_y = 0.0\n sum_x2 = sum_y2 = 0.0\n sum_x3 = sum_y3 = 0.0\n sum_xy = sum_x1y2 = sum_x2y1 = 0.0\n N = len(points)\n for i in range(1, N):\n x = points[i][0]\n y = points[i][1]\n x2 = x * x\n y2 = y * y\n sum_x += x\n sum_y += y\n sum_x2 += x2\n sum_y2 += y2\n sum_x3 += x2 * x\n sum_y3 += y2 * y\n sum_xy += x * y\n sum_x1y2 += x * y2\n sum_x2y1 += x2 * y\n\n C = D = E = G = H = 0.0\n a = b = c = 0.0\n C = N * sum_x2 - sum_x * sum_x\n D = N * sum_xy - sum_x * sum_y\n E = N * sum_x3 + N * sum_x1y2 - (sum_x2 + sum_y2) * sum_x\n G = N * sum_y2 - sum_y * sum_y\n H = N * sum_x2y1 + N * sum_y3 - (sum_x2 + sum_y2) * sum_y\n a = (H * D - E * G) / (C * G - D * D)\n b = (H * C - E * D) / (D * D - G * C)\n c = -(a * sum_x + b * sum_y + sum_x2 + sum_y2) / N\n\n center_x = a / (-2)\n center_y = b / (-2)\n radius = sqrt(a * a + b * b - 4 * c) / 2\n\n return center_x, center_y, round(radius, 2)\n\n\n# In[2]:\n\n\n# ----------------------------------分析图片,得到响应json--------------------------------------#\n# -*- coding: utf-8 -*-\nimport urllib.request\nimport urllib.error\nimport time\nimport json\n\n\ndef faceIdentify(fr):\n http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'\n key = \"enFC8NNZcf19CNRZGhGMvGKPHtMgVkfB\"\n secret = \"kIV15LIXTSX0gKflGEYHXekj8-mgEuHE\"\n # filepath = r\"gg.jpg\"\n\n boundary = '----------%s' % hex(int(time.time() * 1000))\n data = []\n data.append('--%s' % boundary)\n 
data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'api_key')\n data.append(key)\n\n data.append('--%s' % boundary)\n data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'api_secret')\n data.append(secret)\n\n data.append('--%s' % boundary)\n data.append('Content-Disposition: form-data; name=\"%s\"; filename=\" \"' % 'image_file')\n data.append('Content-Type: %s\\r\\n' % 'application/octet-stream')\n data.append(fr.read())\n fr.close()\n data.append('--%s' % boundary)\n data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'return_landmark')\n data.append('2')\n data.append('--%s' % boundary)\n data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % 'return_attributes')\n data.append(\n \"gender,age,smiling,headpose,facequality,blur,eyestatus,emotion,ethnicity,beauty,mouthstatus,eyegaze,skinstatus\")\n data.append('--%s--\\r\\n' % boundary)\n\n for i, d in enumerate(data):\n if isinstance(d, str):\n data[i] = d.encode('utf-8')\n\n http_body = b'\\r\\n'.join(data)\n\n # build http request\n req = urllib.request.Request(url=http_url, data=http_body)\n\n # header\n req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)\n\n try:\n # post data to server\n resp = urllib.request.urlopen(req, timeout=5)\n # get response\n qrcont = resp.read()\n # if you want to load as json, you should decode first,\n # for example: json.loads(qrcont.decode('utf-8'))\n text = json.loads(qrcont.decode('utf-8'))\n tt = text.get('faces')[0].get('landmark')\n return tt\n except urllib.error.HTTPError as e:\n print(e.read().decode('utf-8'))\n return 'wrong'\n\n\n# In[3]:\n\n\n# ---------------------------------------Facial landmark analysis----------------------------------------------#\nimport math\n\n\ndef qradium(a, b, t):\n list1 = []\n for i in range(a, b):\n tt = t.get('contour_left' + str(i))\n list1.append([tt.get('x'), tt.get('y')])\n x, y, r = circleLeastFit(list1)\n return r\n\n\ndef chinhu(t):\n list2 = []\n for i in range(14, 17):\n text2 = t.get('contour_left' + str(i))\n list2.append([text2.get('x'), text2.get('y')])\n text3 = t.get('contour_right' + str(i))\n list2.append([text3.get('x'), text3.get('y')])\n text4 = t.get('contour_chin')\n list2.append([text4.get('x'), text4.get('y')])\n x, y, r = circleLeastFit(list2);\n return r\n\n\n# -----------------------------Call this method to get the seven features---------------------------------#\ndef handle_features(text1):\n w1 = round(math.sqrt((text1.get('contour_right1').get('x') - text1.get('contour_left1').get('x')) ** 2 + (\n text1.get('contour_right1').get('y') - text1.get('contour_left1').get('y')) ** 2), 2)\n w2 = round(math.sqrt((text1.get('contour_right3').get('x') - text1.get('contour_left3').get('x')) ** 2 + (\n text1.get('contour_right3').get('y') - text1.get('contour_left3').get('y')) ** 2), 2)\n w3 = round(math.sqrt((text1.get('contour_right9').get('x') - text1.get('contour_left9').get('x')) ** 2 + (\n text1.get('contour_right9').get('y') - text1.get('contour_left9').get('y')) ** 2), 2)\n h = round(math.sqrt((text1.get('contour_chin').get('x') - text1.get('nose_bridge1').get('x')) ** 2 + (\n text1.get('contour_chin').get('y') - text1.get('nose_bridge1').get('y')) ** 2), 2)\n r1 = qradium(1, 7, text1)\n r2 = qradium(7, 14, text1)\n r3 = chinhu(text1)\n\n return w1 / h, w2 / h, w3 / h, h / h, r1 / h, r2 / h, r3 / h\n\n\nfrom sklearn.externals import joblib\nclf = joblib.load('./FHModel/faceShape/clf.pkl')\n\n\ndef get_face(img):\n x = []\n text = faceIdentify(img)\n w1, w2, w3, h, r1, r2, r3 = handle_features(text)\n x
= [[w1 / h, w2 / h, w3 / h, h / h, r1 / h, r2 / h, r3 / h]]\n y = clf.predict(x)\n # print(y)\n return y\n", "repo_name": "e-yi/hairBook", "sub_path": "FHModel/faceShape/get_features.py", "file_name": "get_features.py", "file_ext": "py", "file_size_in_byte": 5723, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.time", "line_number": 71, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 102, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 102, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 102, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 109, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 109, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 109, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 114, "usage_type": "call"}, {"api_name": "urllib.request.error", "line_number": 117, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 117, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 153, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 155, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 157, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib.load", "line_number": 169, "usage_type": "call"}, {"api_name": "sklearn.externals.joblib", "line_number": 169, "usage_type": "name"}]} +{"seq_id": "27075655493", "text": "import time\nimport json\nimport os\n\nTIME_INTERVAL = int(os.environ['TIME_INTERVAL'])\nREQUEST_COUNT = int(os.environ['REQUEST_COUNT'])\n\n\nclass RedisMiddleWare:\n\n redis_reference = None\n\n @staticmethod\n def set_redis(redis_ref):\n RedisMiddleWare.redis_reference = redis_ref\n\n @staticmethod\n def get_redis():\n return RedisMiddleWare.redis_reference\n\n @staticmethod\n def check_limit(request_body):\n client_id = request_body.get('clientId')\n url = request_body.get('url')\n redis_con = RedisMiddleWare.get_redis()\n max_time = int(time.time())\n min_time = int(max_time-TIME_INTERVAL)\n existing_count = redis_con.zcount(client_id, min_time, max_time)\n if existing_count < REQUEST_COUNT:\n pickled_data = json.dumps({'url': url, 'time': max_time})\n redis_con.zadd(client_id, {pickled_data: max_time})\n redis_con.execute_command('EXPIRE ' + client_id + ' 60')\n return False\n return True\n\n\n", "repo_name": "arpitx165/proxy-service", "sub_path": "middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 1012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.environ", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "14475991412", "text": "import os\r\nimport sqlite3 as sl\r\nimport sys\r\nimport time\r\nfrom datetime import date\r\nfrom flask import Flask, flash, request, redirect, url_for, session, render_template\r\nfrom werkzeug.utils import secure_filename\r\nfrom modules.dummies import generate_dummypage\r\nfrom modules.database import *\r\nfrom modules.dummies import *\r\nfrom modules.constants import *\r\nfrom 
modules.likes import *\r\nimport bcrypt\r\n\r\napp = Flask(__name__, template_folder='templates')\r\nCURRENT_ADDRESS = \"http://127.0.0.1:5000/\"\r\n\r\n\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\napp.config['UPLOAD_AVATAR_FOLDER'] = UPLOAD_AVATAR_FOLDER\r\napp.config['SECRET_KEY'] = 'a03cb5d6aa4399201f230dedcbbb3ed8bec0018d19db9521415b547a'\r\n\r\n\r\ndef allowed_file(filename):\r\n return '.' in filename and \\\r\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\n\r\n# The page for meme uploading\r\n@app.route(\"/uploads\", methods=['GET', 'POST'])\r\n@app.route(\"/upload\", methods=['GET', 'POST'])\r\n@app.route(\"/post\", methods=['GET', 'POST'])\r\n@app.route(\"/new\", methods=['GET', 'POST'])\r\ndef upload_meme():\r\n if 'login' not in session:\r\n return redirect(url_for('login_user'))\r\n if not is_user_active():\r\n log_out()\r\n return redirect(url_for('login_user'))\r\n if request.method == 'POST':\r\n # check if the post request has the file part\r\n if 'file' in request.files:\r\n username = session['login']\r\n file = request.files['file']\r\n if file.filename != '' and allowed_file(file.filename):\r\n filename = str(date.today()) + \"_time_\" + str(time.time()) + '.' + file.filename.rsplit('.', 1)[-1]\r\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n connection = create_connection(DATABASE_PATH)\r\n add_data(connection=connection, tablename='post',\r\n dataclass_element=Post(id=0, author_id=get_userid_byname(username, DATABASE_PATH),\r\n text=request.form.get('comment'), image=filename,\r\n date=str(date.today()), like=0, dislike=0))\r\n return redirect(url_for('show_feed'))\r\n avatar = 'images/avatars/' + get_user_avatar_bId(session['id'], DATABASE_PATH)\r\n return render_template('upload.html', avatar=avatar)\r\n\r\n\r\n# The feed page\r\n@app.route('/')\r\n@app.route(\"/feed\", methods=['GET', 'POST'])\r\n@app.route(\"/index\", methods=['GET', 'POST'])\r\n@app.route(\"/main\", methods=['GET', 'POST'])\r\ndef show_feed(page=1):\r\n if 'login' not in session:\r\n return redirect(url_for('login_user'))\r\n if not is_user_active():\r\n log_out()\r\n return redirect(url_for('login_user'))\r\n if request.method == \"POST\":\r\n if 'delete' in request.form.keys():\r\n id = request.form['id']\r\n author_id = list(get_authorid_by_post(create_connection(DATABASE_PATH), id))[0][0]\r\n if session['admin'] >= 1 or session['id'] == author_id:\r\n delete_post_bID(id, create_connection(DATABASE_PATH), UPLOAD_FOLDER)\r\n return redirect(url_for('show_feed'))\r\n else:\r\n return render_template(\"403.html\")\r\n if 'dislike' in request.form.keys():\r\n id = request.form['id']\r\n import_history(id, session['id'], 2)\r\n if 'like' in request.form.keys():\r\n id = request.form['id']\r\n import_history(id, session['id'], 1)\r\n if request.method == \"GET\" and request.args.get('page'):\r\n page = int(request.args.get('page'))\r\n dataposts = list(get_all_tabledata(create_connection(DATABASE_PATH), 'Post'))\r\n pages, limit = calc_pages_and_limit(dataposts, page)\r\n posts = generate_posts(dataposts[::-1], page, limit)\r\n avatar = 'images/avatars/' + get_user_avatar_bId(session['id'], DATABASE_PATH)\r\n return render_template(\"index.html\", posts=posts, pages=pages, avatar=avatar)\r\n\r\n\r\ndef process_useraction(action, nickname):\r\n if action == 'Delete':\r\n delete_user(create_connection(DATABASE_PATH), nickname)\r\n elif action == 'Make admin':\r\n make_admin(create_connection(DATABASE_PATH), nickname)\r\n elif action == 'Make 
moderator':\r\n make_moderator(create_connection(DATABASE_PATH), nickname)\r\n elif action == 'Make user':\r\n make_user(create_connection(DATABASE_PATH), nickname)\r\n elif action == 'Ban':\r\n ban_user()\r\n else:\r\n return render_template('500.html')\r\n\r\n\r\n@app.route('/admin', methods=['GET', 'POST'])\r\n@app.route('/adminpannel', methods=['GET', 'POST'])\r\n@app.route('/adminpanel', methods=['GET', 'POST'])\r\n@app.route('/moderator', methods=['GET', 'POST'])\r\n@app.route('/moder', methods=['GET', 'POST'])\r\ndef admin_page():\r\n if 'login' not in session:\r\n return redirect(url_for('login_user'))\r\n if not is_user_active():\r\n log_out()\r\n return redirect(url_for('login_user'))\r\n if int(list(get_admin_status_bId(session['id'], DATABASE_PATH))[0][0]) == 0:\r\n randomizer = random.randint(1,3)\r\n if randomizer == 1:\r\n return render_template(\"notadmin_page.html\")\r\n return render_template(\"403.html\")\r\n return redirect(url_for('user_page'))\r\n \r\n\r\n@app.route('/me', methods=['GET', 'POST'])\r\n@app.route('/userpage', methods=['GET', 'POST'])\r\n@app.route('/user', methods=['GET', 'POST'])\r\ndef user_page():\r\n if 'login' not in session:\r\n return redirect(url_for('login_user'))\r\n if not is_user_active():\r\n log_out()\r\n return redirect(url_for('login_user'))\r\n userid = session['id']\r\n page = 1\r\n if request.method == \"GET\":\r\n if 'id' in request.args.keys():\r\n userid = int(request.args['id'])\r\n elif request.args.get('page'):\r\n page = int(request.args.get('page'))\r\n if request.method == 'POST':\r\n if 'exit' in request.form.keys():\r\n log_out()\r\n return redirect(url_for('login_user'))\r\n else:\r\n action = request.form['action']\r\n nickname = request.form['nickname']\r\n process_useraction(action, nickname)\r\n dataposts = list(get_author_posts(create_connection(DATABASE_PATH), userid))\r\n pages, limit = calc_pages_and_limit(dataposts, page)\r\n posts = generate_posts(dataposts[::-1], page, limit)\r\n avatar = 'images/avatars/' + get_user_avatar_bId(userid, DATABASE_PATH)\r\n userdata = {'id': userid, 'login': get_username_bId(userid, DATABASE_PATH),\r\n 'admin': int(list(get_admin_status_bId(userid, DATABASE_PATH))[0][0])}\r\n return render_template('userpage.html', posts=posts, pages=pages, avatar=avatar, userdata=userdata)\r\n\r\ndef is_user_active():\r\n \"\"\"Check if user exists in database\"\"\"\r\n con = create_connection(DATABASE_PATH)\r\n res = con.execute(f\"SELECT id,login FROM Users WHERE id={session['id']} and login='{session['login']}'\")\r\n return len(list(res)) == 1\r\n\r\ndef log_out():\r\n \"\"\"Clear a user session\"\"\"\r\n keys = list(session.keys())[:]\r\n for key in keys:\r\n session.pop(key)\r\n\r\ndef generate_posts(dataposts, page, limit):\r\n \"\"\"Returns array of dictionaries for posts\"\"\"\r\n posts = []\r\n # Generate posts\r\n for i in range((page - 1) * PAGES_POSTS, limit):\r\n author_name = get_username_bId(dataposts[i][1], DATABASE_PATH)\r\n avatar = get_user_avatar_bId(dataposts[i][1], DATABASE_PATH)\r\n reactions = list(get_reaction_bId(dataposts[i][0], DATABASE_PATH))\r\n post = {'id': dataposts[i][0], 'avatar': AVATAR_FOLDER + avatar, 'author_id': dataposts[i][1],\r\n 'author_name': author_name, 'date': dataposts[i][4], 'comment': dataposts[i][2],\r\n 'image': MEMES_FOLDER + dataposts[i][3], 'likes': reactions[0][0], 'dislikes': reactions[0][1]\r\n }\r\n posts.append(post)\r\n return posts\r\n\r\ndef calc_pages_and_limit(dataposts, page):\r\n \"\"\"Returns max pages and limit for current 
page\"\"\"\r\n    pages = len(dataposts) // PAGES_POSTS\r\n    if len(dataposts) % PAGES_POSTS != 0:\r\n        pages += 1\r\n    limit = page * PAGES_POSTS\r\n    if limit > len(dataposts):\r\n        limit = len(dataposts)\r\n    return pages, limit\r\n\r\n\r\n@app.errorhandler(404)\r\ndef page_notexist(e):\r\n    return render_template('404.html')\r\n\r\n@app.errorhandler(403)\r\ndef page_access_denied(e):\r\n    return render_template('403.html')\r\n\r\n@app.errorhandler(500)\r\ndef internal_server_error(e):\r\n    return render_template('500.html')\r\n\r\n@app.errorhandler(400)\r\ndef bad_request(e):\r\n    return render_template('400.html')\r\n\r\n\r\n\r\n# Register page\r\n@app.route(\"/register\", methods=['POST', 'GET'])\r\n@app.route(\"/signup\", methods=['POST', 'GET'])\r\ndef register_user():\r\n    if 'login' in session:\r\n        return render_template(\"welcome_page.html\")\r\n    if request.method == 'POST':\r\n        login = request.form['login']\r\n        password = bcrypt.hashpw(request.form['password'].encode(), bcrypt.gensalt())\r\n        # email = request.form['email']\r\n        file = ''\r\n        if 'avatar' in request.files:\r\n            file = request.files['avatar']\r\n        filename = 'ava.jpg'\r\n        path = ''\r\n        if file == '':\r\n            path = DEFAULT_AVATAR\r\n        elif allowed_file(file.filename):\r\n            filename = secure_filename(login + '_' + file.filename)\r\n            path = os.path.join(app.config['UPLOAD_AVATAR_FOLDER'], filename)\r\n            file.save(path)\r\n        con = sl.connect(DATABASE_PATH)\r\n        email = f\"user_email_{login}{path[len(path) - 4:]}@mail.ru\"\r\n        print(filename)\r\n        val = add_data(connection=con, tablename='users', dataclass_element=User(0, 0, login, password, filename, email),\r\n                       individual_fields=USERS_INDIVIDUAL_FIELDS)\r\n        if val == 100:\r\n            return render_template('cant_register.html')\r\n        res = list(con.execute(f\"SELECT id,admin FROM Users WHERE login='{login}'\"))[0]\r\n        session['id'] = res[0]\r\n        session['login'] = login\r\n        session['admin'] = res[1]\r\n        return redirect(url_for('show_feed'))\r\n    return render_template('register.html')\r\n\r\n\r\n# Login page\r\n@app.route(\"/login\", methods=['POST', 'GET'])\r\n@app.route(\"/auth\", methods=['POST', 'GET'])\r\n@app.route(\"/authorize\", methods=['POST', 'GET'])\r\ndef login_user():\r\n    if 'login' in session:\r\n        return render_template('welcome_page.html')\r\n    if request.method == 'POST':\r\n        login = request.form.get('login')\r\n        password = request.form.get('password').encode()\r\n        con = sl.connect(DATABASE_PATH)\r\n        sql = f\"SELECT password,id, admin FROM users WHERE `login`='{login}'\"\r\n        result = list(con.execute(sql))\r\n        if len(result) == 0:\r\n            return render_template(\"cant_login.html\")\r\n        print(result[0][0])\r\n        if bcrypt.checkpw(password, result[0][0]):\r\n            session['login'] = login\r\n            session['id'] = result[0][1]\r\n            session['admin'] = result[0][2]\r\n            return render_template(\"welcome_page.html\")\r\n    return render_template('auth.html')\r\n\r\n\r\n# Program run\r\n#if __name__ == '__main__':\r\n#    app.run(host = \"0.0.0.0\", debug=True)\r\n", "repo_name": "mrglaster/flask-retromemes-app", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 11180, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 36, "usage_type": 
"call"}, {"api_name": "flask.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 46, "usage_type": "name"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request.form.get", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form.keys", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.form.keys", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form.keys", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request.args.get", "line_number": 85, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 115, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 124, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 136, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.args.keys", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 140, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 140, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 141, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 142, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 142, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 143, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "name"}, {"api_name": "flask.request.form.keys", "line_number": 144, "usage_type": 
"call"}, {"api_name": "flask.request.form", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 144, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 146, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 148, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 148, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.session.keys", "line_number": 167, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 167, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 169, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 199, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 203, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 207, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 219, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 221, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 221, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "bcrypt.hashpw", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "bcrypt.gensalt", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 226, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 226, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path", "line_number": 234, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 242, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 244, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 248, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 256, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 258, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 258, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 259, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 259, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 260, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 260, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 261, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 265, "usage_type": "call"}, {"api_name": "bcrypt.checkpw", "line_number": 267, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 268, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 269, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}]} +{"seq_id": "39602703042", "text": "from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Table, Column, Integer, ForeignKey\nfrom sqlalchemy import text\nimport json\nimport os\n\nBase = declarative_base()\n\n#init app\napp = Flask(__name__)\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n#Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'test3000.sqlite')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n#Init db\ndb = SQLAlchemy(app)\n\n#Init ma\nma = Marshmallow(app)\n\n\n\n#entitati\n\nclass Pacient(db.Model):\n __tablename__ = 'pacienti'\n id_pacient = db.Column(db.Integer, primary_key = True)\n nume_pacient = db.Column(db.String(100))\n copil = relationship(\"Disease\")\n\n def __init__(self, id_pacient, nume_pacient):\n self.id_pacient = id_pacient\n self.nume_pacient = nume_pacient\n\n\n\nclass Disease(db.Model):\n __tablename__ = 'boli'\n id_boala = db.Column(db.Integer, primary_key = True)\n id_pacient = Column(db.Integer, ForeignKey('pacienti.id_pacient'))\n nume = db.Column(db.String(50))\n descriere = db.Column(db.String(100))\n\n def __init__(self, id_boala, id_pacient, nume, descriere):\n self.id_boala = id_boala\n self.id_pacient = id_pacient\n self.nume = nume\n self.descriere = descriere\n\n\nclass PacientDoctorTable(db.Model):\n __tablename__ = \"asocieri\"\n id = db.Column(db.Integer, primary_key = True)\n id_doctor = db.Column(db.Integer, ForeignKey('doctor.doctor_id'))\n id_pacient = db.Column(db.Integer, ForeignKey('pacienti.id_pacient'))\n\n def __init__(self,id_doctor, id_pacient):\n self.id_doctor = id_doctor\n self.id_pacient = id_pacient\n\n# class PacientDiseaseTable:\n# __tablename__ = 'asociere_boala_pacient'\n# id_boala= db.Column(db.Integer, ForeignKey('boli.id_boala'))\n# id_pacient = db.Column(db.Integer, ForeignKey('pacient.id_pacient')) \n\n# def __init__(self, id_boala, id_pacient):\n# self.id_boala = id_boala\n# self.id_pacient = id_pacient\n\n\nclass Doctor(db.Model):\n\n doctor_id = db.Column(db.Integer, primary_key = True)\n nume = db.Column(db.String(100))\n specializare = db.Column(db.String(50))\n email = db.Column(db.String(50))\n parola = 
db.Column(db.String(50))\n \n def __init__(self,doctor_id, nume,specializare,email, parola):\n self.doctor_id = doctor_id\n self.nume = nume\n self.specializare = specializare\n self.email = email\n self.parola = parola\n\n #sfarsit entitati \n\n\n\nclass DoctorSchema(ma.Schema):\n class Meta:\n fields = ('doctor_id', 'nume','specializare','email','parola')\n\nclass DiseaseSchema(ma.Schema):\n class Meta:\n fields = ('id_boala','id_pacient','nume','descriere')\n\nclass PacientSchema(ma.Schema):\n class Meta:\n fields = ('id_pacient','nume_pacient') \n\nclass DoctorPatientSchema(ma.Schema):\n class Meta:\n fields = ('id_doctor', 'id_pacient')\n\n\ndoctor_schema = DoctorSchema()\ndoctors_schema = DoctorSchema(many=True)\n\ndisease_schema = DiseaseSchema()\ndiseases_schema = DiseaseSchema(many = True)\n\n\npatient_schema = PacientSchema()\npatients_schema = PacientSchema(many=True)\n\npatient_doctor_single = DoctorPatientSchema()\npatient_doctor_schema = DoctorPatientSchema(many = True)\n\n\n#ruta verificare doctor pacient\n@app.route('/relatii', methods = ['GET'])\ndef getRelatie():\n relatii = PacientDoctorTable.query.all()\n\n result = patient_doctor_schema.dump(relatii)\n\n return jsonify(result)\n\n#ruta de logare\n@app.route('/doctor//

', methods=['GET'])\ndef get_doctor(e, p):\n query = \"select * from doctor WHERE email = :em LIMIT 1\"\n \n doctor = db.session.query(Doctor).filter_by(email = e).filter(parola = p).first()\n db.session.commit()\n dt = doctor_schema.dump(doctor)\n print(dt)\n fl = open('cookie.json','w')\n json.dump(dt,fl)\n return jsonify(dt)\n\n@app.route('/isLoggedIn',methods=['GET'])\ndef isLoggedIn():\n fl = open('cookie.json','r')\n txt_Data=fl.read()\n if txt_Data != '':\n txt_Data = json.loads(txt_Data)\n else:\n txt_Data = json.loads('{}')\n return jsonify(txt_Data)\n\n#ruta de selectare a pacientilor in functie de doctorul care este logat\n@app.route('/myPacients/')\ndef get_my_doctors(doctor_id):\n \n query_asociere = db.session.query(PacientDoctorTable).filter_by(id_doctor = doctor_id)\n\n #array \n result = patient_doctor_schema.dump(query_asociere)\n\n pacient_arr = []\n pacienti = []\n for i in result:\n query_pt = db.session.query(Pacient).filter_by(id_pacient = i['id_pacient'])\n # db.session.commit()\n res = patients_schema.dump(query_pt)\n # print(res)\n if len(res) > 0:\n pacienti.append(res[0])\n\n return jsonify(pacienti)\n\n#ruta pentru register\n@app.route('/addDoctor', methods=['POST'])\ndef add_doctor():\n doctor_id = request.json['doctor_id']\n nume = request.json['nume']\n specializare = request.json['specializare']\n email = request.json['email']\n password = request.json['parola']\n \n new_doctor = Doctor(doctor_id, nume, specializare,email, password)\n\n db.session.add(new_doctor)\n db.session.commit()\n\n return doctor_schema.jsonify(new_doctor)\n\n\n\n#ruta de popularea a tabelei de asociere intre doctor si pacient\n@app.route('/insertRelationship', methods = ['POST'])\ndef insert_relationship():\n id_doctor = request.json['id_doctor']\n id_patient = request.json['id_pacient']\n\n rel = PacientDoctorTable(id_doctor,id_patient)\n\n db.session.add(rel)\n db.session.commit()\n\n\n return patient_doctor_single.jsonify(rel)\n\n\n#ruta de creare a pacientului\n@app.route('/addPacient', methods=['POST'])\ndef addPacient():\n pacient_id = request.json['id_pacient']\n nume = request.json['nume_pacient']\n\n pacient = Pacient(pacient_id, nume)\n db.session.add(pacient)\n db.session.commit()\n\n return patient_schema.jsonify(pacient)\n\n\n#ruta de creeare a unei boli\n@app.route('/addDisease', methods=['POST'])\ndef addDisease():\n id_boala = request.json['id_boala']\n id_pacient = request.json['id_pacient']\n nume = request.json['nume']\n descriere = request.json['descriere']\n\n disease = Disease(id_boala, id_pacient, nume, descriere)\n\n db.session.add(disease)\n db.session.commit()\n\n return disease_schema.jsonify(disease)\n\n\n#route for test\n@app.route('/', methods=['GET'])\ndef hello():\n return \"Hello World\"\n\n\n#ruta de selectare a tuturor doctorilor\n@app.route('/doctors', methods=['GET'])\ndef get_doctors():\n all_doctors = Doctor.query.all()\n \n result = doctors_schema.dump(all_doctors)\n\n return jsonify(result)\n\n\n#ruta de selectare a pecientilor\n@app.route('/pacients', methods=['GET'])\ndef get_patients():\n all_patients = Pacient.query.all()\n\n result = patients_schema.dump(all_patients)\n\n return jsonify(result)\n\n\n#ruta de selectare a bolilor\n@app.route('/diseases', methods=['GET'])\ndef get_diseases():\n all_diseases = Disease.query.all()\n\n result = diseases_schema.dump(all_diseases)\n\n return jsonify(result)\n\n\n#run server\n\n\nif __name__ == '__main__':\n app.run(host = '0.0.0.0',debug=True)\n", "repo_name": "BaditaGeorge/Licenta", "sub_path": 
"REST API Licenta/.venv/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 7326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_marshmallow.Marshmallow", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 147, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 148, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 155, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 179, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 184, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 185, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 186, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 188, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 188, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 202, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 202, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 203, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 203, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 218, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 218, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 230, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 230, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 231, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 231, "usage_type": "name"}, 
{"api_name": "flask.request.json", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 256, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 276, "usage_type": "call"}]} +{"seq_id": "31576792159", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass StaticPublicKey(object):\n \"\"\"\n A static public key which is used to verify the JWT signature.\n \"\"\"\n\n #: A constant which can be used with the format property of a StaticPublicKey.\n #: This constant has a value of \"JSON_WEB_KEY\"\n FORMAT_JSON_WEB_KEY = \"JSON_WEB_KEY\"\n\n #: A constant which can be used with the format property of a StaticPublicKey.\n #: This constant has a value of \"PEM\"\n FORMAT_PEM = \"PEM\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new StaticPublicKey object with values from keyword arguments. This class has the following subclasses and if you are using this class as input\n to a service operations then you should favor using a subclass over the base class:\n\n * :class:`~oci.apigateway.models.JsonWebKey`\n * :class:`~oci.apigateway.models.PemEncodedPublicKey`\n\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param kid:\n The value to assign to the kid property of this StaticPublicKey.\n :type kid: str\n\n :param format:\n The value to assign to the format property of this StaticPublicKey.\n Allowed values for this property are: \"JSON_WEB_KEY\", \"PEM\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type format: str\n\n \"\"\"\n self.swagger_types = {\n 'kid': 'str',\n 'format': 'str'\n }\n\n self.attribute_map = {\n 'kid': 'kid',\n 'format': 'format'\n }\n\n self._kid = None\n self._format = None\n\n @staticmethod\n def get_subtype(object_dictionary):\n \"\"\"\n Given the hash representation of a subtype of this class,\n use the info in the hash to return the class of the subtype.\n \"\"\"\n type = object_dictionary['format']\n\n if type == 'JSON_WEB_KEY':\n return 'JsonWebKey'\n\n if type == 'PEM':\n return 'PemEncodedPublicKey'\n else:\n return 'StaticPublicKey'\n\n @property\n def kid(self):\n \"\"\"\n **[Required]** Gets the kid of this StaticPublicKey.\n A unique key ID. This key will be used to verify the signature of a\n JWT with matching \\\"kid\\\".\n\n\n :return: The kid of this StaticPublicKey.\n :rtype: str\n \"\"\"\n return self._kid\n\n @kid.setter\n def kid(self, kid):\n \"\"\"\n Sets the kid of this StaticPublicKey.\n A unique key ID. 
This key will be used to verify the signature of a\n JWT with matching \\\"kid\\\".\n\n\n :param kid: The kid of this StaticPublicKey.\n :type: str\n \"\"\"\n self._kid = kid\n\n @property\n def format(self):\n \"\"\"\n **[Required]** Gets the format of this StaticPublicKey.\n The format of the public key.\n\n Allowed values for this property are: \"JSON_WEB_KEY\", \"PEM\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The format of this StaticPublicKey.\n :rtype: str\n \"\"\"\n return self._format\n\n @format.setter\n def format(self, format):\n \"\"\"\n Sets the format of this StaticPublicKey.\n The format of the public key.\n\n\n :param format: The format of this StaticPublicKey.\n :type: str\n \"\"\"\n allowed_values = [\"JSON_WEB_KEY\", \"PEM\"]\n if not value_allowed_none_or_none_sentinel(format, allowed_values):\n format = 'UNKNOWN_ENUM_VALUE'\n self._format = format\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/apigateway/models/static_public_key.py", "file_name": "static_public_key.py", "file_ext": "py", "file_size_in_byte": 4112, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 121, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 126, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "21276128547", "text": "from django.shortcuts import render\nfrom django.views.generic import ListView,DetailView\nfrom .models import Course,Topic,CourseCategory\n# Create your views here.\n\n\nclass CourseListView(ListView):\n model = Course\n context_object_name = \"course\"\n template_name = \"learn.html\"\n\n def get_context_data(self,**kwargs):\n context = super(CourseListView,self).get_context_data(**kwargs)\n context['category'] = CourseCategory.objects.all()\n return context\n\nclass CourseDetailView(DetailView):\n model = Course\n context_object_name = \"course\"\n template_name = \"learn-detail.html\"\n\nclass TopicDetailView(DetailView):\n model = Topic\n context_object_name = \"topic\"\n template_name = \"topic-detail.html\"", "repo_name": "sammagafu/sammagafu", "sub_path": "learn/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 738, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.views.generic.ListView", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Course", "line_number": 8, "usage_type": "name"}, {"api_name": "models.CourseCategory.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.CourseCategory.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.CourseCategory", "line_number": 14, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Course", "line_number": 18, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 22, "usage_type": "name"}, {"api_name": "models.Topic", "line_number": 23, "usage_type": "name"}]} 
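Note: the Django record above (learn/views.py from sammagafu/sammagafu) defines three class-based views but the scraped file does not include the URLconf that exposes them. As a minimal, hypothetical sketch of how such views are typically wired up — the learn/urls.py path, the route strings, and the URL names below are assumptions, not part of the dataset:

```python
# learn/urls.py (hypothetical) -- wires up the three class-based views above.
# DetailView subclasses resolve their object from the captured `pk` by default.
from django.urls import path

from .views import CourseListView, CourseDetailView, TopicDetailView

urlpatterns = [
    path("courses/", CourseListView.as_view(), name="course-list"),
    path("courses/<int:pk>/", CourseDetailView.as_view(), name="course-detail"),
    path("topics/<int:pk>/", TopicDetailView.as_view(), name="topic-detail"),
]
```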
+{"seq_id": "26286581768", "text": "import openai\nimport os\nimport requests\nimport tempfile\nimport asyncio\n\nfrom core.cartridges import addCartridge, update_cartridge_field\nfrom file_handling.s3 import write_file, read_file\nfrom tools.debug import eZprint\n\nopenai.api_key = os.getenv('OPENAI_API_KEY', default=None)\n\nasync def generate_temp_image(prompt):\n DEBUG_KEYS = ['FILE_HANDLING', 'IMAGE_GENERATION']\n eZprint(f'Generating image with prompt: {prompt}', DEBUG_KEYS)\n try:\n loop = asyncio.get_event_loop()\n response = await loop.run_in_executor(None, lambda: openai.Image.create(\n prompt=prompt,\n n=1,\n size='1024x1024'\n ))\n except Exception as e:\n eZprint(f'Error generating image: {e}', DEBUG_KEYS)\n return None\n \n image_url = response['data'][0]['url']\n response = requests.get(image_url)\n eZprint(f'Image URL: {image_url}', DEBUG_KEYS)\n processed_media = tempfile.NamedTemporaryFile(suffix=\".png\", delete=False)\n processed_media.write(response.content)\n processed_media.close()\n return processed_media\n \n\nasync def generate_images(prompts, sessionID, convoID, loadout):\n\n tasks = [generate_image(prompt, sessionID, convoID, loadout) for prompt in prompts]\n images = await asyncio.gather(*tasks)\n images_str = ', '.join(images)\n return images_str\n\n\nasync def generate_image(prompt, sessionID, convoID, loadout):\n\n loop = asyncio.get_event_loop()\n response = await loop.run_in_executor(None, lambda: openai.Image.create(\n prompt=prompt,\n n=1,\n size='1024x1024'\n ))\n \n image_url = response['data'][0]['url']\n print(f'Image URL: {image_url}')\n response = requests.get(image_url)\n \n name = prompt + '.png'\n cartVal = {\n 'label' : name,\n # 'text' : str(transcriptions),\n 'description' : 'Image generated by openAI with prompt: ' + prompt,\n 'file' : name,\n 'extension' : 'image/png',\n # 'media_url' : url,\n 'type' : 'media',\n 'enabled' : True,\n }\n\n cartKey = await addCartridge(cartVal, sessionID, loadout, convoID )\n url = await write_file(response.content, cartKey) \n print(url)\n await update_cartridge_field({'sessionID': sessionID, 'cartKey' : cartKey, 'fields': {'media_url': url}}, convoID, loadout, True)\n\n return name\n", "repo_name": "kpister/prompt-linter", "sub_path": "data/scraping/repos/samueltates~nova/file_handling~image_handling.py", "file_name": "file_handling~image_handling.py", "file_ext": "py", "file_size_in_byte": 2344, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "openai.api_key", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "tools.debug.eZprint", "line_number": 15, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 17, "usage_type": "call"}, {"api_name": "openai.Image.create", "line_number": 18, "usage_type": "call"}, {"api_name": "openai.Image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tools.debug.eZprint", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "tools.debug.eZprint", "line_number": 29, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 30, "usage_type": "call"}, {"api_name": "asyncio.gather", "line_number": 39, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 46, "usage_type": "call"}, {"api_name": "openai.Image.create", "line_number": 47, "usage_type": "call"}, 
{"api_name": "openai.Image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 55, "usage_type": "call"}, {"api_name": "core.cartridges.addCartridge", "line_number": 69, "usage_type": "call"}, {"api_name": "file_handling.s3.write_file", "line_number": 70, "usage_type": "call"}, {"api_name": "core.cartridges.update_cartridge_field", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "22370248390", "text": "\"\"\"SustainAnalytics website Scrape\n\nThis script allows the user to scrape the companies' ESG ratings from the\nSustainAnalytics website\nWebsite link: \"https://www.sustainalytics.com/esg-ratings\"\n\nThis tool accepts Company's names list in comma separated value\nfile (.csv) format as input.\n\nThis script requires that `pandas` be installed within the Python\nenvironment you are running this script in.\n\nThe output is a .csv file with Company name and its corresponding ESG ratings\n\"\"\"\n\nimport pandas as pd\nfrom selenium.common.exceptions import NoSuchElementException\nfrom time import sleep\nfrom tqdm import tqdm\nfrom .scraper import WebScraper\n\n\ndef append_dict(temp: str) -> str:\n ''' Append the SustainAnalytics dictionary with Company name, Industry\\\n Name, and its ESG Risk rating\n\n Parameters\n ----------\n temp : str\n The previous company name appended to the dictionary\n\n Returns\n -------\n str\n The latest company name appended to the dictionary\n '''\n if temp == company:\n bot.append_empty_values(san)\n\n else:\n san['SA_Company'].append(company.text)\n san['SA_ESG_Risk'].append(esg_score.text)\n san['SA_Industry'].append(industry.text)\n temp = company\n return temp\n\n\n# Read input companies dataset\ncompanies_filename = WebScraper._get_filename()\nheader_name = WebScraper._get_headername()\nexport_path = WebScraper._get_exportpath()\ndf = pd.read_csv(companies_filename)\ndata_length = len(df)\n\n# Set up the webdriver\nURL = \"https://www.sustainalytics.com/esg-ratings\"\nbot = WebScraper(URL)\n\n# Scrape the website. 
Extract company names and their respective ESG score\n# and store it in the dictionary\ntemp = 0\nfor i in tqdm(range(data_length)):\n san = {'SA_Company': [], 'SA_ESG_Risk': [], 'SA_Industry': []}\n # Starting the search by finding the search bar and searching for the\n # company\n search_bar = bot.send_request_to_search_bar(\n header_name, df, i, xpath='//*[@id=\"searchInput\"]')\n\n try:\n key = bot.find_element('.//div[@class=\"list-group-item\"]')\n key.click()\n sleep(3)\n xpath = '/html/body/section[2]/section[1]/div/div[1]/div[1]/div[3]/ \\\n div[1]/div[1]/div[1]/span'\n esg_score = bot.find_element(xpath)\n company = bot.find_element(\n '/html/body/section[2]/section[1]/div/div[1]/div[1]/div[1]/div/h2')\n industry = bot.find_element(\n '/html/body/section[2]/section[1]/div/div[1]/div[1]/div[2]/ \\\n div[1]/p/strong')\n temp = append_dict(temp)\n\n except NoSuchElementException:\n bot.append_empty_values(san)\n\n # Save the data into a csv file\n df1 = bot.convert_dict_to_csv(san, export_path)\n", "repo_name": "shweta-29/Companies_ESG_Scraper", "sub_path": "esgmetrics/esgscraper/sustainanalytics.py", "file_name": "sustainanalytics.py", "file_ext": "py", "file_size_in_byte": 2726, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 28, "dataset": "github-code", "pt": "50", "api": [{"api_name": "scraper.WebScraper._get_filename", "line_number": 49, "usage_type": "call"}, {"api_name": "scraper.WebScraper", "line_number": 49, "usage_type": "name"}, {"api_name": "scraper.WebScraper._get_headername", "line_number": 50, "usage_type": "call"}, {"api_name": "scraper.WebScraper", "line_number": 50, "usage_type": "name"}, {"api_name": "scraper.WebScraper._get_exportpath", "line_number": 51, "usage_type": "call"}, {"api_name": "scraper.WebScraper", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "scraper.WebScraper", "line_number": 57, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "74817538396", "text": "from autobahn.wamp.types import PublishOptions\n\nfrom crossbar._util import dump_json\nfrom crossbar.bridge.rest.common import _CommonResource\n\n__all__ = ('PublisherResource', )\n\n\nclass PublisherResource(_CommonResource):\n \"\"\"\n A HTTP/POST to WAMP-Publisher bridge.\n \"\"\"\n def _process(self, request, event):\n\n if 'topic' not in event:\n return self._deny_request(request, 400, key=\"topic\", log_category=\"AR455\")\n\n topic = event.pop('topic')\n\n args = event['args'] if 'args' in event and event['args'] else []\n kwargs = event['kwargs'] if 'kwargs' in event and event['kwargs'] else {}\n options = event['options'] if 'options' in event and event['options'] else {}\n\n publish_options = PublishOptions(acknowledge=True,\n forward_for=options.get('forward_for', None),\n retain=options.get('retain', None),\n exclude_me=options.get('exclude_me', None),\n exclude_authid=options.get('exclude_authid', None),\n exclude_authrole=options.get('exclude_authrole', None),\n exclude=options.get('exclude', None),\n eligible_authid=options.get('eligible_authid', None),\n eligible_authrole=options.get('eligible_authrole', None),\n eligible=options.get('eligible', None))\n\n kwargs['options'] = publish_options\n\n # 
http://twistedmatrix.com/documents/current/web/howto/web-in-60/asynchronous-deferred.html\n\n d = self._session.publish(topic, *args, **kwargs)\n\n def on_publish_ok(pub):\n res = {'id': pub.id}\n body = dump_json(res, True).encode('utf8')\n self._complete_request(request, 200, body, log_category=\"AR200\", reason=\"OK\")\n\n def on_publish_error(err):\n self._fail_request(request, failure=err, log_category=\"AR456\")\n\n return d.addCallbacks(on_publish_ok, on_publish_error)\n", "repo_name": "crossbario/crossbar", "sub_path": "crossbar/bridge/rest/publisher.py", "file_name": "publisher.py", "file_ext": "py", "file_size_in_byte": 2162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2045, "dataset": "github-code", "pt": "50", "api": [{"api_name": "crossbar.bridge.rest.common._CommonResource", "line_number": 9, "usage_type": "name"}, {"api_name": "autobahn.wamp.types.PublishOptions", "line_number": 24, "usage_type": "call"}, {"api_name": "crossbar._util.dump_json", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "36762342879", "text": "import json\nimport logging\n\ntry:\n\tfrom urllib.request import Request, urlopen # Python 3\nexcept ImportError:\n\tfrom urllib2 import Request, urlopen # Python 2\n\nfrom django.conf import settings\n\ndef infoip(func):\n\tdef wrap(request, *args, **kwargs):\n\t\tinfo = None\n\t\ttry:\n\t\t\tinfo = __get_geoip_info(request)\n\t\texcept Exception as e:\n\t\t\tlogging.error(e)\n\n\t\trequest.infoip = info\n\n\t\treturn func(request)\n\treturn wrap\n\ndef __get_client_ip(request):\n\tx_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n\tif x_forwarded_for:\n\t\tip = x_forwarded_for.split(',')[0]\n\telse:\n\t\tip = request.META.get('REMOTE_ADDR')\n\treturn ip\n\ndef __get_geoip_info(request):\n\tprotocol = \"http\"\n\tif hasattr(settings, 'INFOIP_USE_HTTPS') and settings.INFOIP_USE_HTTPS:\n\t\tprotocol = \"https\"\n\n\turl = \"{0}://api.infoip.io/{1}\".format(protocol, __get_client_ip(request))\n\t\n\tq = Request(url)\n\n\tif hasattr(settings, 'INFOIP_API_KEY') and settings.INFOIP_API_KEY:\n\t\tq.add_header('x-infoip-token', settings.INFOIP_API_KEY)\n\telse:\n\t\tlogging.debug(\"No infoip API key found. 
Rate limiting may occur!\")\n\n\tdata = urlopen(q).read().decode(\"utf-8\")\n\treturn json.loads(data)", "repo_name": "ciokan/infoip-geoip-python-integrations", "sub_path": "infoip/django.py", "file_name": "django.py", "file_ext": "py", "file_size_in_byte": 1133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.error", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.conf.settings.INFOIP_USE_HTTPS", "line_number": 34, "usage_type": "attribute"}, {"api_name": "urllib2.Request", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.settings", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.conf.settings.INFOIP_API_KEY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.conf.settings.INFOIP_API_KEY", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "26641901066", "text": "from appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\n\ndesired_caps = {}\ndesired_caps['platformName'] = 'Android'\ndesired_caps['platdormVersion'] = '6.0'\ndesired_caps['deviceName'] = '192.168.208.101:5555'\ndesired_caps['unicodeKeyboard'] = True\ndesired_caps['resetKeyboard'] = True\ndriver = webdriver.Remote('http://localhost:4723/wd/hub',desired_caps)\ndriver.implicitly_wait(10)\n# print(driver.current_package)\n# print(driver.current_activity)\ndriver.start_activity('com.android.settings','.ChooseLockPattern')\n# TouchAction(driver).press(x=210,y=258).move_to(x=90,y=0).move_to(x=90,y=0).move_to(x=0,y=90).move_to(x=0,y=90).move_to(x=-90,y=0).move_to(x=0,y=-90).perform()\n# TouchAction(driver)\\\n# .press(x=210,y=258)\\\n# .move_to(x=300,y=258)\\\n# .move_to(x=390,y=258)\\\n# .move_to(x=390,y=348)\\\n# .move_to(x=390,y=438)\\\n# .move_to(x=300,y=438)\\\n# .move_to(x=300,y=348)\\\n# .move_to(x=210,y=438)\\\n# .release().perform()\n\n(TouchAction(driver)\n .press(x=210,y=258)\n .move_to(x=300,y=258)\n .move_to(x=390,y=258)\n .move_to(x=390,y=348)\n .move_to(x=390,y=438)\n .move_to(x=300,y=438)\n .move_to(x=300,y=348)\n .move_to(x=210,y=438)\n .release().perform())\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "sunny970/phone_automation", "sub_path": "移动自动化/appium/test06_fingeraction.py", "file_name": "test06_fingeraction.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 10, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "appium.webdriver.common.touch_action.TouchAction", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "72947300644", "text": "'''\nCreated on Apr 10, 2014\n\n@author: Harvey\n'''\nimport pulp\nimport openpyxl\n\ndef optionmargin(exchange, fwd, strikes, prem, otype):\n if otype == 'c':\n vv = [max(k-fwd,0) for k in strikes];\n else:\n vv = [max(fwd-k,0) for k in strikes];\n \n if exchange 
== 'CFFE':\n marginRatio = 0.15;\n minRatio = 0.667; \n margin=[p+max(marginRatio*fwd-v, minRatio*marginRatio*fwd) for (p, v) in zip(prem, vv)];\n \n return margin; \n\ndef strat2pos(init_strat): \n uc = init_strat['callsprd'];\n up = init_strat['putsprd'];\n vc = init_strat['callfly'];\n vp = init_strat['putfly'];\n wf = init_strat['callput'];\n nSize = len(wf);\n call = [0.0]*nSize;\n put = [0.0]*nSize;\n call[0] = uc[0] + vc[1] + wf[0];\n call[nSize-1] =-uc[nSize-2] + vc[nSize-2] + wf[nSize-1];\n put[0] =-up[0] + vp[1] - wf[0];\n put[nSize-1] = up[nSize-2] + vp[nSize-2] - wf[nSize-1];\n \n for i in range(1, nSize-1):\n call[i] = uc[i]-uc[i-1] + wf[i] + vc[i-1]-2*vc[i]+vc[i+1];\n put[i] =-up[i]+up[i-1] - wf[i] + vp[i-1]-2*vp[i]+vp[i+1]; \n return {'call':call, 'put':put}\n\ndef arboptimizer( mktdata, exchange, init_strat, capital_limit, fwdmargin ):\n fwdBid = mktdata['fwdbid'];\n fwdAsk = mktdata['fwdask'];\n fwdMid = (fwdBid + fwdAsk)/2.0;\n strikes = mktdata['strike'];\n callBids = mktdata['callbid'];\n callAsks = mktdata['callask'];\n callMids = [ (a + b)/2 for (a,b) in zip(callBids, callAsks)];\n putBids = mktdata['putbid'];\n putAsks = mktdata['putask'];\n putMids = [ (a + b)/2 for (a,b) in zip(putBids, putAsks)];\n nSize = len(strikes);\n \n uc0 = init_strat['callsprd'];\n up0 = init_strat['putsprd'];\n vc0 = init_strat['callfly'];\n vp0 = init_strat['putfly'];\n wf0 = init_strat['callput']; \n\n margin_c = optionmargin(exchange, fwdMid, strikes, callMids, 'c');\n margin_p = optionmargin(exchange, fwdMid, strikes, putMids, 'p');\n prob = pulp.LpProblem(\"ArbAllocator\",pulp.LpMaximize);\n\n uc = pulp.LpVariable.dict('uc_%s', range(nSize-1));\n up = pulp.LpVariable.dict('up_%s', range(nSize-1));\n vc = pulp.LpVariable.dict('vc_%s', range(nSize));\n vp = pulp.LpVariable.dict('vp_%s', range(nSize));\n wf = pulp.LpVariable.dict('wf_%s', range(nSize)); \n xuc = pulp.LpVariable.dict('xuc_%s', range(nSize-1), lowBound=0.0);\n xup = pulp.LpVariable.dict('xup_%s', range(nSize-1), lowBound=0.0); \n yc = pulp.LpVariable.dict('yc_%s', range(nSize), lowBound=0.0);\n yp = pulp.LpVariable.dict('yp_%s', range(nSize), lowBound=0.0);\n yf = pulp.LpVariable('yf', lowBound = 0.0, cat='Continuous'); \n zc = pulp.LpVariable.dict('zc_%s', range(nSize), lowBound=0.0);\n zp = pulp.LpVariable.dict('zp_%s', range(nSize), lowBound=0.0);\n zf = pulp.LpVariable('zf', lowBound = 0.0, cat='Continuous');\n \n for i in vc.viewkeys():\n vc[i].lowBound = -vc0[i];\n \n for i in vp.viewkeys():\n vp[i].lowBound = -vp0[i];\n\n prob += sum([(uc0[i]+uc[i])*(callMids[i+1]-callMids[i]+strikes[i+1]-strikes[i]) \\\n -(strikes[i+1]-strikes[i])*xuc[i] for i in range(nSize-1)]) \\\n + sum([(up0[i]+up[i])*(putMids[i]-putMids[i+1]+strikes[i+1]-strikes[i]) \\\n -(strikes[i+1]-strikes[i])*xup[i] for i in range(nSize-1)]) \\\n + sum([(vc0[i]+vc[i])*(2*callMids[i]-callMids[i-1]-callMids[i+1]) for i in range(1, nSize-1)]) \\\n + sum([(vp0[i]+vp[i])*(2*putMids[i] - putMids[i-1] - putMids[i+1]) for i in range(1, nSize-1)]) \\\n + sum([(wf0[i]+wf[i])*(putMids[i]-callMids[i]+fwdMid-strikes[i]) for i in range(nSize)]) \\\n - sum([yc[i]*(callAsks[i]-callMids[i]) for i in range(nSize)]) \\\n - sum([yp[i]*(putAsks[i]-putMids[i]) for i in range(nSize)]) \\\n - yf*(fwdAsk-fwdMid); \n \n prob += sum([zc[i]*margin_c[i]+zp[i]*margin_p[i] for i in range(nSize)])+ \\\n sum([(uc0[i]+uc[i])*(callMids[i]-callMids[i+1]) for i in range(nSize-1)]) + \\\n sum([(up0[i]+up[i])*(putMids[i+1]-putMids[i]) for i in range(nSize-1)]) + \\\n 
sum([(vc0[i]+vc[i])*(callMids[i-1]+callMids[i+1]-2*callMids[i]) for i in range(1, nSize-1)]) + \\\n sum([(vp0[i]+vp[i])*(putMids[i-1] +putMids[i+1] -2*putMids[i] ) for i in range(1, nSize-1)]) + \\\n sum([(wf0[i]+wf[i])*(callMids[i] - putMids[i]) for i in range(nSize)]) + \\\n + zf*fwdmargin*fwdMid <=capital_limit;\n \n prob += zf - sum([wf[i]+wf0[i] for i in range(nSize)]) >= 0;\n prob += zf + sum([wf[i]+wf0[i] for i in range(nSize)]) >= 0;\n prob += yf - sum([wf[i] for i in range(nSize)]) >= 0;\n prob += yf + sum([wf[i] for i in range(nSize)]) >= 0;\n \n prob += (uc0[0]+uc[0])+ (vc0[1]+vc[1]) + (wf0[0]+wf[0]) + zc[0] >= 0;\n prob += -(up0[0]+up[0])+ (vp0[1]+vp[1]) - (wf0[0]+wf[0]) + zp[0] >= 0;\n prob += -uc0[nSize-2]+vc0[nSize-2]+wf0[nSize-1] - uc[nSize-2]+vc[nSize-2]+wf[nSize-1] + zc[nSize-1] >= 0;\n prob += up0[nSize-2]+vp0[nSize-2]-wf0[nSize-1] + up[nSize-2]+vp[nSize-2]-wf[nSize-1] + zp[nSize-1] >= 0;\n \n prob += yc[0]+uc[0]+vc[1]+wf[0] >= 0;\n prob += yc[0]-uc[0]-vc[1]-wf[0] >= 0;\n prob += yp[0]-up[0]+vp[1]-wf[0] >= 0;\n prob += yp[0]+up[0]-vp[1]+wf[0] >= 0;\n prob += yc[nSize-1]-uc[nSize-2]+vc[nSize-2]+wf[nSize-1] >= 0;\n prob += yc[nSize-1]+uc[nSize-2]-vc[nSize-2]-wf[nSize-1] >= 0;\n prob += yp[nSize-1]+up[nSize-2]+vp[nSize-2]-wf[nSize-1] >= 0;\n prob += yp[nSize-1]-up[nSize-2]-vp[nSize-2]+wf[nSize-1] >= 0;\n prob += vc[0] == 0.0;\n prob += vc[nSize-1] == 0.0;\n prob += vp[0] == 0.0;\n prob += vp[nSize-1] == 0.0;\n \n for i in range(nSize-1):\n prob += xuc[i] - uc0[i] - uc[i] >= 0\n prob += xup[i] - up0[i] - up[i] >= 0\n \n for i in range(1, nSize-1): \n prob += uc0[i]-uc0[i-1]+vc0[i-1]+vc0[i+1]-2*vc0[i]+wf0[i]+uc[i]-uc[i-1]+vc[i-1]+vc[i+1]-2*vc[i]+wf[i]+zc[i] >= 0; \n prob += up0[i-1]-up0[i]+vp0[i-1]+vp0[i+1]-2*vp0[i]-wf0[i]+up[i-1]-up[i]+vp[i-1]+vp[i+1]-2*vp[i]-wf[i]+zp[i] >= 0;\n prob += yc[i]+uc[i]-uc[i-1]+vc[i-1]+vc[i+1]-2*vc[i]+wf[i] >= 0;\n prob += yc[i]-uc[i]+uc[i-1]-vc[i-1]-vc[i+1]+2*vc[i]-wf[i] >= 0;\n prob += yp[i]+up[i-1]-up[i]+vp[i-1]+vp[i+1]-2*vp[i]-wf[i] >= 0;\n prob += yp[i]-up[i-1]+up[i]-vp[i-1]-vp[i+1]+2*vp[i]+wf[i] >= 0;\n\n prob.solve(); \n solutions = {};\n for v in prob.variables():\n solutions[v.name] = v.varValue\n \n callsprd=[ solutions['uc_'+str(i)] for i in range(nSize-1)] + [0];\n putsprd =[ solutions['up_'+str(i)] for i in range(nSize-1)] + [0];\n callput= [ solutions['wf_'+str(i)] for i in range(nSize)]; \n callfly =[ solutions['vc_'+str(i)] for i in range(nSize) ];\n putfly = [ solutions['vp_'+str(i)] for i in range(nSize) ]; \n res = {'callput': callput, \n 'callsprd':callsprd, \n 'putsprd': putsprd, \n 'callfly': callfly, \n 'putfly' : putfly, \n 'value' : prob.objective.value(), \n 'status': pulp.LpStatus[prob.status]};\n return res;\n \ndef arboptimizer2( mktdata, exchange, capital_limit, fwdmargin ):\n fwdBid = mktdata['fwdbid'];\n fwdAsk = mktdata['fwdask'];\n fwdMid = (fwdBid + fwdAsk)/2.0;\n strikes = mktdata['strike'];\n callBids = mktdata['callbid'];\n callAsks = mktdata['callask'];\n callMids = [ (a + b)/2 for (a,b) in zip(callBids, callAsks)];\n putBids = mktdata['putbid'];\n putAsks = mktdata['putask'];\n putMids = [ (a + b)/2 for (a,b) in zip(putBids, putAsks)];\n nSize = len(strikes);\n\n margin_c = optionmargin(exchange, fwdMid, strikes, callMids, 'c');\n margin_p = optionmargin(exchange, fwdMid, strikes, putMids, 'p');\n prob = pulp.LpProblem(\"ArbAllocator\",pulp.LpMaximize);\n\n wc = pulp.LpVariable.dict('wc_%s', range(nSize-1), lowBound=0.0);\n wp = pulp.LpVariable.dict('wp_%s', range(nSize-1), lowBound=0.0);\n uc = 
pulp.LpVariable.dict('uc_%s', range(nSize-1), lowBound=0.0);\n up = pulp.LpVariable.dict('up_%s', range(nSize-1), lowBound=0.0);\n vc = pulp.LpVariable.dict('vc_%s', range(nSize), lowBound=0.0);\n vp = pulp.LpVariable.dict('vp_%s', range(nSize), lowBound=0.0);\n rc = pulp.LpVariable.dict('rc_%s', range(nSize), lowBound=0.0);\n rp = pulp.LpVariable.dict('rp_%s', range(nSize), lowBound=0.0);\n zc = pulp.LpVariable.dict('zc_%s', range(nSize), lowBound=0.0);\n zp = pulp.LpVariable.dict('zp_%s', range(nSize), lowBound=0.0);\n zf = pulp.LpVariable('zf', lowBound = 0.0, cat='Continuous');\n\n prob += sum([wc[i]*(callBids[i+1]-callAsks[i]) for i in range(nSize-1)]) + \\\n sum([wp[i]*(putBids[i]-putAsks[i+1]) for i in range(nSize-1)]) + \\\n sum([uc[i]*(callBids[i]-callAsks[i+1]-strikes[i+1]+strikes[i]) for i in range(nSize-1)]) + \\\n sum([up[i]*(putBids[i+1]-putAsks[i]-strikes[i+1]+strikes[i]) for i in range(nSize-1)]) + \\\n sum([vc[i]*(2*callBids[i] - callAsks[i-1] - callAsks[i+1]) for i in range(1, nSize-1)]) + \\\n sum([vp[i]*(2*putBids[i] - putAsks[i-1] - putAsks[i+1]) for i in range(1, nSize-1)]) + \\\n sum([rc[i]*(putBids[i] - callAsks[i] + fwdBid - strikes[i]) for i in range(nSize)]) + \\\n sum([rp[i]*(callBids[i] - putAsks[i] - fwdAsk + strikes[i]) for i in range(nSize)])\n \n prob += sum([zc[i]*margin_c[i]+zp[i]*margin_p[i] for i in range(nSize)])+ \\\n sum([(wc[i]-uc[i])*(callMids[i]-callMids[i+1]) for i in range(nSize-1)]) + \\\n sum([(wp[i]-up[i])*(putMids[i+1]-putMids[i]) for i in range(nSize-1)]) + \\\n sum([vc[i]*(callMids[i-1]+callMids[i+1]-2*callMids[i]) for i in range(1, nSize-1)]) + \\\n sum([vp[i]*(putMids[i-1] +putMids[i+1] -2*putMids[i] ) for i in range(1, nSize-1)]) + \\\n sum([(rc[i]-rp[i])*(callMids[i] - putMids[i]) for i in range(nSize)]) + \\\n + zf*fwdmargin*fwdMid <=capital_limit;\n prob += sum([rp[i]-rc[i] for i in range(nSize)]) - zf <= 0;\n prob += sum([rc[i]-rp[i] for i in range(nSize)]) - zf <= 0;\n prob += wc[0]-uc[0]+vc[1]+rc[0]-rp[0] + zc[0] >= 0;\n prob += -wp[0]+up[0]+vp[1]+rp[0]-rc[0]+ zp[0] >= 0;\n prob += -wc[nSize-2]+uc[nSize-2]+vc[nSize-2]+rc[nSize-1]-rp[nSize-1] + zc[nSize-1] >= 0;\n prob += wp[nSize-2]-up[nSize-2]+vp[nSize-2]+rp[nSize-1]-rc[nSize-1]+ zp[nSize-1] >= 0;\n prob += vc[0] == 0.0;\n prob += vc[nSize-1] == 0.0;\n prob += vp[0] == 0.0;\n prob += vp[nSize-1] == 0.0;\n for i in range(1, nSize-1):\n prob += wc[i]-uc[i]-(wc[i-1]-uc[i-1])+vc[i-1]+vc[i+1]-2*vc[i]+rc[i]-rp[i]+zc[i] >= 0; \n prob += wp[i-1]-up[i-1]-(wp[i]-up[i])+vp[i-1]+vp[i+1]-2*vp[i]+rp[i]-rc[i]+zp[i] >= 0;\n\n prob.solve(); \n solutions = {};\n for v in prob.variables():\n solutions[v.name] = v.varValue\n \n callsprd=[ solutions['wc_'+str(i)]-solutions['uc_'+str(i)] for i in range(nSize-1)];\n putsprd =[ solutions['wp_'+str(i)]-solutions['up_'+str(i)] for i in range(nSize-1)];\n callput=[ solutions['rc_'+str(i)]-solutions['rp_'+str(i)] for i in range(nSize)]; \n callfly = [ solutions['vc_'+str(i)] for i in range(1,nSize-1) ];\n putfly = [ solutions['vp_'+str(i)] for i in range(1, nSize-1) ]; \n res = {'callput': callput, \n 'callsprd':callsprd, \n 'putsprd': putsprd, \n 'callfly': callfly, \n 'putfly' : putfly, \n 'value' : prob.objective.value(), \n 'status': pulp.LpStatus[prob.status]};\n return res;\n\nif __name__ == \"__main__\":\n dataFile = 'test.xlsx';\n sheetName = 'mktData';\n wb = openpyxl.load_workbook('test.xlsx', data_only=True);\n ws = wb[sheetName];\n \n mktRange = 'A6:E18';\n wr = ws.range(mktRange);\n mktdata = {};\n mktdata['fwdbid'] = 
float(ws.cell(row=2,column=0).value);\n mktdata['fwdask'] = float(ws.cell(row=2,column=1).value); \n for i in range(len(wr)):\n row = wr[i];\n for j in range(len(row)):\n colname = str(wr[0][j].value).lower();\n if i == 0:\n mktdata[colname] = [];\n else:\n mktdata[colname].append(float(wr[i][j].value)); \n\n# init_strat = {'callsprd':[0]*8,\n# 'putsprd':[0]*8,\n# 'callfly':[0]*8,\n# 'putfly':[0]*8,\n# 'callput':[0,0,0,0,0,-1500,0,0],\n# }\n init_strat = {};\n posRange = 'G6:K18';\n wr = ws.range(posRange); \n for i in range(len(wr)):\n row = wr[i];\n for j in range(len(row)):\n colname = str(wr[0][j].value).lower();\n if i == 0:\n init_strat[colname] = [];\n else:\n init_strat[colname].append(float(wr[i][j].value));\n \n exchange = 'CFFE';\n fwdmargin = 0.12;\n capital_limit = 1000000;\n\n res1 = arboptimizer( mktdata, exchange, init_strat, capital_limit, fwdmargin );\n #res2 = arboptimizer2( mktdata, exchange, capital_limit, fwdmargin );\n pos = strat2pos(res1);\n res1['call'] = pos['call'];\n res1['put'] = pos['put'];\n outfile = \"test_output.xlsx\";\n outRange = 'N6:T18';\n wr = ws.range(outRange); \n for i, col in enumerate(['callsprd','putsprd','callfly','putfly','callput','call','put']):\n wr[0][i].value = col;\n for j, pos in enumerate(res1[col]):\n wr[j+1][i].value = pos; \n \n wb.save(outfile); \n pass\n ", "repo_name": "harvey1673/pyktrader", "sub_path": "tools/arboptimizer.py", "file_name": "arboptimizer.py", "file_ext": "py", "file_size_in_byte": 13349, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 160, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pulp.LpProblem", "line_number": 62, "usage_type": "call"}, {"api_name": "pulp.LpMaximize", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 64, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 64, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 65, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 66, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 67, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 68, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 69, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 70, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 71, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 72, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable", "line_number": 73, "usage_type": "call"}, {"api_name": "pulp.LpVariable.dict", "line_number": 74, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 75, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 75, 
"usage_type": "attribute"}, {"api_name": "pulp.LpVariable", "line_number": 76, "usage_type": "call"}, {"api_name": "pulp.LpStatus", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pulp.LpProblem", "line_number": 172, "usage_type": "call"}, {"api_name": "pulp.LpMaximize", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 174, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 175, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 176, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 177, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 178, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 179, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 180, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 181, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 182, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable.dict", "line_number": 183, "usage_type": "call"}, {"api_name": "pulp.LpVariable", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pulp.LpVariable", "line_number": 184, "usage_type": "call"}, {"api_name": "pulp.LpStatus", "line_number": 232, "usage_type": "attribute"}, {"api_name": "openpyxl.load_workbook", "line_number": 238, "usage_type": "call"}]} +{"seq_id": "14232481944", "text": "from tkinter import *\nimport time\n\n\ntry:\n strt=int(input(\"Please write secret code to launch our downloder program : \"))\n print(\"welcome to your yt lite downloder..\")\n user=input(\"Enter your good name : \")\n print(\"welcome to digital next genration mini py YT 2.01 videos downloder<>, \",user)\nexcept ValueError:\n time.sleep(3)\n print(\"may be some mistake in your input , Please recheck Cearfully,.. secret code, hint:= {sum of first 2 digts of latitude & longitude of in which city your are in.. }\")\n userdata=input(\"enter again > \")\n print(\"congrats!\")\n\ndef ytube():\n from pytube import YouTube\n link=(str(hd.get()))\n yt=YouTube(link)\n stm=yt.streams.first()\n stm.download()\n print(\"downloder done vro\")\n print(\"enjoy your video!\")\n time.sleep(3)\n print(\"If any thing you want to say about this program or py so, Pease write here , and we definatily work on this and make more effective for you : \")\n feed=input(\"Write here your : \")\n print(\"please wait we are uploading your risponse.. ... 
....\")\n time.sleep(3)\n print(\"we are work on your valuable feed thankyou for using..\",user)\n\nasr=Tk()\nasr.title(\"Youtube DOWNLODER 2.0\")\nasr.geometry(\"1300x700+00+00\")\nfilebg=PhotoImage(file='C:\\\\Users\\\\neha\\\\Links\\\\tubedownloder\\\\pikrepo.png')\nl_fr_bg=Label(asr,image=filebg)\nl_fr_bg.place(x=0,y=0)\nlabel=Label(text=\"Hey Buddy, Welcome,, Insert your link below\",fg=\"purple\",font=\"200x300\",bg='steel blue').pack()\nlbel2=Label(text=\"hey user there is some changes in your program you can'nt paste youtube link by mouse or touch, but you can paste link by 'CLTRL + V ' keys\",fg=\"yellow\",bg='purple' ).pack()\nbutton1=Button(text=\"download\",font=\"150x160\",bg=\"green\",fg=\"yellow\",command=ytube).pack()\nhd=StringVar()\nvalid=Entry(textvariable=hd,bd=4,bg='powder blue').pack()\n\nasr.mainloop()\n\n", "repo_name": "Anuragsingh2003/Ytube_v_downloder_v3.1_py", "sub_path": "youtune downdloderrr.py", "file_name": "youtune downdloderrr.py", "file_ext": "py", "file_size_in_byte": 1842, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "pytube.YouTube", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "9975427825", "text": "from pydantic import BaseModel\nfrom enum import Enum\n\n\nclass DogType(str, Enum):\n terrier = \"terrier\"\n bulldog = \"bulldog\"\n dalmatian = \"dalmatian\"\n\n\nclass Dog(BaseModel):\n name: str\n pk: int\n kind: DogType\n\n\nclass Timestamp(BaseModel):\n id: int\n timestamp: int", "repo_name": "Floly/devtools_hw2", "sub_path": "models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 11, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "22497253973", "text": "from datetime import date\nimport pandas as pd\nimport yfinance as yf\n\nclass YahooFiManager:\n def get_stock_df(self, tickers:str, start: date, end: date) -> pd.DataFrame:\n '''takes in all caps string of stock tickers separated by spaces, retur'''\n return yf.download(tickers=tickers, \n period='5d',\n interval='1m',\n start=start, \n end=end)", "repo_name": "bbeckenb/de_tools_playground", "sub_path": "dags/common/classes/YahooFiManager.py", "file_name": "YahooFiManager.py", "file_ext": "py", "file_size_in_byte": 458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datetime.date", "line_number": 6, "usage_type": "name"}, {"api_name": "yfinance.download", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "30868119829", "text": "# крч эта прога выдаёт график зависимости среднего значения очков игрального кубика от кол-ва бросков\r\nfrom random import randint\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\nclass Die:\r\n def __init__(self, count):\r\n self.count = count\r\n\r\n def random_score(self):\r\n return randint(1, 6)\r\n\r\n def randoming(self):\r\n z = 0\r\n summa = 0\r\n while z <= 
self.count:\r\n summa += randint(1, 6)\r\n z += 1\r\n return summa / self.count\r\n\r\n\r\nx = np.linspace(1, 5001, 5000)\r\ni = 0\r\ny = []\r\nfor i in range(1, 5001):\r\n ThisClass = Die(i)\r\n y.append(ThisClass.randoming())\r\nplt.plot(x, y)\r\nplt.show()\r\nJustDie = Die(1000)\r\nprint(JustDie.random_score())\r\n", "repo_name": "GlavnyNegodyai/test-ish-_repository", "sub_path": "code/rn.py", "file_name": "rn.py", "file_ext": "py", "file_size_in_byte": 809, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "random.randint", "line_number": 12, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "9349029011", "text": "from chrome import chrome_purchase as cPurchase\nfrom flask import Flask, Response, request\n\napp = Flask(__name__)\n\n@app.route('/purchase', methods=['POST'])\ndef purchase_event():\n\tprint(request.form)\n\turl = request.form.get('url')\n\tif url is not None:\n\t\tsPurchase = cPurchase.ChromePurchase()\n\t\tmsg = sPurchase.addToShoppingCard(url)\n\t\treturn Response(msg)\n\telse:\n\t\treturn \"Not purchase URL\", 400\t\n\n\t\n\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0', threaded=False, port = 5001)\n", "repo_name": "toponsky/h_project", "sub_path": "purchase/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 483, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 8, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 9, "usage_type": "name"}, {"api_name": "chrome.chrome_purchase.ChromePurchase", "line_number": 11, "usage_type": "call"}, {"api_name": "chrome.chrome_purchase", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "41093156840", "text": "from fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy import select, update\n\nfrom bad_responses import ErrorOrSucess\nfrom config import STATIC_ACCESS_KEY\nfrom database import get_async_session\nfrom auth.models import user\nfrom auth.base_config import fastapi_users\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom profile.schemas import GetUserProfile, UpdateUserProfileInfo\n\ncurrent_user = fastapi_users.current_user()\n# создаём новый роутер\nprofile_router = APIRouter(\n prefix='/profile',\n tags=['Profile']\n)\n\n\n@profile_router.get('/get_profile/{need_user_id}',\n responses={401: {'model': ErrorOrSucess, 'description': 'Unauthorized'},\n 400: {'model': ErrorOrSucess, 'description': \"User doesn't exist\"}},\n response_model=GetUserProfile)\nasync def get_user_profile(need_user_id: int, person: user = Depends(current_user),\n 
session: AsyncSession = Depends(get_async_session)) -> dict:\n \"\"\"Получение информации из профиля пользователя по id\"\"\"\n\n # проверка на то что id пользователя неотрицательный\n if need_user_id < 0:\n raise HTTPException(status_code=400, detail=\"User doesn't exist\")\n # делаем запрос к бд\n query = select(user).where(user.id == need_user_id)\n sqlalchemy_engine = await session.execute(query)\n data = sqlalchemy_engine.fetchall()\n # смотрим что пользователь есть в бд\n if len(data) > 0:\n # забираем все интересующие нас данные\n user_obj = data[0][0]\n ans = dict()\n ans['last_name'] = user_obj.last_name\n ans['first_name'] = user_obj.first_name\n ans['patronymic'] = user_obj.patronymic\n ans['username'] = user_obj.username\n ans['email'] = user_obj.email\n ans['desc'] = user_obj.description\n ans['birthday'] = user_obj.birthday\n ans['position'] = user_obj.position\n ans['experience_in_company'] = user_obj.experience_in_company\n ans['phone_number'] = user_obj.phone_number\n ans['about_me'] = user_obj.about_me\n ans['work_place'] = user_obj.work_place\n return ans\n raise HTTPException(status_code=400, detail=\"User doesn't exist\")\n\n\n@profile_router.post('/change_profile_info',\n responses={200: {'model': ErrorOrSucess, 'description': 'OK'},\n 401: {'model': ErrorOrSucess, 'description': \"User doesn't exist\"}})\nasync def post_user_profile(user_data: UpdateUserProfileInfo,\n person: user = Depends(current_user), session: AsyncSession = Depends(get_async_session)):\n \"\"\"Изменение информации в профиле пользователя\"\"\"\n data_in_dict_format = dict(user_data)\n stmt = update(user).where(user.id == person.id).values(**data_in_dict_format)\n await session.execute(stmt)\n await session.commit()\n raise HTTPException(status_code=200, detail='OK')\n\n\n@profile_router.put('/become_manager', responses={200: {'model': ErrorOrSucess, 'description': 'OK'},\n 401: {'model': ErrorOrSucess, 'description': \"User doesn't exist\"},\n 400: {'model': ErrorOrSucess, 'description': \"Already admin\"}})\nasync def expand_rights(access_key: str, person: user = Depends(current_user),\n session: AsyncSession = Depends(get_async_session)):\n \"\"\"Расширить права став администратором (менеджером)\"\"\"\n query = select(user).where(user.id == person.id)\n stmt = update(user).where(user.id == person.id).values(is_superuser=True)\n # выдача прав на суперпользователя только в том случае если пользователь ввёл правильный ключ\n if access_key == STATIC_ACCESS_KEY:\n data_sup_user = await session.execute(query)\n # проверяем что пользователь ещё не менеджер\n if not data_sup_user.fetchall()[0][0].is_superuser:\n await session.execute(stmt)\n await session.commit()\n return {'detail': 'you get more rights access'}\n else:\n raise HTTPException(status_code=400, detail=\"you are already an admin, re-granting rights is not possible\")\n else:\n raise HTTPException(status_code=400, detail=\"don't correct access key\")\n", "repo_name": "Raisin228/smart_office_hackathon_mobile_app", "sub_path": "backend/src/profile/router.py", "file_name": "router.py", "file_ext": "py", "file_size_in_byte": 4644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "auth.base_config.fastapi_users.current_user", "line_number": 13, "usage_type": "call"}, {"api_name": "auth.base_config.fastapi_users", "line_number": 13, "usage_type": "name"}, {"api_name": "fastapi.APIRouter", "line_number": 15, "usage_type": "call"}, {"api_name": "auth.models.user", 
"line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 26, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 25, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 26, "usage_type": "call"}, {"api_name": "database.get_async_session", "line_number": 26, "usage_type": "argument"}, {"api_name": "fastapi.HTTPException", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.select", "line_number": 33, "usage_type": "call"}, {"api_name": "auth.models.user", "line_number": 33, "usage_type": "argument"}, {"api_name": "auth.models.user.id", "line_number": 33, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 54, "usage_type": "call"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 22, "usage_type": "name"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 23, "usage_type": "name"}, {"api_name": "profile.schemas.GetUserProfile", "line_number": 24, "usage_type": "name"}, {"api_name": "profile.schemas.UpdateUserProfileInfo", "line_number": 60, "usage_type": "name"}, {"api_name": "auth.models.user", "line_number": 61, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 61, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 61, "usage_type": "call"}, {"api_name": "database.get_async_session", "line_number": 61, "usage_type": "argument"}, {"api_name": "sqlalchemy.update", "line_number": 64, "usage_type": "call"}, {"api_name": "auth.models.user", "line_number": 64, "usage_type": "argument"}, {"api_name": "auth.models.user.id", "line_number": 64, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 67, "usage_type": "call"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 58, "usage_type": "name"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 59, "usage_type": "name"}, {"api_name": "auth.models.user", "line_number": 73, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 74, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 73, "usage_type": "call"}, {"api_name": "fastapi.Depends", "line_number": 74, "usage_type": "call"}, {"api_name": "database.get_async_session", "line_number": 74, "usage_type": "argument"}, {"api_name": "sqlalchemy.select", "line_number": 76, "usage_type": "call"}, {"api_name": "auth.models.user", "line_number": 76, "usage_type": "argument"}, {"api_name": "auth.models.user.id", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sqlalchemy.update", "line_number": 77, "usage_type": "call"}, {"api_name": "auth.models.user", "line_number": 77, "usage_type": "argument"}, {"api_name": "auth.models.user.id", "line_number": 77, "usage_type": "attribute"}, {"api_name": "config.STATIC_ACCESS_KEY", "line_number": 79, "usage_type": "name"}, {"api_name": "fastapi.HTTPException", "line_number": 87, "usage_type": "call"}, {"api_name": "fastapi.HTTPException", "line_number": 89, "usage_type": "call"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 70, "usage_type": "name"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 71, "usage_type": "name"}, {"api_name": "bad_responses.ErrorOrSucess", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "26860049297", "text": "import re\nimport threading\nimport dataclasses\nfrom queue import Queue\nfrom typing import Optional\n\nimport pandas as pd\n\nfrom utils import Logger\n\nCOLUMNS = [\"Title\", 
\"Url\", \"Image\", \"Price\"]\n\nNON_NULL_COLUMNS = [\"Title\", \"Image\", \"Price\"]\n\n@dataclasses.dataclass\nclass Columns:\n \"\"\"Stores columns information\"\"\"\n title: str\n url: str\n image: str\n price: str\n\n@dataclasses.dataclass\nclass FileStats:\n \"\"\"Store file descriptions i.e. file name, products before and products after\"\"\"\n website: str\n category: str\n output_path: str\n products_count_before: Optional[int] = None\n products_count_after: Optional[int] = None\n\nclass DataHandler:\n \"\"\"Removes any rows with blank values for title, price or image\"\"\"\n def __init__(self, stats: list[FileStats]) -> None:\n self.logger = Logger(__class__.__name__)\n\n self.queue = Queue()\n\n self.stats = stats\n\n [threading.Thread(target=self.__work, daemon=True).start() for _ in range(3)]\n\n @staticmethod\n def __get_columns(columns: list[str]) -> Optional[Columns]: \n for column in columns:\n if re.search(r\"title\", column, re.I):\n title = column\n elif re.search(r\"url\", column, re.I):\n url = column\n elif re.search(r\"image\", column, re.I):\n image = column\n elif re.search(r\"price\", column, re.I):\n price = column\n \n try:\n return Columns(title=title, url=url, image=image, price=price)\n except: pass\n\n @staticmethod\n def __rename_columns(df: pd.DataFrame, columns: Columns) -> pd.DataFrame:\n return df.rename(columns={columns.title: COLUMNS[0],\n columns.url: COLUMNS[1],\n columns.image: COLUMNS[2],\n columns.price: COLUMNS[-1]})\n \n def __read_file(self, file_path: str) -> Optional[pd.DataFrame]:\n if re.search(r\".xlsx$\", file_path, re.I):\n return pd.read_excel(file_path)\n elif re.search(r\".csv$\", file_path, re.I):\n return pd.read_csv(file_path)\n elif re.search(r\".json$\", file_path, re.I):\n return pd.read_json(file_path)\n elif re.search(r\".xml$\", file_path, re.I):\n return pd.read_xml(file_path)\n elif re.search(r\".tsv$\", file_path, re.I):\n return pd.read_csv(file_path, sep=\"\\t\")\n \n extension = re.search(r\"\\.[a-zA-Z]{1,6}$\", file_path)\n\n self.logger.warn(\"Skipping removal of blanks as the script is not \"\n \"configured to handle {} file type\".format(extension))\n \n def __save(self, df: pd.DataFrame, file_path: str, name: str) -> None:\n if re.search(r\".xlsx$\", file_path, re.I):\n df.to_excel(file_path, index=False)\n elif re.search(r\".csv$\", file_path, re.I):\n df.to_csv(file_path, index=False)\n elif re.search(r\".json$\", file_path, re.I):\n df.to_json(file_path, index=False)\n elif re.search(r\".xml$\", file_path, re.I):\n df.to_xml(file_path, index=False)\n elif re.search(r\".tsv$\", file_path, re.I):\n df.to_csv(file_path, sep=\"\\t\")\n \n filename = file_path\n\n if re.search(r\"\\/\", file_path):\n filename = file_path.split(\"/\")[-1]\n elif re.search(r\"\\\\\", file_path):\n filename = file_path.split(\"\\\\\")[-1]\n\n stats = [dataclasses.asdict(record) for record in self.stats]\n\n [record.pop(\"output_path\") for record in stats]\n\n pd.DataFrame(stats).to_csv(\"./stats/stats.csv\", index=False)\n\n self.logger.info(\"{}: Non-null records saved to {}\".format(name, filename))\n \n def __work(self) -> None:\n while True:\n name, output_path = self.queue.get()\n\n stats = self.__get_stats(output_path)\n\n df = self.__read_file(output_path)\n\n stats.products_count_before = len(df)\n\n if df is None:\n self.queue.task_done()\n\n continue\n\n columns = self.__get_columns(list(df.columns.values))\n\n df = self.__rename_columns(df, columns)\n\n [df.dropna(subset=column, inplace=True) for column in 
NON_NULL_COLUMNS]\n\n stats.products_count_after = len(df)\n\n self.__save(df, output_path, name)\n\n self.queue.task_done()\n \n def __get_stats(self, output_path: str) -> FileStats:\n for stats in self.stats:\n if stats.output_path == output_path: return stats", "repo_name": "Manue-Towett/webharvy", "sub_path": "apps/data_handler.py", "file_name": "data_handler.py", "file_ext": "py", "file_size_in_byte": 4581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "dataclasses.dataclass", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 30, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.Logger", "line_number": 35, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 37, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 41, "usage_type": "call"}, {"api_name": "re.search", "line_number": 46, "usage_type": "call"}, {"api_name": "re.I", "line_number": 46, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 48, "usage_type": "call"}, {"api_name": "re.I", "line_number": 48, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 50, "usage_type": "call"}, {"api_name": "re.I", "line_number": 50, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 52, "usage_type": "call"}, {"api_name": "re.I", "line_number": 52, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 67, "usage_type": "call"}, {"api_name": "re.I", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pandas.read_excel", "line_number": 68, "usage_type": "call"}, {"api_name": "re.search", "line_number": 69, "usage_type": "call"}, {"api_name": "re.I", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "re.I", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pandas.read_json", "line_number": 72, "usage_type": "call"}, {"api_name": "re.search", "line_number": 73, "usage_type": "call"}, {"api_name": "re.I", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pandas.read_xml", "line_number": 74, "usage_type": "call"}, {"api_name": "re.search", "line_number": 75, "usage_type": "call"}, {"api_name": "re.I", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 76, "usage_type": "call"}, {"api_name": "re.search", "line_number": 78, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 66, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 84, "usage_type": "call"}, {"api_name": "re.I", "line_number": 84, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 86, "usage_type": "call"}, {"api_name": "re.I", "line_number": 86, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 88, "usage_type": "call"}, {"api_name": "re.I", "line_number": 88, "usage_type": "attribute"}, {"api_name": "re.search", 
"line_number": 90, "usage_type": "call"}, {"api_name": "re.I", "line_number": 90, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}, {"api_name": "re.I", "line_number": 92, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 97, "usage_type": "call"}, {"api_name": "re.search", "line_number": 99, "usage_type": "call"}, {"api_name": "dataclasses.asdict", "line_number": 102, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "10702564379", "text": "\"\"\"\nTests for the tedana stats module\n\"\"\"\nimport random\n\nimport numpy as np\nimport pytest\n\nfrom tedana.stats import computefeats2, get_coeffs, getfbounds\n\n\ndef test_break_computefeats2():\n \"\"\"\n Ensure that computefeats2 fails when input data do not have the right\n shapes.\n \"\"\"\n n_samples, n_vols, n_comps = 10000, 100, 50\n data = np.empty((n_samples, n_vols))\n mmix = np.empty((n_vols, n_comps))\n mask = np.empty((n_samples))\n\n data = np.empty((n_samples))\n with pytest.raises(ValueError):\n computefeats2(data, mmix, mask, normalize=True)\n\n data = np.empty((n_samples, n_vols))\n mmix = np.empty((n_vols))\n with pytest.raises(ValueError):\n computefeats2(data, mmix, mask, normalize=True)\n\n mmix = np.empty((n_vols, n_comps))\n mask = np.empty((n_samples, n_vols))\n with pytest.raises(ValueError):\n computefeats2(data, mmix, mask, normalize=True)\n\n mask = np.empty((n_samples + 1))\n with pytest.raises(ValueError):\n computefeats2(data, mmix, mask, normalize=True)\n data.shape[1] != mmix.shape[0]\n mask = np.empty((n_samples))\n mmix = np.empty((n_vols + 1, n_comps))\n with pytest.raises(ValueError):\n computefeats2(data, mmix, mask, normalize=True)\n\n\ndef test_smoke_computefeats2():\n \"\"\"\n Ensures that computefeats2 works with random inputs and different optional parameters\n \"\"\"\n n_samples, n_times, n_components = 100, 20, 6\n data = np.random.random((n_samples, n_times))\n mmix = np.random.random((n_times, n_components))\n mask = np.random.randint(2, size=n_samples)\n\n assert computefeats2(data, mmix) is not None\n assert computefeats2(data, mmix, mask=mask) is not None\n assert computefeats2(data, mmix, normalize=False) is not None\n\n\ndef test_get_coeffs():\n \"\"\"\n Check least squares coefficients.\n \"\"\"\n # Simulate one voxel with 40 TRs\n data = np.empty((2, 40))\n data[0, :] = np.arange(0, 200, 5)\n data[1, :] = np.arange(0, 200, 5)\n X = np.arange(0, 40)[:, np.newaxis]\n mask = np.array([True, False])\n\n betas = get_coeffs(data, X, mask=None, add_const=False)\n betas = np.squeeze(betas)\n assert np.allclose(betas, np.array([5.0, 5.0]))\n\n betas = get_coeffs(data, X, mask=None, add_const=True)\n betas = np.squeeze(betas)\n assert np.allclose(betas, np.array([5.0, 5.0]))\n\n betas = get_coeffs(data, X, mask=mask, add_const=False)\n betas = np.squeeze(betas)\n assert np.allclose(betas, np.array([5, 0]))\n\n betas = get_coeffs(data, X, mask=mask, add_const=True)\n betas = np.squeeze(betas)\n assert np.allclose(betas, np.array([5, 0]))\n\n\ndef test_break_get_coeffs():\n \"\"\"\n Ensure that get_coeffs fails when input data do not have the right\n shapes.\n \"\"\"\n n_samples, n_echos, n_vols, n_comps = 10000, 5, 100, 50\n data = np.empty((n_samples, n_vols))\n X = np.empty((n_vols, n_comps))\n mask = np.empty((n_samples))\n\n data = np.empty((n_samples))\n with pytest.raises(ValueError):\n get_coeffs(data, X, mask, add_const=False)\n\n data = np.empty((n_samples, 
n_vols))\n X = np.empty((n_vols))\n with pytest.raises(ValueError):\n get_coeffs(data, X, mask, add_const=False)\n\n data = np.empty((n_samples, n_echos, n_vols + 1))\n X = np.empty((n_vols, n_comps))\n with pytest.raises(ValueError):\n get_coeffs(data, X, mask, add_const=False)\n\n data = np.empty((n_samples, n_echos, n_vols))\n mask = np.empty((n_samples, n_echos, n_vols))\n with pytest.raises(ValueError):\n get_coeffs(data, X, mask, add_const=False)\n\n mask = np.empty((n_samples + 1, n_echos))\n with pytest.raises(ValueError):\n get_coeffs(data, X, mask, add_const=False)\n\n\ndef test_smoke_get_coeffs():\n \"\"\"\n Ensure that get_coeffs returns outputs with different inputs and optional paramters\n \"\"\"\n n_samples, _, n_times, n_components = 100, 5, 20, 6\n data_2d = np.random.random((n_samples, n_times))\n x = np.random.random((n_times, n_components))\n mask = np.random.randint(2, size=n_samples)\n\n assert get_coeffs(data_2d, x) is not None\n # assert get_coeffs(data_3d, x) is not None TODO: submit an issue for the bug\n assert get_coeffs(data_2d, x, mask=mask) is not None\n assert get_coeffs(data_2d, x, add_const=True) is not None\n\n\ndef test_getfbounds():\n good_inputs = range(1, 12)\n\n for n_echos in good_inputs:\n getfbounds(n_echos)\n\n\ndef test_smoke_getfbounds():\n \"\"\"\n Ensures that getfbounds returns outputs when fed in a random number of echo\n \"\"\"\n n_echos = random.randint(3, 10) # At least two echos!\n f05, f025, f01 = getfbounds(n_echos)\n\n assert f05 is not None\n assert f025 is not None\n assert f01 is not None\n", "repo_name": "ME-ICA/tedana", "sub_path": "tedana/tests/test_stats.py", "file_name": "test_stats.py", "file_ext": "py", "file_size_in_byte": 4716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 149, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.empty", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 23, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 28, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 33, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 37, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 42, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 51, "usage_type": "attribute"}, 
{"api_name": "numpy.random.random", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 52, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tedana.stats.computefeats2", "line_number": 55, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 56, "usage_type": "call"}, {"api_name": "tedana.stats.computefeats2", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 98, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 99, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 103, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 104, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 109, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 113, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 114, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 
117, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 118, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 131, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 133, "usage_type": "call"}, {"api_name": "tedana.stats.get_coeffs", "line_number": 134, "usage_type": "call"}, {"api_name": "tedana.stats.getfbounds", "line_number": 141, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 148, "usage_type": "call"}, {"api_name": "tedana.stats.getfbounds", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "31618877189", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n##########################################################\n## global paraemters for the model\n##########################################################\n\nnt = 30 # num of temp points\nN = 50 # size of lattice\nequil_steps = 1000 # num of equil steps\nmc_steps = 1000 # num of mc steps\n\ntemp = 1.0\n\n\n##########################################################\n## the model code\n##########################################################\n\ndef initial_state(N):\n ''' gen random spin config for lattice size N x N with values -1 or 1 '''\n state = 2*np.random.randint(2, size=(N, N))-1\n return state\n\ndef mc_moves(config, N, temp):\n ''' execute mc moves using metropolis alg '''\n for i in range(N):\n for j in range(N):\n ## random site i in spot [a, b] that we assign to spin s\n a = np.random.randint(0, N)\n b = np.random.randint(0, N)\n s = config[a, b]\n ## calculating delta E = 2 * s * (top, bot, left, right, H=0)\n ## including mod N if a or b are at the boundaries\n ## they lap over to the other side\n neighbors = config[(a+1)%N,b] + config[a,(b+1)%N] + config[(a-1)%N,b] + config[a,(b-1)%N]\n energy_change = 2*s*neighbors\n ## if energy change is <= 0, accept\n if energy_change <= 0:\n s *= -1\n ## elif accept with prob A = e^(-energy_change/T)\n elif np.random.rand() < np.exp(-energy_change/temp):\n s *= -1\n ## apply change to the spot\n config[a, b] = s\n return config\n\n##########################################################\n## calculating quantitites\n##########################################################\n\n## quantities can all be calculated with energy and mag\n\ndef calc_energy(config):\n energy = 0\n for i in range(len(config)):\n for j in range(len(config)):\n S = config[i,j]\n nb = config[(i+1)%N, j] + config[i,(j+1)%N] + config[(i-1)%N, j] + config[i,(j-1)%N]\n energy += -nb*S\n return energy/4.\n\n\ndef calc_mag(config):\n ## simply sum all values\n return np.sum(config)\n\n\nTemp = np.linspace(1, 4, nt)\nEnergy = np.zeros(nt)\nMagnetization = np.zeros(nt)\nSpecificHeat = np.zeros(nt)\nSusceptibility = np.zeros(nt)\n\n\n##########################################################\n## running the code / plotting\n##########################################################\n\n## gen model\nmodel2 = 
initial_state(N)\nprint(model2)\n\ncount = 0\nfor i in range(equil_steps):\n mc_moves(model2, N, temp)\n print(count)\n count += 1\n\n## plotting stuff\ncmap = mpl.colors.ListedColormap(['yellow', 'blue'])\nbounds = [-N, -N, N, N]\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n\nimg = plt.imshow(model2, interpolation='none', cmap = cmap)\n\nplt.show()\n\n## iterate monte carlo moves\n# for m in range(len(Temp)):\n# e0 = 0\n# e1 = 0\n# m0 = 0\n# m1 = 0\n# model = initial_state(N)\n# E = calc_energy(model)\n# M = calc_mag(model)\n#\n# for i in range(equil_steps):\n# mc_moves(model, N, Temp[m])\n#\n# for i in range(mc_steps):\n# mc_moves(model, N, Temp[m])\n# E = calc_energy(model)\n# M = calc_mag(model)\n#\n#\n# print(E,M,m,i)\n#\n# # iterate\n# e0 = e0 + E\n# m0 = m0 + M\n# m1 = m1 + (M*M)\n# e1 = e1 + (E*E)\n#\n# # change values in the arrays to the values\n# Energy[m] = e0/(mc_steps*N*N)\n# Magnetization[m] = m0/(mc_steps*N*N)\n# SpecificHeat[m] = ( e1/mc_steps - e0*e0/(mc_steps*mc_steps) )/(N*Temp[m]*Temp[m])\n#\n# Susceptibility[m] = ( m1/mc_steps - m0*m0/(mc_steps*mc_steps))\n\n\n\n\n\n#######\n\n# f = plt.figure(figsize=(18, 10), dpi=80, facecolor='w', edgecolor='k');\n#\n# sp = f.add_subplot(2, 2, 1 );\n# plt.plot(Temp, Energy, 'o', color=\"red\");\n# plt.xlabel(\"Temperature (T)\");\n# plt.ylabel(\"Energy \", fontsize=20);\n#\n# sp = f.add_subplot(2, 2, 2 );\n# plt.plot(Temp, abs(Magnetization), 'o', color=\"blue\");\n# plt.xlabel(\"Temperature (T)\");\n# plt.ylabel(\"Magnetization \");\n#\n#\n# sp = f.add_subplot(2, 2, 3 );\n# plt.plot(Temp, SpecificHeat, 'o', color=\"black\");\n# plt.xlabel(\"Temperature (T)\");\n# plt.ylabel(\"Specific Heat \");\n#\n#\n# sp = f.add_subplot(2, 2, 4 );\n# plt.plot(Temp, Susceptibility, 'o', color=\"green\");\n# plt.xlabel(\"Temperature (T)\");\n# plt.ylabel(\"Susceptibility\");\n# plt.show()\n# plt.legend(loc='best', fontsize=15);\n", "repo_name": "kevinbelleville/Computational-Physics", "sub_path": "project_3/kevin.py", "file_name": "kevin.py", "file_ext": "py", "file_size_in_byte": 4593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.colors.ListedColormap", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.colors", "line_number": 92, "usage_type": "attribute"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 94, "usage_type": "call"}, 
{"api_name": "matplotlib.colors", "line_number": 94, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "23420599162", "text": "import loader\nfrom functools import reduce\n\ndef checkSlope(_data, _right, _down):\n WIDTH=len(_data[0].strip())\n\n count=0\n pos=0\n for i in range(0, len(_data), _down):\n if _data[i][pos]=='#':\n count+=1\n pos+=_right\n pos%=WIDTH\n \n return count\n\nif __name__ == '__main__':\n data=loader.loadInLines('day3.in')\n\n # PART 1\n print(checkSlope(data,3,1))\n\n # PART 2\n tocheck=[\n # Right 1, down 1.\n (1,1),\n # Right 3, down 1. (This is the slope you already checked.)\n (3,1),\n # Right 5, down 1.\n (5,1),\n # Right 7, down 1.\n (7,1),\n # Right 1, down 2.\n (1,2)\n ]\n\n answers=[checkSlope(data,i[0],i[1]) for i in tocheck]\n out=reduce(lambda a, b: a*b, answers)\n print(out)\n ", "repo_name": "natfaulk/Advent_of_code_2020", "sub_path": "src/day3.py", "file_name": "day3.py", "file_ext": "py", "file_size_in_byte": 719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "loader.loadInLines", "line_number": 18, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "7670907406", "text": "import os\nimport argparse\nimport json\nimport numpy as np\nimport cv2\nfrom thermal_faces.convolution import detect_heads\nfrom thermal_faces.seq_reader import extract_metadata, convert_to_temperature, seq_frames\nfrom thermal_faces.utils import save_image, JsonNumpyEncoder\nfrom tqdm import tqdm\n\n# Find heads in the average temperature over the entire video. This should\n# find the most stable head locations, and these people are most likely\n# watching the performance.\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Calculate the average temperature in a SEQ-video file and find faces in the average temperature. These are recorded in a json file for later use.\")\n parser.add_argument(\"-f\", \"--filename\", required=True, help=\"The name of the SEQ video file.\")\n parser.add_argument(\"-o\", \"--outfile\", default=\"head_locations.json\", help=\"The output json file name.\")\n parser.add_argument(\"-i\", \"--imagefile\", default=\"mean.png\", help=\"The output image file name.\")\n parser.add_argument(\"-p\", \"--npy\", default=\"mean.npy\", help=\"The name of the pickled numpy file containing the mean temperature.\")\n parser.add_argument(\"--firstframe\", default=0, help=\"First frame to include.\")\n parser.add_argument(\"--lastframe\", default=None, help=\"Last frame to include.\")\n parser.add_argument(\"--minwidth\", default=30, help=\"Minimum head width.\")\n parser.add_argument(\"--maxwidth\", default=50, help=\"Maximum head width.\")\n parser.add_argument(\"--threshhold\", default=2, help=\"Threshhold for detecting a head. Lower threshold means more heads and more false positives.\")\n parser.add_argument(\"--keep\", default=[], nargs='*', help=\"A list of subject ids to keep. 
By default keep all.\")\n args = parser.parse_args()\n\n keep = [int(k) for k in args.keep]\n first_frame = int(args.firstframe)\n if args.lastframe is None:\n last_frame = float(\"inf\")\n else:\n last_frame = int(args.lastframe)\n\n # Video file data\n metadata = extract_metadata(args.filename)\n width = int(metadata[\"Raw Thermal Image Width\"])\n height = int(metadata[\"Raw Thermal Image Height\"])\n bitdepth = 16\n frame_size = width * height * (bitdepth // 8)\n\n # If not already done, find the mean temperature.\n if not os.path.isfile(args.npy):\n frame_index = 0\n temperature_sum = None\n for frame in tqdm(seq_frames(args.filename)):\n # read temperature data\n raw_data = np.frombuffer(frame[len(frame)-frame_size:], dtype=np.uint16).reshape(height, width)\n\n if frame_index > first_frame and frame_index < last_frame:\n temperature = convert_to_temperature(raw_data, metadata)\n if temperature_sum is None:\n temperature_sum = temperature\n else:\n temperature_sum += temperature\n\n if frame_index % 1000 == 0:\n temperature = convert_to_temperature(raw_data, metadata)\n if not os.path.exists('temperature_frames'):\n os.makedirs('temperature_frames')\n save_image(temperature, f\"temperature_frames/{frame_index}.png\", scaled=True)\n\n frame_index += 1\n\n if args.lastframe is None:\n last_frame = frame_index\n \n frames = last_frame - first_frame\n\n # Get the mean\n temperature_mean = temperature_sum/frames\n np.save(args.npy, temperature_mean)\n\n else:\n temperature_mean = np.load(args.npy)\n\n\n heads = detect_heads(\n temperature_mean,\n min_width=int(args.minwidth),\n max_width=int(args.maxwidth),\n threshold=float(args.threshhold)\n )\n\n if keep:\n heads = [h for i, h in enumerate(heads) if i in keep]\n\n for i, head in enumerate(heads):\n y, x = head[\"y\"], head[\"x\"]\n h, w = head[\"height\"]//2, head[\"width\"]//2\n y_max, x_max = head[\"max temp y\"], head[\"max temp x\"]\n cv2.ellipse(temperature_mean, (x,y), (w, h), 0, 0, 360, thickness=2, color=255)\n cv2.circle(temperature_mean, (x_max, y_max), 0, thickness=5, color=255)\n cv2.putText(temperature_mean, str(i), (x+w,y+h), cv2.FONT_HERSHEY_PLAIN, 3, 255, 2, cv2.LINE_AA)\n\n head[\"subject_id\"] = i\n\n save_image(temperature_mean, args.imagefile, scaled=True)\n\n with open(args.outfile, \"w\") as file:\n json.dump(heads, file, indent=4, cls=JsonNumpyEncoder)\n\n\n\nif __name__ == \"__main__\":\n main()\n\n", "repo_name": "rantahar/seq_file", "sub_path": "mean_temp_image.py", "file_name": "mean_temp_image.py", "file_ext": "py", "file_size_in_byte": 4450, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "thermal_faces.seq_reader.extract_metadata", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 47, "usage_type": "call"}, {"api_name": "thermal_faces.seq_reader.seq_frames", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 49, "usage_type": "attribute"}, {"api_name": "thermal_faces.seq_reader.convert_to_temperature", "line_number": 52, "usage_type": "call"}, {"api_name": "thermal_faces.seq_reader.convert_to_temperature", "line_number": 59, "usage_type": "call"}, {"api_name": 
"os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 61, "usage_type": "call"}, {"api_name": "thermal_faces.utils.save_image", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 76, "usage_type": "call"}, {"api_name": "thermal_faces.convolution.detect_heads", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.ellipse", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 95, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 95, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 95, "usage_type": "attribute"}, {"api_name": "thermal_faces.utils.save_image", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 102, "usage_type": "call"}, {"api_name": "thermal_faces.utils.JsonNumpyEncoder", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "15950108784", "text": "import matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = 'CIFAR100'\nbatch_size = 256\ntrials = 10\nEPOCHS = 200\nfinish_parallel_by=50\n\ndef plot(epoch_range, train, test, heading, extra = 1):\n epochs = range(1,epoch_range+1)\n plt.plot(epochs, train, 'g', label='Training ' + heading)\n plt.plot(epochs, test, 'b', label='Test ' + heading)\n plt.title('Training and Test ' + heading)\n plt.xlabel('Epochs')\n plt.ylabel(heading)\n plt.legend()\n plt.savefig(heading + str(extra) + '.png')\n plt.close()\n\ndef write_list_to_csv(dictionary, heading):\n dataframe = pd.DataFrame(dictionary)\n dataframe.to_csv(heading + '.csv')\n", "repo_name": "myisu-rese-arch/pytorch-cifar", "sub_path": "config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "27220339832", "text": "from discord.ext.commands import Bot as BotBase\nfrom discord.ext.commands import Context\nfrom 
discord.ext.commands import CommandNotFound, BadArgument\r\nfrom discord.ext.commands import MissingRequiredArgument, MissingRole, MissingPermissions\r\nfrom discord.errors import Forbidden, HTTPException\r\nfrom discord import Embed, File\r\nfrom glob import glob\r\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\r\nfrom apscheduler.triggers.cron import CronTrigger\r\nfrom datetime import datetime \r\nfrom asyncio import sleep\r\nfrom ..db import db\r\n\r\nPREFIX = \"!\"\r\nOWNER_IDS = [572353145963806721]\r\nCOGS = [path.split(\"\\\\\")[-1][:-3] for path in glob(\"./lib/cogs/*.py\")]\r\nIGNORE_EXCEPTIONS = [CommandNotFound, BadArgument]\r\n\r\nclass Ready(object):\r\n    def __init__(self):\r\n        for cog in COGS:\r\n            setattr(self, cog, False)\r\n    def ready_up(self, cog):\r\n        setattr(self, cog, True)\r\n        print(f\"{cog} cog is ready\")\r\n\r\n    def all_ready(self):\r\n        return all([getattr(self, cog) for cog in COGS])\r\n\r\nclass Bot(BotBase):\r\n    def __init__(self):\r\n        self.PREFIX = PREFIX\r\n        self.ready = False\r\n        self.cogs_ready = Ready()\r\n        self.guild = None\r\n        self.scheduler = AsyncIOScheduler()\r\n        db.autosave(self.scheduler)\r\n\r\n        super().__init__(command_prefix=PREFIX, OWNER_ID=OWNER_IDS)\r\n\r\n    def setup(self):\r\n        for cog in COGS:\r\n            self.load_extension(f\"lib.cogs.{cog}\")\r\n            print(f\"{cog} cog Loaded\")\r\n\r\n    def run(self, version):\r\n        self.VERSION = version\r\n        print(\"running setup\")\r\n        self.setup()\r\n\r\n        with open(\"./lib/bot/token\", \"r\", encoding=\"utf-8\") as tf:\r\n            self.TOKEN = tf.read()\r\n\r\n        print(\"Running Shinobu...\")\r\n        super().run(self.TOKEN, reconnect=True)\r\n\r\n    async def process_commands(self,message):\r\n        ctx = await self.get_context(message, cls=Context)\r\n        if ctx.command is not None and ctx.guild is not None:\r\n            if self.ready:\r\n                await self.invoke(ctx)\r\n            else:\r\n                await ctx.send(\"Wait for Oneechan to be ready!! \")\n    \r\n    async def on_connect(self):\r\n        print(\"Ara Ara!\")\r\n\r\n    async def on_disconnect(self):\r\n        print(\"Ara Ara Sionara!\")\r\n\r\n    async def on_error(self, err, *args, **kwargs):\r\n        if err == 'on_command_error':\r\n            await args[0].send(\"Something went wrong.\")\r\n        channel = self.get_channel(710051662563115052)\r\n        await channel.send(\"An error occurred\")\r\n        raise\r\n\r\n    async def on_command_error(self, ctx, exc):\r\n        if any([isinstance(exc, error) for error in IGNORE_EXCEPTIONS]):\r\n            pass\r\n\r\n        elif isinstance(exc, MissingRequiredArgument):\r\n            await ctx.send(\"One or more arguments are missing from the command\")\r\n        # elif hasattr(exc, \"original\"):\r\n        #     raise exc.original\r\n        elif isinstance(exc, MissingPermissions):\r\n            await ctx.send(\"You are not allowed to use this command\")\r\n\r\n        elif isinstance(exc, MissingRole):\r\n            await ctx.send(\"You do not have the necessary role to use this command\")\r\n        \r\n        elif hasattr(exc, \"original\"):\r\n            if isinstance(exc.original, Forbidden):\r\n                await ctx.send(\"Shinobu doesn't have permission to do that!!\")\r\n            elif isinstance(exc.original, HTTPException):\r\n                await ctx.send(\"Unable to send messages\")\r\n            else:\r\n                raise exc.original\r\n        else:\r\n            raise exc\r\n\r\n    async def rules_reminder(self):\r\n        await self.stdout.send(\"Remember to follow the rules\")\r\n\r\n    async def on_ready(self):\r\n        if not self.ready:\r\n            self.guild = self.get_guild(710051662563115049)\r\n            self.stdout = self.get_channel(710051662563115052)\r\n            self.scheduler.start()\r\n            self.scheduler.add_job(self.rules_reminder, CronTrigger(day_of_week=0, hour=12))\r\n\r\n            # embed = Embed(title=\"Ara Ara!!\", description=\"Shinobu is now here\", colour=0xFF0000, timestamp=datetime.utcnow())\r\n            # embed.add_field(name=\"Name\", value=\"Value\",inline=False)\r\n            # embed.set_author(name=\"LunaticSatoshi\", icon_url=self.guild.icon_url)\r\n            # embed.set_thumbnail(url=self.guild.icon_url)\r\n            # embed.set_image(url=self.guild.icon_url)\r\n            # embed.set_footer(text=\"This is a footer\")\r\n            # await channel.send(embed=embed)\r\n            while not self.cogs_ready.all_ready():\r\n                await sleep(0.5)\r\n            self.ready = True\r\n            print(\"Shinobu ready\")\r\n            await self.stdout.send(\"Now Online\")\r\n        else:\r\n            print(\"Shinobu reconnected\")\r\n\r\n    async def on_message(self, message):\r\n        if not message.author.bot:\r\n            await self.process_commands(message)\r\n\r\nbot = Bot()", "repo_name": "Lunaticsatoshi/Shinobu", "sub_path": "lib/bot/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 4729, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "glob.glob", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 17, "usage_type": "name"}, {"api_name": "discord.ext.commands.BadArgument", "line_number": 17, "usage_type": "name"}, {"api_name": "discord.ext.commands.Bot", "line_number": 30, "usage_type": "name"}, {"api_name": "apscheduler.schedulers.asyncio.AsyncIOScheduler", "line_number": 36, "usage_type": "call"}, {"api_name": "db.db.autosave", "line_number": 37, "usage_type": "call"}, {"api_name": "db.db", "line_number": 37, "usage_type": "name"}, {"api_name": "discord.ext.commands.Context", "line_number": 58, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 82, "usage_type": "argument"}, {"api_name": "discord.ext.commands.MissingPermissions", "line_number": 86, "usage_type": "argument"}, {"api_name": "discord.ext.commands.MissingRole", "line_number": 89, "usage_type": "argument"}, {"api_name": "discord.errors.Forbidden", "line_number": 93, "usage_type": "argument"}, {"api_name": "discord.errors.HTTPException", "line_number": 95, "usage_type": "argument"}, {"api_name": "apscheduler.triggers.cron.CronTrigger", "line_number": 110, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "18023886271", "text": "# -*- coding:utf-8 -*-\n\"\"\"\n@author: guoxiaorui\n@file: 2_add_spaces\n@time: 2021/12/19 11:00 AM\n@desc: \n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def addSpaces(self, s: str, spaces: List[int]) -> str:\n        s_array = []\n        point = 0\n        for i in range(len(s)):\n            if point < len(spaces) and i == spaces[point]:\n                s_array.append(\" \")\n                point += 1\n            s_array.append(s[i])\n        return \"\".join(s_array)\n\n\nif __name__ == '__main__':\n    s = \"LeetcodeHelpsMeLearn\"\n    spaces = [8, 13, 15]\n    so = Solution()\n    print(so.addSpaces(s, spaces))\n", "repo_name": "sun10081/leetcode_practice_xiaorui", "sub_path": "questions/week/2021/2021_12_19/2_add_spaces.py", "file_name": "2_add_spaces.py", "file_ext": "py", "file_size_in_byte": 609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "43321918678", "text": "import time\n\nimport pytest\nfrom selenium import webdriver\n\nfrom pageObjects.Frames import Frame\nfrom pageObjects.Homepage import HomePage\nfrom Utilities.BaseClass import BaseClass\nfrom Utilities.Screenshot import Screenshot\n\n\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--browser_name\", action=\"store\", default=\"chrome\"\n    )\n\n\n\n@pytest.fixture(scope=\"class\")\ndef setup(request):\n    browser_name = request.config.getoption(\"browser_name\")  # to get the browser name during runtime\n    ss_path = \"/conftest/\"\n\n    if browser_name == \"chrome\":\n        options = webdriver.ChromeOptions()\n        options.add_argument(\"--disable-popup-blocking\")\n        driver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\indrasen\\\\Documents\\\\chromedriver_win32\\\\chromedriver.exe\",chrome_options=options)\n\n    elif browser_name == \"firefox\":  # firefox gecko driver\n        # driver = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\indrasen\\\\Documents\\\\geckodriver-v0.26.0-win64\\\\geckodriver.exe\")\n        driver = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\indrasen\\\\Downloads\\\\geckodriver-v0.26.0-win32\\\\geckodriver.exe\")\n\n    driver.get(\"https://c6.avaamo.com/web_channels/444588bc-92fe-477f-87c1-88a92946346a/demo.html?theme=avm-messenger&banner=true&demo=true&banner_text=%20&banner_title=This%20is%20how%20the%20chat%20agent%20shows%20up\")\n\n    baseclass = BaseClass()\n    logs = baseclass.getLogger()\n    ss = Screenshot(driver)\n\n    expected_title = \"Test agent - IRA\"\n\n    try:\n        assert driver.title == expected_title\n        logs.info(\"Webpage loaded successfully\")\n\n    except Exception as e:\n        logs.info(\"Webpage failed to load: %s\", e)\n        ss.screenShot(ss_path+\"webpage_load_fail.png\")\n\n    driver.maximize_window()\n    driver.implicitly_wait(4)\n\n    home_page = HomePage(driver)\n\n    try:\n        if home_page.IraIcon().is_displayed():\n            home_page.notificationButton().click()\n\n    except Exception as e:\n        logs.critical(\"IraIcon is not displayed in the webpage: %s\", e)\n        ss.screenShot(ss_path+\"IraIcon_Unavailable.png\")\n\n    request.cls.driver = driver\n    yield\n    driver.close()", "repo_name": "indrakumar0812/Avaamo", "sub_path": "testcases/conftest.py", "file_name": "conftest.py", "file_ext": "py", "file_size_in_byte": 2127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 25, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 27, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 31, "usage_type": "name"}, {"api_name": "Utilities.BaseClass.BaseClass", "line_number": 35, "usage_type": "call"}, {"api_name": "Utilities.Screenshot.Screenshot", "line_number": 37, "usage_type": "call"}, {"api_name": "pageObjects.Homepage.HomePage", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "34232362834", "text": "#! /usr/bin/env python\n#Ryan A. Melnyk\n#schmelnyk@gmail.com\n\nimport os, argparse, subprocess, errno, sys, string\nimport multiprocessing as mp\nimport pyparanoid.pyparanoid as pp\nfrom Bio import SeqIO\n\ndef parse_args():\n\tparser = argparse.ArgumentParser(description='''\nTakes a complete PyParanoid directory (base and propagated) and generate list of orthologs. Using the 'threshold'\nargument relaxes the cutoff and includes homologs that occur exactly once in some fraction of all strains (e.g. 90%).\n\t''')\n\tparser.add_argument('outdir', type=str,help='path to PyParanoid folder')\n\tparser.add_argument('prefix',type=str,help='output folder for data')\n\tparser.add_argument('--threshold',type=float,help='proportion of strains to be considered an ortholog')\n\tparser.add_argument('--cpus',type=int,help='number of CPUs to use for tasks. Defaults to # of cores available.')\n\tparser.add_argument('--clean',action=\"store_true\",help=\"clean up intermediate files\")\n\tparser.add_argument('--strains',type=str,help='specify if a subset of strains are to be identified')\n\tparser.add_argument('--orthos',type=str,help=\"specify to use previously calculated groups\")\n\tparser.add_argument('--use_MP',action=\"store_true\",help=\"use the python multiprocessing module for dramatic speed up\")\n\treturn parser.parse_args()\n\n\ndef parse_matrix(strains):\n\torthos = []\n\tprint(\"Parsing matrix to identify orthologs...\")\n\theader = open(os.path.join(outdir,\"homolog_matrix.txt\")).readline().rstrip().split(\"\\t\")\n\ttry:\n\t\tindices = [header.index(s) for s in strains]\n\texcept:\n\t\tprint(\"A strain was not found in the matrix. Check strainlist.\")\n\tfor line in open(os.path.join(outdir,\"homolog_matrix.txt\")):\n\t\tvals = line.rstrip().split(\"\\t\")\n\t\tif vals[0] == \"\":\n\t\t\tcontinue\n\t\telse:\n\t\t\tstrain_vals = [vals[i] for i in indices]\n\t\t\tif set(strain_vals[1:]) == set([\"1\"]):\n\t\t\t\torthos.append(vals[0])\n\tprint(len(orthos), \"orthologs found.\")\n\treturn orthos\n\ndef parse_threshold_matrix(t,strains):\n\torthos = []\n\tprint(\"Parsing matrix to identify orthologs...\")\n\theader = open(os.path.join(outdir,\"homolog_matrix.txt\")).readline().rstrip().split(\"\\t\")\n\ttry:\n\t\tindices = [header.index(s) for s in strains]\n\texcept:\n\t\tprint(\"A strain was not found in the matrix. 
Check strainlist.\")\n\tfor line in open(os.path.join(outdir,\"homolog_matrix.txt\")):\n\t\tvals = line.rstrip().split(\"\\t\")\n\t\tif vals[0] == \"\":\n\t\t\tcontinue\n\t\telse:\n\t\t\tstrain_vals = [vals[i] for i in indices]\n\t\t\tif float(strain_vals.count(\"1\"))/float(len(strain_vals)) > t:\n\t\t\t\torthos.append(vals[0])\n\tprint(len(orthos), \"orthologs found.\")\n\treturn orthos\n\ndef get_orthos(orthos,strains):\n\tseqdata = {}\n\tprint(\"Parsing homolog.faa...\")\n\tfor seq in SeqIO.parse(open(os.path.join(outdir,\"homolog.faa\"),'r'),'fasta'):\n\t\tvals = seq.id.split(\"|\")\n\t\tif vals[0] in strains:\n\t\t\tif vals[2] in orthos:\n\t\t\t\tif vals[2] in seqdata:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tseqdata[vals[2]] = {}\n\t\t\t\tif vals[0] not in seqdata[vals[2]]:\n\t\t\t\t\tseq.id = vals[0]\n\t\t\t\t\tseqdata[vals[2]][vals[0]] = seq\n\tprint(\"Parsing prop_homolog.faa...\")\n\tfor seq in SeqIO.parse(open(os.path.join(outdir,\"prop_homolog.faa\"),'r'),'fasta'):\n\t\tvals = seq.id.split(\"|\")\n\t\tif vals[0] in strains:\n\t\t\tif vals[2] in orthos:\n\t\t\t\tif vals[2] in seqdata:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tseqdata[vals[2]] = {}\n\t\t\t\tif vals[0] not in seqdata[vals[2]]:\n\t\t\t\t\tseq.id = vals[0]\n\t\t\t\t\tseqdata[vals[2]][vals[0]] = seq\n\tfor group in seqdata:\n\t\to = open(os.path.join(prefix,\"orthos\",group+\".faa\"),'w')\n\t\tfor s in seqdata[group]:\n\t\t\tSeqIO.write(seqdata[group][s],o,'fasta')\n\t\to.close()\n\treturn\n\ndef align_orthos(orthos,cpus):\n\tcount = len(orthos)\n\tprint(\"Aligning {} ortholog files...\".format(str(count)))\n\tif use_MP:\n\t\tpool = mp.Pool(processes=cpus)\n\t\t[pool.apply_async(hmmalign, args=(o,)) for o in orthos]\n\t\tpool.close()\n\t\tpool.join()\n\telse:\n\t\tfor o in orthos:\n\t\t\thmmalign(o)\n\t\t\tcount -= 1\n\treturn\n\ndef hmmalign(o):\n\tcmds = \"hmmalign -o {} {} {}\".format(os.path.join(prefix,\"ortho_align\",o+\".sto\"),os.path.join(prefix,\"hmms\",o+\".hmm\"),os.path.join(prefix,\"orthos\",o+\".faa\"))\n\tproc = subprocess.Popen(cmds.split())\n\tproc.wait()\n\treturn\n\ndef extract_hmms(orthos):\n\tcount = len(orthos)\n\tpresent = [f.split(\".\")[0] for f in os.listdir(os.path.join(prefix,\"hmms\"))]\n\tprint(\"Extracting {} HMM files...{} already found.\".format(str(count),str(len(present))))\n\tFNULL = open(os.devnull, 'w')\n\tfor o in orthos:\n\t\tcount -= 1\n\t\tif o in present:\n\t\t\tpass\n\t\telse:\n\t\t\tcmds = \"hmmfetch -o {} {} {}\".format(os.path.join(prefix,\"hmms\",o+\".hmm\"),os.path.join(outdir,\"all_groups.hmm\"),o)\n\t\t\tproc = subprocess.Popen(cmds.split(),stdout=FNULL,stderr=FNULL)\n\t\t\tproc.wait()\n\t\tif count % 100 == 0:\n\t\t\tprint(\"\\t\"+str(count), \"remaining...\")\n\t\telse:\n\t\t\tpass\n\tif count == 0:\n\t\tprint(\"\\tDone!\")\n\tFNULL.close()\n\treturn\n\ndef create_master_alignment(orthos,strains):\n\n\talign_data = {k : [] for k in strains}\n\tcount = len(orthos)\n\ttotal_leng = 0 ###DEBUG\n\tprint(\"Creating master alignment...Parsing {} homologs...\".format(str(count)))\n\ttranslation = str.maketrans('','',string.ascii_lowercase+\".\")\n\tfor o in orthos:\n\t\tcount -= 1\n\t\tpresent = []\n\t\tfor line in open(os.path.join(prefix,\"hmms\",o+\".hmm\")):\n\t\t\tif line.startswith(\"LENG\"):\n\t\t\t\tlength = int(line.rstrip().split()[1])\n\t\t\t\ttotal_leng += length ###DEBUG\n\t\t\t\tbreak\n\t\tfor line in open(os.path.join(prefix,\"ortho_align\",o+\".sto\")):\n\t\t\tif line.startswith(\"#\") or 
line.startswith(\"//\"):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tvals = line.rstrip().split()\n\t\t\t\tif len(vals) < 1:\n\t\t\t\t\tcontinue\n\t\t\t\telif vals[0] in align_data:\n\t\t\t\t\talign_data[vals[0]].append(vals[1].translate(translation))\n\t\t\t\t\tif vals[0] not in present:\n\t\t\t\t\t\tpresent.append(vals[0])\n\t\t\t\telse:\n\t\t\t\t\talign_data[vals[0]] = [vals[1].translate(translation)]\n\t\t\t\t\tif vals[0] not in present:\n\t\t\t\t\t\tpresent.append(vals[0])\n\t\tfor s in strains:\n\t\t\tif s not in present:\n\t\t\t\talign_data[s].append(\"-\"*length)\n\t\t\tif len(\"\".join(align_data[s])) != total_leng:\n\t\t\t\tprint(s, \"is short!\")\n\t\t\t\tprint(total_leng, len(\"\".join(align_data[s])))\n\t\t\t\tprint(align_data[s])\n\t\t\t\tprint(o)\n\t\t\t\tsys.exit()\n\t\tif count % 100 == 0:\n\t\t\tprint(\"\\t\"+str(count), \"remaining...\")\n\t\telse:\n\t\t\tpass\n\tprint(\"Done!\")\n\tprint(\"Writing alignment...\")\n\tout = open(os.path.join(prefix,\"master_alignment.faa\"),'w')\n\tfor a in align_data:\n\t\tout.write(\">{}\\n{}\\n\".format(a,\"\".join(align_data[a]).upper().replace(\".\",\"-\")))\n\tout.close()\n\tout = open(os.path.join(prefix,\"orthos.txt\"),'w')\n\t[out.write(\"{}\\n\".format(orth)) for orth in orthos]\n\tout.close()\n\treturn\n\ndef get_strains():\n\tstrains = [line.rstrip() for line in open(os.path.join(outdir,\"strainlist.txt\"))]\n\t[strains.append(s) for s in [line.rstrip() for line in open(os.path.join(outdir,\"prop_strainlist.txt\"))]]\n\treturn strains\n\ndef index_hmms():\n\tprint(\"Indexing all_groups.hmm...\")\n\tcmds = \"hmmfetch --index {}\".format(os.path.join(outdir,\"all_groups.hmm\"))\n\tproc = subprocess.Popen(cmds.split())\n\tproc.wait()\n\treturn\n\ndef main():\n\targs = parse_args()\n\tglobal prefix\n\tprefix = os.path.abspath(args.prefix)\n\tglobal outdir\n\toutdir = os.path.abspath(args.outdir)\n\n\tpp.createdirs(prefix,[\"orthos\",\"ortho_align\",\"hmms\"])\n\tif args.strains:\n\t\tstrains = [line.rstrip() for line in open(os.path.abspath(args.strains),'r')]\n\telse:\n\t\tstrains = get_strains()\n\n\tif args.orthos:\n\t\torthos = [line.rstrip() for line in open(os.path.abspath(args.orthos),'r')]\n\telse:\n\t\tif args.threshold:\n\t\t\torthos = parse_threshold_matrix(args.threshold,strains)\n\t\telse:\n\t\t\torthos = parse_matrix(strains)\n\n\tif args.cpus:\n\t\tcpus = args.cpus\n\telse:\n\t\tcpus = mp.cpu_count()\n\n\tglobal use_MP\n\tif args.use_MP:\n\t\tuse_MP = True\n\telse:\n\t\tuse_MP = False\n\n\tindex_hmms()\n\textract_hmms(orthos)\n\n\tget_orthos(orthos,strains)\n\talign_orthos(orthos,cpus)\n\tcreate_master_alignment(orthos,strains)\n\tif args.clean:\n\t\tpp.cleanup(os.path.join(prefix,\"ortho_align\"))\n\t\tpp.cleanup(os.path.join(prefix,\"orthos\"))\n\t\tpp.cleanup(os.path.join(prefix,\"hmms\"))\n\n\nif __name__ == '__main__':\n\tmain()\n", "repo_name": "ryanmelnyk/PyParanoid", "sub_path": "IdentifyOrthologs.py", "file_name": "IdentifyOrthologs.py", "file_ext": "py", "file_size_in_byte": 7808, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 32, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 48, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.parse", "line_number": 67, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.parse", "line_number": 79, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 79, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.write", "line_number": 93, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 93, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 113, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.devnull", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 128, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 208, "usage_type": "call"}, {"api_name": "os.path", "line_number": 208, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pyparanoid.pyparanoid.createdirs", "line_number": 212, "usage_type": "call"}, {"api_name": 
"pyparanoid.pyparanoid", "line_number": 212, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 214, "usage_type": "call"}, {"api_name": "os.path", "line_number": 214, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 229, "usage_type": "call"}, {"api_name": "pyparanoid.pyparanoid.cleanup", "line_number": 244, "usage_type": "call"}, {"api_name": "pyparanoid.pyparanoid", "line_number": 244, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path", "line_number": 244, "usage_type": "attribute"}, {"api_name": "pyparanoid.pyparanoid.cleanup", "line_number": 245, "usage_type": "call"}, {"api_name": "pyparanoid.pyparanoid", "line_number": 245, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "pyparanoid.pyparanoid.cleanup", "line_number": 246, "usage_type": "call"}, {"api_name": "pyparanoid.pyparanoid", "line_number": 246, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path", "line_number": 246, "usage_type": "attribute"}]} +{"seq_id": "30121077240", "text": "from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructField, StructType, IntegerType, StringType, DoubleType\nimport time\n\n#Q3: What was the best in term of revenue Animation movie of 1995?\n\nstart = time.perf_counter()\n\nsc = SparkSession \\\n .builder \\\n .appName(\"SQL Spark CSV query 3 execution\") \\\n .getOrCreate() \n\nmovie_schema = StructType([\n StructField(\"movie_id\", IntegerType(), True),\n StructField(\"name\", StringType(), True),\n StructField(\"description\", StringType(), True),\n StructField(\"year\", StringType(), True),\n StructField(\"duration\", IntegerType(), True),\n StructField(\"cost\", IntegerType(), True),\n StructField(\"revenue\", IntegerType(), True),\n StructField(\"rating\", DoubleType(), True)])\n\nmovies_df = sc.read.format('csv').options(header='false').schema(movie_schema).load(\"hdfs://master:9000/home/user/project/csv/movies.csv\") \nmovies_df.registerTempTable(\"movies\")\n\n\nmovie_genre_schema = StructType([\n StructField(\"movie_id\", IntegerType(), True),\n \t StructField(\"genre\", StringType(), True)])\n\nmovie_genres_df = sc.read.format('csv').options(header='false').schema(movie_genre_schema).load(\"hdfs://master:9000/home/user/project/csv/movie_genres.csv\") \nmovie_genres_df.registerTempTable(\"movie_genres\")\n\n#find best in term of revenue Animation movie of 1995\nquery = \"SELECT movies.name FROM movies INNER JOIN movie_genres ON movies.movie_id = movie_genres.movie_id WHERE movie_genres.genre = 'Animation' and movies.year = 1995 and movies.revenue > 0 ORDER BY movies.revenue DESC\"\nmovie = sc.sql(query)\n\nprint(\"The best in term of revenue Animation movie of 1995 was\",movie.collect()[0][0])\n\n# Time\ntime_avg = time.perf_counter() - start\nwith open('time.txt', 'a') as fd:\n fd.write('Q3: ')\n fd.write(str(time_avg))\n fd.write('\\n')", "repo_name": "geo-chalk/Msc-Data-Science-AUEB", "sub_path": "Large Scale Data Management/Project/Part1/Part1_Task4/4_2csv/Code/4_2csv_Q3.py", "file_name": "4_2csv_Q3.py", "file_ext": "py", "file_size_in_byte": 1860, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "time.perf_counter", "line_number": 7, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder.appName", "line_number": 9, "usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession.builder", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 9, "usage_type": "name"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 14, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 15, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 15, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 16, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 16, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 17, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 17, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 18, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 18, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 19, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 19, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 20, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 20, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 21, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 22, "usage_type": "call"}, {"api_name": "pyspark.sql.types.DoubleType", "line_number": 22, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructType", "line_number": 28, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 29, "usage_type": "call"}, {"api_name": "pyspark.sql.types.IntegerType", "line_number": 29, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StructField", "line_number": 30, "usage_type": "call"}, {"api_name": "pyspark.sql.types.StringType", "line_number": 30, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "11485225179", "text": "#用Adaboost和决策树的模型比较\n#在sklearn中的Adaboost默认采用的是决策树的模型,我们可以随机生成一些数据,然后对比下Adaboost中的弱分类器,决策树分类器和Adaboost模型在分类准确率上的表现\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn import datasets\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\n\n#设置Adaboost迭代次数\nn_estimators = 200\n#使用\nx,y = datasets.make_hastie_10_2(n_samples=12000,random_state=1)\n#从12000个数据中取前2000进行测试集,其余作为训练集\ntrain_x,train_y = x[2000:],y[2000:]\ntest_x,test_y = x[:2000],y[:2000]\n#弱分类器\ndt_stump = DecisionTreeClassifier(max_depth=1,min_samples_leaf=1)\ndt_stump.fit(train_x,train_y)\ndt_stump_err = 1.0-dt_stump.score(test_x,test_y)\n#决策树分类器\ndt = DecisionTreeClassifier()\ndt.fit(train_x,train_y)\ndt_err = 1.0-dt.score(test_x,test_y)\n#AdaBoost分类器\nada = AdaBoostClassifier(base_estimator=dt_stump,n_estimators=n_estimators)\nada.fit(train_x,train_y)\n#三个分类器的错误率可视化\nfig = plt.figure()\n#设置plt正确显示中文\nplt.rcParams['font.sans-serif'] = ['SimHei']\nax = 
fig.add_subplot(111)\nax.plot([1,n_estimators],[dt_stump_err]*2,'k-',label = '决策树弱分类器 错误率')\nax.plot([1,n_estimators],[dt_err]*2,'k--',label = '决策树模型 错误率')\nada_err = np.zeros((n_estimators,))\n#遍历每次迭代的结果i为迭代次数,pred_y为预测结果\nfor i,pred_y in enumerate(ada.staged_predict(test_x)):\n #统计错误率\n ada_err[i] = zero_one_loss(pred_y,test_y)\n#绘制每次迭代的Adaboost错误率\nax.plot(np.arange(n_estimators)+1,ada_err,label = 'Adaboost Test 错误率',color='R')\nax.set_xlabel('迭代次数')\nax.set_ylabel('错误率')\nleg = ax.legend(loc='upper right',fancybox=True)\nplt.show()", "repo_name": "13060923171/Data_analysis-Algorithmic_combat", "sub_path": "实战-AdaBoost算法/main2.py", "file_name": "main2.py", "file_ext": "py", "file_size_in_byte": 1919, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sklearn.datasets.make_hastie_10_2", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 14, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.ensemble.AdaBoostClassifier", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 32, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.metrics.zero_one_loss", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "37737599729", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# author: Olivier Noguès\r\n\r\n\r\nimport os\r\nimport time\r\nimport inspect\r\nimport json\r\nimport importlib\r\n\r\nimport ares.Lib.js.fncs\r\nimport ares.Lib.js.objects\r\nimport ares.utils.AresSiphash\r\n\r\n\r\n# Factory wii all the javascript information\r\nfactory = None\r\n\r\n\r\nDSC = {\r\n 'eng': '''\r\n:dsc:\r\nThe Ares Js object is the object in charge of defining the functions to apply to the different recordSets once they are \r\ndefined on the Javascript side. This object will not be in charge of encoding the data (this is done as part of the \r\npandas DataFrame in the module **AresFilePandas.py**), it will be on charge of ensuring that all your containers using the same \r\ndata will rely on the same javascript variable. The only differences will be the javascript transformation to end up \r\nto an object definition which will fit the containers.\r\n__\r\nEach chart and table containers will have specific requirements and this specific transformation is done in objects/ conversion folder. 
\r\nThose conversion are defined based on the output function in the Js Object and each chart family will use it to type the data.\r\n__\r\nFor example for the **ChartJs object** you get the below data conversion\r\n\r\n```python\r\ndef plotChartJs(self, chartType, data=None, width=100, widthUnit=\"%\", height=300, heightUnit='px', title='', chartOptions=None, toolsbar=None, htmlCode=None, globalFilter=None):\r\n \r\nparams = (list(data._schema['values']), list(data._schema['keys']))\r\nreturn self.add(graph.AresHtmlGraphChartJs.Chart(self, chartType, data.output('ChartJs', chartType, params), width, widthUnit, height, heightUnit, title, chartOptions, toolsbar, htmlCode, globalFilter), sys._getframe().f_code.co_name)\r\n```\r\n\r\nThe data.output() function in this case will type the object to be a ChartJs one. The definition of the conversion from a recordSet to a ChartJs object is done in the module **jsCharts.py** specific to the ChartJs framework.\r\nThe Javascript part used to convert the data to this container is then defined in a static and generic manner in the value string variable.\r\n\r\n```javascript\r\nvar temp = {}; var labels = []; var uniqLabels = {};\r\nseriesNames.forEach(function(series){temp[series] = {}}) ;\r\ndata.forEach(function(rec) { \r\n seriesNames.forEach(function(name){\r\n if(rec[name] !== undefined) {\r\n if (!(rec[xAxis] in uniqLabels)){labels.push(rec[xAxis]); uniqLabels[rec[xAxis]] = true};\r\n temp[name][rec[xAxis]] = rec[name]}})\r\n});\r\nresult = {datasets: [], labels: labels};\r\nseriesNames.forEach(function(series){\r\n dataSet = {label: series, data: []};\r\n labels.forEach(function(x){\r\n if (temp[series][x] == undefined) {dataSet.data.push(null)} else {dataSet.data.push(temp[series][x])}\r\n }); result.datasets.push(dataSet)})\r\n```\r\n\r\n## How to add Javascript transformations\r\n\r\n\r\n## How to use the debug flag\r\n\r\n\r\n## Use and Create a regression test \r\n\r\nThis part is still in progress (do not hesitate to help), but the idea is quite simple to add various tests to your javascript functions.\r\nIndeed any changes in the Javascript is quite painful and difficult to test / debug, the idea with this is to store all the relevant input and output in a text file automatically leveraging on the AReS architecture to make this easier.\r\n\r\nA possible extension to this model could be to create specificity per type of browser in order to have a font end code which can adapt according to the browser.\r\n\r\n'''}\r\n\r\n\r\ndef load(forceReload=False):\r\n \"\"\"\r\n :category: Javascript\r\n :rubric: JS\r\n :type: Factory\r\n :dsc:\r\n Load the factory with the all the different Javascript functions defined in the framework.\r\n It will store all the different Javascript function used to transform the recordSet or the ones in charge\r\n of changing the recordSet to fit the different containers in the AReS components.\r\n :return: The factory data\r\n \"\"\"\r\n global factory\r\n\r\n if factory is None or forceReload:\r\n tempFactory, aliasChecks = {}, {}\r\n jsStructures = {'fncs': ares.Lib.js.fncs, 'objs': ares.Lib.js.objects}\r\n for jsType, jsMod in jsStructures.items():\r\n for mod in os.listdir(os.path.dirname(jsMod.__file__)):\r\n if not mod.endswith('.py') or mod == '__init__.py':\r\n continue\r\n\r\n pyMod = importlib.import_module('%s.%s' % (jsMod.__name__, mod.replace(\".py\", \"\")))\r\n for name, obj in inspect.getmembers(pyMod):\r\n if inspect.isclass(obj) and obj.alias is not None:\r\n if hasattr(obj, 'alias') and 
hasattr(obj, 'value'):\r\n inst = obj()\r\n alias = [inst.alias] if not hasattr(inst, 'chartTypes') else [\"%s_%s\" % (inst.alias, cType.replace(\"-\", \"\")) for cType in inst.chartTypes]\r\n params = \"data\" if getattr(inst, 'params', None) is None else \"data, %s\" % \", \".join(inst.params)\r\n for a in alias:\r\n if jsType == 'fncs':\r\n a = \"ares_%s\" % a.replace(\"-\", \"\")\r\n if a in aliasChecks:\r\n raise Exception(\"Alias %s found two time in the framework %s and %s\" % (a, obj.__name__, aliasChecks[a]))\r\n\r\n aliasChecks[a] = obj.__name__\r\n text = \"\".join([jsLine.strip() for jsLine in inst.value.strip().split(\"\\n\")])\r\n tempFactory.setdefault(jsType, {})[a] = {\"text\": text, 'params': params, 'module': pyMod.__name__, 'class': obj}\r\n factory = tempFactory\r\n return factory\r\n\r\n\r\nclass Js(object):\r\n \"\"\"\r\n :category:\r\n :rubric:\r\n :type:\r\n :dsc:\r\n\r\n \"\"\"\r\n\r\n def __init__(self, aresObj, pyDf, keys=None, values=None, debug=False):\r\n load()\r\n self._schema = {'fncs': [], 'out': None, 'post': [], 'keys': set() if keys is None else set(keys),\r\n 'values': set() if values is None else set(values), 'debug': getattr(aresObj, 'DEBUG', debug)}\r\n self._dataId = id(pyDf) # Store the memory ID of the original object (the one known by all the components\r\n if not hasattr(pyDf, 'htmlCode'):\r\n dataCode = None\r\n # For input data not defined as dataframe\r\n for key, dataSrc in aresObj.jsSources.items():\r\n if dataSrc.get('dataId') == self._dataId:\r\n dataCode = key\r\n break\r\n\r\n pyDf = aresObj.df(pyDf, htmlCode=\"ares_id_%s\" % len(aresObj.jsSources) if dataCode is None else dataCode)\r\n self._jqId = pyDf.htmlCode # Original recordSet, this will never change\r\n self.aresObj, self.jqId, self._data = aresObj, pyDf.htmlCode, pyDf\r\n\r\n def setId(self, jqId):\r\n \"\"\"\r\n :category: Javascript Object\r\n :rubric: JS\r\n :type: System\r\n :dsc:\r\n Change the Id variable name for the javascript data source.\r\n :return: The Python object\r\n \"\"\"\r\n self.jqId = jqId if jqId is not None else self._jqId\r\n return self\r\n\r\n def attach(self, htmlObj):\r\n \"\"\"\r\n :category: Javascript Object\r\n :rubric: JS\r\n :type: Front End\r\n :dsc:\r\n Attach the Dataframe to a HTML Object. This function is normally used in the different components in order\r\n to guarantee the link of the data. This will also ensure that the same data set will be store only once in the page\r\n \"\"\"\r\n if not self._jqId in self.aresObj.jsSources:\r\n self.aresObj.jsSources[self._jqId] = {'dataId': self._dataId, 'containers': [], 'data': self._data}\r\n self.aresObj.jsSources[self._jqId]['containers'].append(htmlObj)\r\n self.aresObj.jsSources[self._jqId]['data'] = self._data # In case of replacements\r\n\r\n def output(self, outFamily, outType, args):\r\n \"\"\"\r\n :category: Formatting\r\n :rubric: JS\r\n :type: Front End\r\n :dsc:\r\n Format the recordSet to a defined container.\r\n \"\"\"\r\n self._schema['out'] = {\"family\": outFamily, 'type': outType, 'params': args}\r\n self._schema['out']['name'] = \"%s_%s\" % (outFamily, outType.replace(\"-\", \"\")) if \"%s_%s\" % (outFamily, outType.replace(\"-\", \"\")) in factory['objs'] else outFamily\r\n return self\r\n\r\n def fncs(self, fncNames, systemInfo=None):\r\n \"\"\"\r\n :category: Formatting\r\n :rubric: JS\r\n :type: Front End\r\n :dsc:\r\n Post process functions on the recordSet. 
It will potentially enhance the columns to be processed if the systemInfo parameters are defined.\r\n This will allow to pass extra parameters to the different functions in the framework according to the system.\r\n Information can be computed in the bespoke reports or coming directly from the source system.\r\n In order to use this the category should be defined in the Javascript function defined in the module jsFncsRecords.\r\n :return: The Python Js object\r\n \"\"\"\r\n if not fncNames:\r\n return self\r\n\r\n if not isinstance(fncNames, list):\r\n fncNames = [fncNames]\r\n for fncName in fncNames:\r\n args = None\r\n if isinstance(fncName, tuple):\r\n # This mean that some parameters are expected in the configuration\r\n if fncName[0] == 'order':\r\n countPerAxis, orderSeries = {}, []\r\n self._data['order'] = 0\r\n if fncName[1] is not None:\r\n for rec in self._data[fncName[1]]:\r\n countPerAxis[rec] = countPerAxis.get(rec, -1) + 1\r\n orderSeries.append(countPerAxis[rec])\r\n self._data['order'] = orderSeries\r\n continue\r\n\r\n args = list(fncName)[1:]\r\n fncName = \"ares_%s\" % fncName[0].replace(\"-\", \"\")\r\n factory['fncs'][fncName]['class'].extendColumns(self._schema, args)\r\n if systemInfo is not None:\r\n for category, sysCols in systemInfo.items():\r\n args = factory['fncs'][fncName]['class'].extendArgs(category, args, sysCols)\r\n self._schema['fncs'].append({'name': fncName, 'args': args})\r\n return self\r\n\r\n def post(self, fncNames):\r\n \"\"\"\r\n :category: Formatting\r\n :rubric: JS\r\n :type: Front End\r\n :dsc:\r\n\r\n :return:\r\n \"\"\"\r\n if not isinstance(fncNames, list):\r\n fncNames = [fncNames]\r\n for fncName in fncNames:\r\n args = None\r\n if isinstance(fncName, tuple):\r\n # This mean that some parameters are expected in the configuration\r\n args = list(fncName)[1:]\r\n fncName = fncName[0].replace(\"-\", \"\")\r\n self._schema['post'].append({'name': \"ares_%s\" % fncName, 'args': args})\r\n return self\r\n\r\n def getJs(self, fncs=None):\r\n \"\"\"\r\n :category: Formatting\r\n :rubric: JS\r\n :type: Front End\r\n :dsc:\r\n\r\n :return:\r\n \"\"\"\r\n val = self.jqId\r\n if fncs is not None:\r\n self.post(fncs)\r\n fncContentTemplate = [\"var result = []\", \"if (data !== null){%s} else {data = []}\", \"return result\"]\r\n if self._schema['debug']:\r\n fncContentTemplate = [\"var t0 = performance.now()\", \"var result = []\", \"if (data !== null){%s} else {data = []}\",\r\n \"console.log('Function: '+ arguments.callee.name +', count records: '+ data.length +', time: '+ (performance.now()-t0) +' ms.')\",\r\n \"console.log('Arguments -')\",\r\n \"for(var i = 1; i < arguments.length; i++){console.log(' ' + i +': '+ JSON.stringify(arguments[i]))}\",\r\n \"console.log()\",\r\n \"console.log('Input Data -')\", \"console.log(arguments[0])\", \"console.log()\",\r\n \"console.log('Output Data -')\", \"console.log(result)\", \"console.log()\", \"return result\"]\r\n fncContentTemplate = \"; \".join(fncContentTemplate)\r\n # Add the different filtering rules\r\n filters, jsFncs = [], self._schema['fncs']\r\n for k, v in self.aresObj.jsSources.get(self.jqId, {}).get('filters', {}).items():\r\n if k == 'allIfEmpty':\r\n v = list(v)\r\n filters.append(\"'%s': %s\" % (k, v))\r\n if len(filters) > 0:\r\n jsFncs = [{'args_js': ['{%s}' % \", \".join(filters)], 'name': 'ares_filter'}] + jsFncs\r\n # Set all the Javascript functions\r\n for fnc in jsFncs:\r\n if fnc['name'] in factory['fncs']:\r\n if hasattr(self.aresObj, \"jsGlobal\"):\r\n 
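# register the transform function once in the page's shared global Javascript scope; the data variable is then wrapped in a call to it below\r\n          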
self.aresObj.jsGlobal.fnc(\"%s(%s)\" % (fnc['name'], factory['fncs'][fnc['name']]['params']), fncContentTemplate % factory['fncs'][fnc['name']]['text'])\r\n else:\r\n print(\"function %s(%s) {var result = []; %s;return result; };\" % (fnc['name'], factory['fncs'][fnc['name']]['params'], factory['fncs'][fnc['name']]['text']))\r\n if fnc.get('args') is not None:\r\n val = \"%s(%s, %s)\" % (fnc['name'], val, \", \".join([json.dumps(a) for a in fnc['args']]))\r\n elif fnc.get('args_js') is not None:\r\n # TODO: Remove this hack done for the filter function\r\n val = \"%s(%s, %s)\" % (fnc['name'], val, \", \".join([a for a in fnc['args_js']]))\r\n else:\r\n val = \"%s(%s)\" % (fnc['name'], val)\r\n # Container formatting\r\n if self._schema['out'] is not None:\r\n if self._schema['out']['name'] in factory['objs']:\r\n if hasattr(self.aresObj, \"jsGlobal\"):\r\n self.aresObj.jsGlobal.fnc(\"Ares%s(%s)\" % (self._schema['out']['name'], factory['objs'][self._schema['out']['name']]['params']), fncContentTemplate % factory['objs'][self._schema['out']['name']]['text'])\r\n else:\r\n print(\"function Ares%s(%s) {%s};\" % (self._schema['out']['name'], fncContentTemplate % factory['objs'][self._schema['out']]['params'], factory['objs'][self._schema['out']['name']]['text']))\r\n params = [json.dumps(a) for a in self._schema['out']['params']]\r\n val = \"Ares%s(%s, %s)\" % (self._schema['out']['name'], val, \", \".join(params))\r\n # Post process function\r\n for fnc in self._schema['post']:\r\n fncName = fnc['name']\r\n if fncName in factory['fncs']:\r\n if hasattr(self.aresObj, \"jsGlobal\"):\r\n self.aresObj.jsGlobal.fnc(\"%s(%s)\" % (fncName, factory['fncs'][fncName]['params']), fncContentTemplate % factory['fncs'][fncName]['text'])\r\n else:\r\n print(\"function %s(%s) {%s\" % (fncName, factory['fncs'][fncName]['params'], fncContentTemplate % factory['fncs'][fncName]['text']))\r\n if fnc['args']:\r\n val = \"%s(%s, %s)\" % (fncName, val, \", \".join([json.dumps(a) for a in fnc['args']]))\r\n else:\r\n val = \"%s(%s)\" % (fncName, val)\r\n return val\r\n\r\n def toTsv(self, process='input'):\r\n \"\"\"\r\n :category:\r\n :rubric:\r\n :type:\r\n :dsc:\r\n\r\n :return: A String with the Javascript function to be used\r\n \"\"\"\r\n tsv = ares.Lib.js.objects.jsText.JsTextTsv()\r\n self.aresObj.jsGlobal.fnc(\"ToTsv(data, colNames)\", \"%s; return result\" % tsv.value)\r\n return \"ToTsv(%s, %s)\" % (self.jqId, json.dumps(list(self._schema['keys'] | self._schema['values'])))\r\n\r\n # --------------------------------------------------------------------------------------------------------------\r\n #\r\n # Testing part\r\n # --------------------------------------------------------------------------------------------------------------\r\n def addTest(self, fncName, data, outpath=None):\r\n \"\"\"\r\n :category:\r\n :type:\r\n :rubric:\r\n :dsc:\r\n\r\n \"\"\"\r\n self.fncs(fncName)\r\n if outpath is None:\r\n outpath = os.path.join(os.path.dirname(__file__), \"tests\")\r\n fncName, module = self._fncs[0]['name'], factory['fncs'][self._fncs[0]['name']]['module'].split(\".\")[-1]\r\n testFilePath = os.path.join(outpath, \"%s_%s.json\" % (module, fncName))\r\n if os.path.exists(testFilePath):\r\n inFile = open(testFilePath)\r\n testObj = json.loads(inFile.read())\r\n inFile.close()\r\n else:\r\n testObj = {\"fnc\": fncName, \"tests\": {}, 'author': ''}\r\n for alias in self._fncs:\r\n testObj[\"def\"] = \"function %s(%s) {var result = []; %s ;return result; };\" % (\r\n fncName, 
factory['fncs'][fncName]['params'], factory['fncs'][fncName]['text'])\r\n testObj['module'] = factory['fncs'][fncName]['module']\r\n #\r\n hashObj = ares.utils.AresSiphash.SipHash()\r\n testObj['tests'][hashObj.hashId(json.dumps(data))] = {'data': data, \"time\": time.time()}\r\n\r\n with open(r\"%s\\%s_%s.json\" % (outpath, module, fncName), \"w\") as f:\r\n json.dump(testObj, f, indent=2)\r\n\r\n def doReg(self, moduleName, fncName, outpath=None):\r\n \"\"\"\r\n :category:\r\n :type:\r\n :rubric:\r\n :dsc:\r\n\r\n \"\"\"\r\n if outpath is None:\r\n outpath = os.path.join(os.path.dirname(__file__), \"tests\")\r\n testFilePath = os.path.join(outpath, \"%s_%s.json\" % (moduleName, fncName))\r\n inFile = open(testFilePath)\r\n testObj = json.loads(inFile.read())\r\n inFile.close()\r\n for testId, testDef in testObj['tests'].items():\r\n print(\"var data_%s = %s\" % (testId, testDef['data']))\r\n print(\"%s(data_%s)\" % (testObj['fnc'], testId))\r\n\r\n\r\ndef docEnum(aresObj, outStream, lang='eng'):\r\n \"\"\"\r\n :category: Javascript\r\n :rubric: PY\r\n :type: Documentation\r\n \"\"\"\r\n outStream.link(\"Breadcrumb definition\", \"\")\r\n outStream.link(\"Markdown definition\", \"\")\r\n outStream.link(\"RecordSets Functions\", \"\")\r\n outStream.link(\"Date Functions\", \"\")\r\n outStream.link(\"Charts Functions\", \"\")\r\n outStream.link(\"Polyfill Functions\", \"\")\r\n outStream.link(\"Regression Test\", \"\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n data = [\r\n {\"name\": \"Olivier\", \"job\": \"BNP\", \"value\": 11, \"value2\": 2},\r\n {\"name\": \"Olivier\", \"job\": \"BNP\", \"value\": 4, \"value2\": 3},\r\n {\"name\": \"Aurelie\", \"job\": \"BNP\", \"value\": 4, \"value2\": None},\r\n ]\r\n jsObj = Js(None, 'jsYoupi')\r\n jsObj.doReg('jsRecords', 'sum')\r\n #jsObj.addTest(('sum', ['name'], ['value']), data)\r\n #print( jsObj.fncs([('sum', ['name'], ['value']), ('rename', {'count': 'youpi'})]).output(('C3', [''], [''])).getJs() )\r\n\r\n# data = [\r\n# {\"name\": \"A\", \"job\": \"BNP\", \"value\": 3, \"value2\": 2},\r\n# {\"name\": \"A\", \"job\": \"BNP\", \"value\": 4, \"value2\": 3},\r\n# {\"name\": \"B\", \"job\": \"BNP\", \"value\": 4, \"value2\": None},\r\n# ]\r\n#\r\n# jsObj = Js(None)\r\n#\r\n# # [(\"sum\", ), \"rename\"])\r\n# #jsObj.fncs( [ (\"sum\", ['name'], ['value']), ('rename', {\"value\": \"label\"} )] )\r\n# #jsObj.fncs( [(\"count(distinct)\", ['name', 'value2'] ), ('rename', {'count': 'youpi'}), ('C3', [''], ['']) ] )\r\n# #jsObj.fncs( [(\"sum\", ['name'], ['value', 'value2'] ) ] )\r\n#\r\n# print(os.path.dirname(__file__))\r\n# outpath = r'C:\\Users\\olivier\\Documents\\youpi\\ares\\Lib\\js\\tests'\r\n# #jsObj.fncs([(\"stats(Column)\", 'value2')])\r\n# jsObj.addTest(\"toMarkUp\", data=\"test data **youpi**\")\r\n#\r\n# #jsObj.output(('D3_bubble', ['value2'], 'name'))\r\n# #jsObj.fncs( [(\"sum\", ['name'], ['value', 'value2'] )] )\r\n#\r\n# # jsObj.test(outpath)\r\n#\r\n", "repo_name": "jeamick/ares-visual", "sub_path": "Lib/js/AresJs.py", "file_name": "AresJs.py", "file_ext": "py", "file_size_in_byte": 18274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "ares.Lib.js.fncs.Lib", "line_number": 93, "usage_type": "attribute"}, {"api_name": "ares.Lib.js.fncs", "line_number": 93, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 95, "usage_type": "attribute"}, {"api_name": "importlib.import_module", "line_number": 99, "usage_type": "call"}, {"api_name": "inspect.getmembers", "line_number": 100, "usage_type": "call"}, {"api_name": "inspect.isclass", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 281, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 294, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 305, "usage_type": "call"}, {"api_name": "ares.Lib.js.fncs.Lib.js.objects.jsText.JsTextTsv", "line_number": 319, "usage_type": "call"}, {"api_name": "ares.Lib.js.fncs.Lib", "line_number": 319, "usage_type": "attribute"}, {"api_name": "ares.Lib.js.fncs", "line_number": 319, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 321, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path", "line_number": 337, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 342, "usage_type": "call"}, {"api_name": "ares.Lib.js.fncs.utils.AresSiphash.SipHash", "line_number": 351, "usage_type": "call"}, {"api_name": "ares.Lib.js.fncs.utils", "line_number": 351, "usage_type": "attribute"}, {"api_name": "ares.Lib.js.fncs", "line_number": 351, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 352, "usage_type": "call"}, {"api_name": "time.time", "line_number": 352, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 355, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path", "line_number": 366, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 366, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 367, "usage_type": "call"}, {"api_name": "os.path", "line_number": 367, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 369, "usage_type": "call"}]} +{"seq_id": "8709266345", "text": "\n# inmporting nessesry libreries\nfrom collections import OrderedDict\nfrom ast import Try\nfrom operator import concat\nimport numpy as np\nimport pandas as pd\nfrom openpyxl.styles.borders import Border, Side\nfrom openpyxl import load_workbook\n\n#load excel file\ntry:\n book = load_workbook(filename=\"octant_input.xlsx\")\nexcept:\n print(\"unable to find the input file\")\n exit()\n \n#open workbook\nsheet = book.active\n \n#definging the property of border\nthin_border = Border(left=Side(style='thin'), \n right=Side(style='thin'), \n top=Side(style='thin'), \n bottom=Side(style='thin'))\n\n# ------------------------------------------------taking input file---------------------------------------------------------------\n# appling try and except for checking the file \ntry:\n #reading the input file and storing its value in the df \n df = pd.read_excel('octant_input.xlsx')\nexcept:\n print(\"unable to find the input file\")\n exit()\n\nprint(\"Column headings:\")\nprint(df)\n# ----------------------------------------------calculating the value of mean of column U,V and W-----------------------------------\ntry:\n meanU=df['U'].mean()\n meanV=df['V'].mean()\n 
meanW=df['W'].mean()\nexcept:\n print(\"unable to find the mean\")\n\nsheet = book.active\n\n\nmeanU=df['U'].mean()\nmeanV=df['V'].mean()\nmeanW=df['W'].mean()\n# adding the mean value to the new column\nsheet.cell(row=1, column=5).value =\"Uavg\"\nsheet.cell(row=2, column=5).value =meanU\n\nsheet.cell(row=1, column=6).value =\"Vavg\"\nsheet.cell(row=2, column=6).value =meanV\n\nsheet.cell(row=1, column=7).value =\"Wavg\"\nsheet.cell(row=2, column=7).value =meanW \n\n\n# saving the output to the output file\nbook.save(\"octant_output_ranking_excel.xlsx\")\n# reading the same saved output value to new again and store it into df\ndf = pd.read_excel('octant_output_ranking_excel.xlsx')\n# ---------------------------------------------- calculating error in U,V and W----------------------------------\ndf['U-Uavg']=df['U']-meanU\ndf['V-Vavg']=df['V']-meanV\ndf['W-Wavg']=df['W']-meanW\n# ----------------------------------------------below code for desiding the octant----------------------------------------------------------\n # creating a list for octant values\noctant=[] \n # calculating the Octant value .\ntry:\n for (l, m, q) in zip(df['U-Uavg'],df['V-Vavg'],df['W-Wavg']):\n # appanding the octannt values to the list\n try:\n if l>0 and m>0 and q<0:\n octant.append(-1)\n elif l>0 and m>0 and q>0:\n octant.append(1)\n elif l<0 and m>0 and q<0:\n octant.append(-2)\n elif l<0 and m>0 and q>0:\n octant.append(2)\n elif l<0 and m<0 and q<0:\n octant.append(-3)\n elif l<0 and m<0 and q>0:\n octant.append(3)\n elif l>=0 and m<=0 and q<0:\n octant.append(-4)\n elif l>0 and m<0 and q>0:\n octant.append(4) \n except:\n print(\"error in appending the value\")\n df['octant'] = octant\nexcept:\n print(\"somting wrong with the columns U-Uavg,V-Vavg,W-Wavg \")\n\n# saving the file in out put file\nwith pd.ExcelWriter('octant_output_ranking_excel.xlsx') as writer:\n\n\n df.to_excel(writer, sheet_name='Sheet_name_1',index=False)\nprint(\"sucssesfully done!\")\n# reading the same file again in workbook\ntry:\n book = load_workbook(filename=\"octant_output_ranking_excel.xlsx\")\nexcept:\n print(\"error in readin the book\")\n exit()\n#open workbook\nsheet = book.active\n\n# making the desire heading\nsheet.cell(row=4, column=12).value =\"User input\"\nsheet.cell(row=4, column=12).border = thin_border\n\n\n# mod value\nmod=5000\nsheet = book.active\nsheet.cell(row=4, column=13).value =\"mod \"+str(mod)\nsheet.cell(row=4, column=13).border = thin_border\n\n# making our desire desired heading\n\n\n\nsheet.cell(row=2, column=13).value =\"Octant ID\"\nsheet.cell(row=2, column=13).border = thin_border\n\nsheet.cell(row=2, column=14).value =1\nsheet.cell(row=2, column=14).border = thin_border\n\nsheet.cell(row=2, column=15).value =-1\nsheet.cell(row=2, column=15).border = thin_border\n\nsheet.cell(row=2, column=16).value =2\nsheet.cell(row=2, column=16).border = thin_border\n\nsheet.cell(row=2, column=17).value =-2\nsheet.cell(row=2, column=17).border = thin_border\n\nsheet.cell(row=2, column=18).value =3\nsheet.cell(row=2, column=18).border = thin_border\n\nsheet.cell(row=2, column=19).value =-3\nsheet.cell(row=2, column=19).border = thin_border\n\nsheet.cell(row=2, column=20).value =4\nsheet.cell(row=2, column=20).border = thin_border\n\nsheet.cell(row=2, column=21).value =-4\nsheet.cell(row=2, column=21).border = thin_border\n\nsheet.cell(row=1, column=22).value =1\nsheet.cell(row=1, column=22).border = thin_border\nsheet.cell(row=2, column=22).value =\"Rank 1\"\nsheet.cell(row=2, column=22).border = 
thin_border\n\nsheet.cell(row=1, column=23).value =-1\nsheet.cell(row=1, column=23).border = thin_border\nsheet.cell(row=2, column=23).value =\"Rank 2\"\nsheet.cell(row=2, column=23).border = thin_border\n\nsheet.cell(row=1, column=24).value =2\nsheet.cell(row=1, column=24).border = thin_border\nsheet.cell(row=2, column=24).value =\"Rank 3\"\nsheet.cell(row=2, column=24).border = thin_border\n\nsheet.cell(row=1, column=25).value =-2\nsheet.cell(row=1, column=25).border = thin_border\nsheet.cell(row=2, column=25).value =\"Rank 4\"\nsheet.cell(row=2, column=25).border = thin_border\n\nsheet.cell(row=1, column=26).value =3\nsheet.cell(row=1, column=26).border = thin_border\nsheet.cell(row=2, column=26).value =\"Rank 5\"\nsheet.cell(row=2, column=26).border = thin_border\n\nsheet.cell(row=1, column=27).value =-3\nsheet.cell(row=1, column=27).border = thin_border\nsheet.cell(row=2, column=27).value =\"Rank 6\"\nsheet.cell(row=2, column=27).border = thin_border\n\nsheet.cell(row=1, column=28).value =4\nsheet.cell(row=1, column=28).border = thin_border\nsheet.cell(row=2, column=28).value =\"Rank 7\"\nsheet.cell(row=2, column=28).border = thin_border\n\nsheet.cell(row=1, column=29).value =-4\nsheet.cell(row=1, column=29).border = thin_border\nsheet.cell(row=2, column=29).value =\"Rank 8\"\nsheet.cell(row=2, column=29).border = thin_border\n\nsheet.cell(row=2, column=30).value =\"Rank1 Octant ID\"\nsheet.cell(row=2, column=30).border = thin_border\n\nsheet.cell(row=2, column=31).value =\"Rank1 Octant Name\"\nsheet.cell(row=2, column=31).border = thin_border\n\nsheet.cell(row=3, column=13).value =\"Overall Count\"\nsheet.cell(row=3, column=13).border = thin_border\n\n# below code calculates the overall count of each octant and its rank\ndict = {}\nfor k in range(1,5):\n    maxo=0\n    maxi=0\n    for i in range(14,22,2):\n    \n        if(sheet.cell(row=2, column=i).value==k):\n            sheet.cell(row=3, column=i).value =df['octant'].value_counts()[k]\n            sheet.cell(row=3, column=i).border = thin_border\n            dict[df['octant'].value_counts()[k]]=k\n    for i in range(15,22,2):\n    \n        if(sheet.cell(row=2, column=i).value==-1*k):\n            sheet.cell(row=3, column=i).value =df['octant'].value_counts()[-1*k]\n            sheet.cell(row=3, column=i).border = thin_border\n            dict[df['octant'].value_counts()[-1*k]]=-1*k\n\n# Creates a sorted dictionary (sorted by key)\n\n# sorting the dictionary\ndict1 = OrderedDict(sorted(dict.items()))\nk=8\nq=0\ndict2 ={}\nfor key in dict1:\n\n    dict2[dict1[key]]=k\n    k=k-1\nfor k in range(1,5):\n    for i in range(23,30,2):\n        if(sheet.cell(row=1, column=i).value==-1*k):\n            sheet.cell(row=3, column=i).value =dict2[-1*k]\n            sheet.cell(row=3, column=i).border = thin_border\n\n    for i in range(22,30,2):\n        if(sheet.cell(row=1, column=i).value==k):\n            sheet.cell(row=3, column=i).value =dict2[k]\n            sheet.cell(row=3, column=i).border = thin_border\n        if(dict2[k]==1):\n            q=k    \n        if(dict2[-1*k]==1):\n            q=-1*k    \n\ndict0={1:\"Internal outward interaction\",-1:\"External outward interaction\",2:\"External Ejection\",-2:\"Internal Ejection\",3:\"External inward interaction\",-3:\"Internal inward interaction\",4:\"Internal sweep\",-4:\"External sweep\"}\nsheet.cell(row=3, column=30).value =q\nsheet.cell(row=3, column=30).border = thin_border\nsheet.cell(row=3, column=31).value =dict0[q]\nsheet.cell(row=3, column=31).border = thin_border\n    \ntry:\n    leng=df['octant'].count()\nexcept:\n    print(\"error in leng\")\n\n\nprint(leng)    \n\n\n    \n# putting the value of mod in k\nk=mod\n\n\nz=int(leng/k)+1\n\n# number of rows in the output\nR = int(z)\n\n# column where my loop 
ends\nC = int(22)\n \n\n# creating the list for octant value\nnumb=[1,-1,2,-2,3,-3,4,-4]\n \n\n\n\n# making a dictionary for calculating the output of the box below\ndict5={\"Internal outward interaction\":0,\"External outward interaction\":0,\"External Ejection\":0,\"Internal Ejection\":0,\"External inward interaction\":0,\"Internal inward interaction\":0,\"Internal sweep\":0,\"External sweep\":0}\n\n\nfor i in range(R):\n    \n   \n    dict = {}\n    for j in range(13,C): \n        if j==13:\n            # condition for last row of output to check the length of input so it stops when the input length is reached\n            if i==R-1:\n                u=str(i*k)+\"-\"+str(leng) \n                sheet.cell(row=i+5, column=j).value =u\n                sheet.cell(row=i+5, column=j).border = thin_border\n            else: \n                u=str(i*k)+\"-\"+str(k*(i+1)-1) \n                sheet.cell(row=i+5, column=j).value =u\n                sheet.cell(row=i+5, column=j).border = thin_border\n        else: \n            # for counting the values of different octants i am making a new variable count\n            count=0\n            for xx in range(i*k,k*(i+1)):\n                if xx==leng:\n                    break\n                \n                if df['octant'][xx]==numb[j-14]:\n                    count=count+1\n            # below code for storing the number of counts of a certain octant in a dictionary in count:octant manner\n            dict[count]=numb[j-14]\n            sheet.cell(row=i+5, column=j).value =count\n            sheet.cell(row=i+5, column=j).border = thin_border\n\n    \n     \n#   sorting the dictionary by its value order\n    dict1 = OrderedDict(sorted(dict.items()))\n    # using this variable to find the rank in order\n    kk=8\n#   below variable is for Octant name \n    q=0\n#   making a new dictionary for storing the rank in the desired order\n    dict2 ={}\n    dict3={1:\"Internal outward interaction\",-1:\"External outward interaction\",2:\"External Ejection\",-2:\"Internal Ejection\",3:\"External inward interaction\",-3:\"Internal inward interaction\",4:\"Internal sweep\",-4:\"External sweep\"}\n    for key in dict1:\n        # storing the octant value with value equal its rank decreasing order\n        dict2[dict1[key]]=kk\n        kk=kk-1\n\n    # below code for the matrix after the input value \n    for qq in range(1,5):\n        # positive 1,2,3,4\n        for j in range(23,30,2):\n            if(sheet.cell(row=1, column=j).value==-1*qq):\n                sheet.cell(row=i+5, column=j).value =dict2[-1*qq]\n                sheet.cell(row=i+5, column=j).border = thin_border\n#       for negative -1,-2,-3,-4\n        for j in range(22,30,2):\n            if(sheet.cell(row=1, column=j).value==qq):\n                sheet.cell(row=i+5, column=j).value =dict2[qq]\n                sheet.cell(row=i+5, column=j).border = thin_border\n        # below for rank 1 octant value\n        if(dict2[qq]==1):\n            q=qq\n        if(dict2[-1*qq]==1):\n            q=-1*qq    \n    # write in a cell\n    sheet.cell(row=i+5, column=30).value =q\n    sheet.cell(row=i+5, column=30).border = thin_border\n    \n    sheet.cell(row=i+5, column=31).value =dict3[q]\n    dict5[dict3[q]]+=1\n    sheet.cell(row=i+5, column=31).border = thin_border\n\n\n\n# below variable tracks the row number to help position the below matrix\nRR=R+8\n# column where the below matrix starts\ncolnew=14\n\n# heading of below matrix\nsheet.cell(row=RR, column=14).value =\"Octant ID\"\nsheet.cell(row=RR, column=14).border = thin_border\n\nsheet.cell(row=RR, column=15).value =\"Octant Name \"\nsheet.cell(row=RR, column=15).border = thin_border\n\nsheet.cell(row=RR, column=16).value =\"Count of Rank 1 Mod Values\"\nsheet.cell(row=RR, column=16).border = thin_border\n# keep tracking the row number\nRR+=1\n\n# calculating the values of the below matrix\nfor i in range(1,5):\n# for positive value i\n    sheet.cell(row=RR, column=14).value =i\n    sheet.cell(row=RR, column=14).border = thin_border\n\n    sheet.cell(row=RR, column=15).value =dict3[i]\n    sheet.cell(row=RR, 
column=15).border = thin_border\n\n    sheet.cell(row=RR, column=16).value =dict5[dict3[i]]\n    sheet.cell(row=RR, column=16).border = thin_border\n    # keep tracking the row number\n    RR+=1\n# for negative value of i\n    sheet.cell(row=RR, column=14).value =-1*i\n    sheet.cell(row=RR, column=14).border = thin_border\n\n    sheet.cell(row=RR, column=15).value =dict3[-1*i]\n    sheet.cell(row=RR, column=15).border = thin_border\n\n    sheet.cell(row=RR, column=16).value =dict5[dict3[-1*i]]\n    sheet.cell(row=RR, column=16).border = thin_border\n# keep tracking the row number\n    RR+=1\n\nbook.save(\"octant_output_ranking_excel.xlsx\")", "repo_name": "Rohit312003/2001EE56_2022", "sub_path": "tut05/tut05.py", "file_name": "tut05.py", "file_ext": "py", "file_size_in_byte": 13337, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 13, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Border", "line_number": 22, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Side", "line_number": 22, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Side", "line_number": 23, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Side", "line_number": 24, "usage_type": "call"}, {"api_name": "openpyxl.styles.borders.Side", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 66, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 102, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 109, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 228, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "17096234390", "text": "\nfrom datetime import *\n\n# The Spark Challenge Fall 2021\n# Student Check-In Validation and Tracking\n# Date Modified : 12/06/2021\n# Import ECE Student PUID from verification file\n# Import Matching Data From Records\n# Track the number of visits\n# Logs the transcript of visits\n\n# Time Check\n# now = datetime.now()\n# current_time = now.strftime(TIME_FORMAT)\n\n# Visit Limit\nCOUNT_LIMIT = 1\n\n# Import verification\nSTUDENT_ID_FILE = 'verification.txt'\n\n# Visit Logs\nVISITATION_LOG_FILE = 'checkout_logs.txt'\n\n# datetime standard format\nTIME_FORMAT = '%H:%M:%S'\n\n\n# Data Structures\nclass student:\n    def __init__(self, puid):\n        self.puid = puid\n        self.count = 1\n\n#Import Student ID's from verification file\n\ndef initialize():\n    print('Reading the Database...')\n\n    # Load ECE student IDs from file\n    with open(STUDENT_ID_FILE, 'r') as myFile:\n        for line in myFile.readlines():\n            ECEStudents.append(line.strip())\n\n    print('Validating visit history...')\n    # Load visit transcript file - populate student dictionary\n    with open(VISITATION_LOG_FILE, 'r') as myFile:\n        for line in myFile.readlines():\n            line = line.split('-')\n\n            # Input verification check\n            if(len(line) != 3):\n                print('ERROR READING IN STUDENT HISTORY DATABASE')\n                return\n\n            #Extract information from line\n            [PUID, count, time] = line\n\n            if PUID in studentDB.keys():\n                # Student previously added - UPDATE COUNT\n                if int(count) > studentDB[PUID].count:\n                    studentDB[PUID].count = int(count)\n            else:\n                # New Student, add to dictionary\n                newstudent = student(PUID.strip())\n                studentDB[PUID.strip()] = newstudent\n                studentDB[PUID].count = int(count)\n\n    #populates the 
student list with checkin_puid.txt\n with open(\"checkin_logs.txt\", 'r') as myFile:\n for line in myFile.readlines():\n line = line.split('-')\n\n # Input verification check\n if(len(line) != 3):\n print('ERROR READING IN STUDENT HISTORY DATABASE')\n return\n #Extract information from line\n [PUID, count, time] = line\n\n if PUID in studentDB.keys():\n # Student previously added - UPDATE COUNT\n if int(count) > studentDB[PUID].count:\n studentDB[PUID].count = int(count)\n else:\n # New Student, add to dictionary\n newstudent = student(PUID.strip())\n studentDB[PUID.strip()] = newstudent\n studentDB[PUID].count = int(count)\n\n print(studentDB.keys);\n\ndef verfifyStudent():\n PUID = ''\n count = 0\n\n # Get PUID Number from scanner\n while PUID == '':\n PUID = input('Swipe PUID...')\n PUID = PUID.strip()\n PUID = PUID.split('=')\n\n try:\n PUID = PUID[2]\n except IndexError:\n print('ERROR READING CARD')\n PUID = '' # Your while loop is running when PUID is empty --> Need to reset to prevent continuing to next part even though the swipe failed because it still produces something\n \n PUID = PUID[1:] #commented out so I don't have to go brain damage mode to input my stuff and test\n #uncomment the following ^ line based on verification file: PUID = PUID[1:] if verification file is in the format:\n # 12312312 as opposed to 0012312312\n\n if PUID == 'exit':\n print('Exit Success')\n exit(0)\n\n print(studentDB.keys())\n # Check if student has visited before\n if PUID in studentDB.keys():\n # Verify visit is valid\n if studentDB[PUID].count >= 2:\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n!!!Already Checked-Out!!!\\n')\n print(f'Swipe Count More Than {studentDB[PUID].count}\\n')\n else:\n #Add Vivek's code here after checking with altay\n #if time <30 mins, send a print statement that asks if they are sure they want to check out\n #If response == no|NO|No|nO or whatever, break, do not execute the rest of the loop\n #Else respnse == YESSIR IDGAF ABOUT MY GRADE, excecute the below statement\n #checkout() should be its own helper function rather than a subfunction of checkin.py\n #Is this the checkout?\n\n ##Gets the amount of time the student has been there\n with open('checkin_logs.txt', 'r') as logs:\n for line in logs.readlines():\n # Finding check in time based on PUID\n if (line.split('-')[0] == PUID):\n checkinTime = line.split('-')[2][:-1]\n checkoutTime = datetime.now().strftime(TIME_FORMAT)\n checkoutTime = datetime.strptime(checkoutTime, TIME_FORMAT)\n checkinTime = datetime.strptime(checkinTime, TIME_FORMAT)\n diff = checkoutTime - checkinTime\n\n if diff.total_seconds() > 1800: #If the student has been there for more than 30(1800s) minutes, they can checkout\n studentDB[PUID].count += 1\n count = studentDB[PUID].count\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nCheck-Out Successful!')\n print(f'Swipe Count: {studentDB[PUID].count}')\n\n checkoutTime = datetime.now().strftime(TIME_FORMAT)\n print(f'\\nCheckout Time: {checkoutTime}')\n \n # Converts to datetime object\n checkoutTime = datetime.strptime(checkoutTime, TIME_FORMAT)\n \n # Add visit information to transcript log\n with open('checkout_logs.txt', 'a') as myFile:\n myFile.writelines(f'{PUID}-{count}-{datetime.now().strftime(TIME_FORMAT)}\\n')\n\n with open('checkout_puid.txt', 'a') as myFile:\n 
myFile.writelines(f'{PUID}\\n')\n\n with open('checkout_time.txt', 'a') as myFile:\n myFile.writelines(f'{checkoutTime}\\n')\n\n # Opening previous logs with PUID, num checkin, and time checkin\n with open('checkin_logs.txt', 'r') as logs:\n for line in logs.readlines():\n # Finding check in time based on PUID\n if (line.split('-')[0] == PUID):\n checkinTime = line.split('-')[2][:-1] # The [:-2] ignores the last two characters \"\\n\"\n print(f'Checkin Time: {checkinTime}')\n\n # Converts to datetime object\n checkinTime = datetime.strptime(checkinTime, TIME_FORMAT)\n\n # Calculates difference between checkout time and check in time\n diff = checkoutTime - checkinTime\n print(f'Time Stayed: {diff}')\n\n # Write the time difference to file\n with open('checkout_difference.txt', 'a') as myFile:\n myFile.writelines(f'{PUID}-{str(diff)}\\n')\n else: #Print saying what conditions are not met\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n30 minutes have not elapsed yet.\\nTime left till 30 minutes have elapsed:\", (int)(30 - diff.total_seconds()/60))\n \n # New Student visitor, verify ECE student and add to transcript\n else:\n if PUID in ECEStudents:\n checkinTime = datetime.now().strftime(TIME_FORMAT)\n studentDB[PUID] = student(PUID)\n\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nCheck-In Successful!')\n print(f'Check-In Time : {checkinTime}')\n print(f'Swipe Count : {studentDB[PUID].count}\\n')\n\n with open('checkin_logs.txt', 'a') as myFile:\n myFile.writelines(f'{PUID}-1-{checkinTime}\\n')\n\n with open('checkin_puid.txt', 'a') as myFile:\n myFile.writelines(f'{PUID}\\n')\n\n with open('checkin_time.txt', 'a') as myFile:\n myFile.writelines(f'{checkinTime}\\n')\n else:\n print('\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n!!!NOT AN ECE STUDENT!!!')\n\nif __name__ == \"__main__\":\n # Establish Data Structures\n studentDB = {}\n ECEStudents = []\n\n # Initialize\n print('Spark Check-In Script Initalizing')\n initialize()\n\n print('System initialized successfully. 
Type exit to halt program safely.\\n\\n')\n # Begin program\n while(True):\n response = verfifyStudent()\n \n", "repo_name": "altayatik/sparkchallenge", "sub_path": "checkin.py", "file_name": "checkin.py", "file_ext": "py", "file_size_in_byte": 8852, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 138, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 139, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.now", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "22867255519", "text": "from math import gcd\nfrom functools import reduce\nfrom warnings import warn\nimport argparse\nfrom recip import RecipParser\n\ndef lcm(a, b):\n return (a*b) // gcd(a, b)\n\ndef run():\n parser = argparse.ArgumentParser()\n parser.add_argument('mode',\n choices=['violations', 'meter', 'timebase'])\n parser.add_argument('recip_file')\n parser.add_argument('--timebase', type=int, default=1,\n help='represent durations on timebase, '\n 'incompatible with timebase mode')\n args = parser.parse_args()\n # Read the data\n with open(args.recip_file) as f:\n recip_lines = [l.strip() for l in f]\n\n parser = RecipParser(timebase=args.timebase)\n\n tolerance = 10e-9 # floating-point error tolerance\n report = {}\n note_denominators = {1} # set of note denominators\n if args.mode == 'meter':\n print('name,meter_segment,start_time,numerator,denominator')\n for i, line in enumerate(recip_lines):\n name, *tokens = line.split(' ')\n violations = []\n period, last_barline, last_meter_time, last_meter = None, None, None, None\n meter_segment, bar_position, absolute_time = -1, 0, 0\n #if name != 'NLB122808_01': continue\n for parsed in parser.parse(tokens):\n #print(parsed)\n token_type = parsed['type']\n if token_type == METER:\n if args.mode == 'meter':\n # Deal with a bunch of stupid edge case\n meter = (parsed['numerator'], parsed['denominator'])\n if last_meter_time != absolute_time and meter != last_meter:\n meter_segment += 1\n # Only update when the meter has changed\n if meter != last_meter:\n print('{},{},{},{},{}'.format(name, meter_segment, absolute_time, meter[0], meter[1]))\n last_meter_time = absolute_time\n last_meter = meter\n period = parser.meter_period(parsed)\n if period is not None:\n if token_type in DURATION_TYPES:\n dt = parser.duration(parsed)\n bar_position += dt\n absolute_time += dt\n #print(absolute_time)\n denom = parsed['value']\n if bar_position > period + tolerance:\n violation = 'Duration of bar {} exceeded: {}'.format(('initial' if last_barline is None else last_barline['number']), bar_position)\n violations.append(violation)\n if denom % 1 != 0:\n warn('Non-integer denominator encountered')\n elif denom != 0:\n note_denominators.add(int(denom))\n #print(bar_position)\n if token_type in BARLINE_TYPES:\n barnum = parsed['number']\n if parsed['final']:\n barnum = 'final'\n if last_barline is None: \n if absolute_time > 0:\n absolute_time = period # we're at the end of a pickup \n barnum = 'initial'\n if barnum not in ['final', 'initial'] and bar_position + tolerance < period:\n violation = 
('Bar {} is incomplete: ' .format(barnum) +\n 'bar position ({}) not equal to period ({})'.format(bar_position, period))\n violations.append(violation)\n bar_position = 0\n last_barline = parsed\n #print(absolute_time)\n if len(violations) > 0:\n report[name, i+1] = violations\n\n if args.mode == 'timebase':\n print(reduce(lcm, note_denominators))\n if args.mode == 'violations':\n print('name')\n for key in report.keys():\n print(key[0])\n # print('Number of violations: {}'.format(len(report)))\n # print('Files with violations: {}'.format(', '.join(map(lambda x: x[0], report.keys()))))\n # print('Violations per file:\\n{}'.format(\n # '\\n'.join('{}:\\n{}'.format(key, '\\n'.join('\\t{}'.format(v) for v in report[key])) for key in report.keys())))\n\nif __name__ == '__main__':\n report = run()\n", "repo_name": "bjvanderweij/rhythm", "sub_path": "rhythm/reciptools.py", "file_name": "reciptools.py", "file_ext": "py", "file_size_in_byte": 4321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "math.gcd", "line_number": 8, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "recip.RecipParser", "line_number": 23, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 62, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "14070815598", "text": "from logging import raiseExceptions\nimport numpy as np\n\nimport random\nimport pickle\n\nfrom collections import defaultdict, Counter\nfrom itertools import product\nimport gzip\n\nfrom google.cloud import storage\n\n\nclass MarkovGenerator:\n def __init__(self, n):\n self.n = n\n\n self.ngram = defaultdict(list)\n self.frequency_table = None\n self.probability_table = None\n self.vocab = set([\"$\", \"#\"])\n self.v2i = dict()\n self.g2i = dict()\n self.lite = False\n\n def add_gram(self, gram, next_state):\n if gram not in self.g2i:\n self.g2i[gram] = len(self.g2i)\n self.ngram[self.g2i[gram]].append(next_state)\n\n def add_the_rest_gram(self):\n non_terminal_vocab = self.vocab - set([\"$\", \"#\"])\n for i in range(1, self.n+1):\n all_gram = product(list(non_terminal_vocab), repeat=i)\n for gram in all_gram:\n gr = \"$\"*(self.n-i) + \"\".join(gram)\n if gr not in self.g2i:\n self.g2i[gr] = len(self.g2i)\n\n def fit(self, corpus, smooth=True):\n if self.lite:\n raise ValueError(\"Lite model cannot be updated\")\n\n for doc in corpus:\n self.vocab |= set(doc)\n for idx in range(self.n):\n start = \"$\"*(self.n-idx) + doc[:idx+1]\n self.add_gram(start[:self.n], start[-1])\n for idx in range(len(doc)-self.n):\n self.add_gram(doc[idx:idx+self.n],doc[idx+self.n])\n self.add_gram(doc[-(self.n):], \"#\")\n\n if self.v2i:\n for v in sorted(list(self.vocab)):\n if v not in self.v2i:\n self.v2i[v] = len(self.v2i)\n else:\n self.v2i = {v: i for i, v in enumerate(sorted(list(self.vocab)))}\n self.i2v = {i: v for i, v in enumerate(sorted(list(self.v2i)))}\n valid_gram = set()\n\n if smooth:\n # Insert Other valid ngram to g2i\n self.add_the_rest_gram()\n self.probability_table = np.ones(\n (len(self.g2i), len(self.vocab)), dtype=np.int32)\n self.probability_table[:, self.v2i[\"$\"]] = 0\n self.probability_table = np.cumsum(\n self.probability_table / len(self.vocab)-1, 1\n )\n else:\n self.probability_table = np.zeros(\n (len(self.g2i), len(self.vocab)), dtype=np.float32)\n \n for gram_idx, next_list in self.ngram.items():\n 
valid_gram.add(gram_idx)\n counter = Counter(next_list)\n frequency_table = np.full(\n (len(self.vocab),), int(smooth), dtype=np.int32)\n frequency_table[self.v2i[\"$\"]] = 0\n\n for ch, freq in counter.items():\n frequency_table[self.v2i[ch]] += freq\n self.probability_table[gram_idx] = np.cumsum(\n frequency_table /\n (len(next_list)+int(smooth)*(len(self.vocab)-1))\n )\n\n def generate(self, seed=\"\", max_length=100, max_words=99999):\n current_state = \"$\"*(self.n-len(seed)) + seed\n generated_string = seed\n white_space_counter = 0\n while (\n (current_state[-1] != \"#\") and\n (len(generated_string) <= max_length)\n ):\n rng = random.random()\n char_idx = np.argmax(self.probability_table[self.g2i[current_state]] > rng)\n char = self.i2v[char_idx]\n if char == \" \":\n white_space_counter += 1\n if white_space_counter >= max_words:\n char = \"#\"\n if char != \"#\":\n generated_string += char\n current_state = current_state[1:] + char\n return generated_string\n \n def save(self, fname, lite=False):\n param_to_save = {\n \"probability_table\": self.probability_table,\n \"n\": self.n,\n \"g2i\": self.g2i,\n \"i2v\": self.i2v,\n \"vocab\": set(),\n \"model\": defaultdict(list),\n \"v2i\": dict(),\n \"lite_mode\": lite\n }\n if lite:\n with gzip.open(fname, \"wb+\") as f:\n pickle.dump(param_to_save, f)\n return\n\n param_to_save[\"model\"] = self.ngram\n param_to_save[\"v2i\"] = self.v2i\n param_to_save[\"vocab\"] = self.vocab\n with open(fname, \"wb\") as f:\n pickle.dump(param_to_save, f)\n\n @staticmethod\n def load(fname, bucket_name=\"\"):\n if bucket_name:\n data = _open_gcs(fname, bucket_name)\n else:\n data = _open_file(fname)\n markov = MarkovGenerator(data[\"n\"])\n markov.ngram = data[\"model\"]\n markov.vocab = data[\"vocab\"]\n markov.probability_table = data[\"probability_table\"]\n markov.v2i = data[\"v2i\"]\n markov.i2v = data[\"i2v\"]\n markov.g2i = data[\"g2i\"]\n return markov\n\n\ndef _open_file(fname):\n try:\n with open(fname, \"rb\") as f:\n return pickle.load(f)\n except pickle.UnpicklingError:\n with gzip.open(fname, \"rb\") as f:\n return pickle.load(f)\n \n\ndef _open_gcs(fname, bucket_name):\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n fblob = bucket.get_blob(fname)\n with fblob.open(\"rb\") as f:\n content = gzip.GzipFile(fileobj=f).read()\n return pickle.loads(content)\n", "repo_name": "ajmalkurnia/name-generator", "sub_path": "model/markov.py", "file_name": "markov.py", "file_ext": "py", "file_size_in_byte": 4740, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "collections.defaultdict", "line_number": 18, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 84, "usage_type": "call"}, {"api_name": "random.random", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.argmax", 
"line_number": 98, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 116, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 121, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 122, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 129, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 150, "usage_type": "call"}, {"api_name": "pickle.UnpicklingError", "line_number": 151, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 152, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 153, "usage_type": "call"}, {"api_name": "google.cloud.storage.Client", "line_number": 157, "usage_type": "call"}, {"api_name": "google.cloud.storage", "line_number": 157, "usage_type": "name"}, {"api_name": "gzip.GzipFile", "line_number": 161, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 162, "usage_type": "call"}]} +{"seq_id": "34470995537", "text": "# -*- coding: utf-8 -*-\r\n# 六面体数据\r\n# ------------------------------------------------------\r\n# v4----- v5\r\n# /| /|\r\n# v0------v1|\r\n# | | | |\r\n# | v7----|-v6\r\n# |/ |/\r\n# v3------v2\r\n\r\n# 顶点集\r\n\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nfrom OpenGL.arrays import vbo\r\nimport numpy as np\r\n\r\nglutInit()\r\nglutCreateWindow(b'Example 3')\r\n\r\nvertices = np.array([\r\n -0.5, 0.5, 0.5,\r\n 0.5, 0.5, 0.5,\r\n 0.5, -0.5, 0.5,\r\n -0.5, -0.5, 0.5,\r\n -0.5, 0.5, -0.5,\r\n 0.5, 0.5, -0.5,\r\n 0.5, -0.5, -0.5,\r\n -0.5, -0.5, -0.5\r\n], dtype=np.float32)\r\n\r\n# 索引集\r\n\r\nindices = np.array([\r\n 0, 1, 2, 3, # v0-v1-v2-v3 (front)\r\n 4, 5, 1, 0, # v4-v5-v1-v0 (top)\r\n 3, 2, 6, 7, # v3-v2-v6-v7 (bottom)\r\n 5, 4, 7, 6, # v5-v4-v7-v6 (back)\r\n 1, 5, 6, 2, # v1-v5-v6-v2 (right)\r\n 4, 0, 3, 7 # v4-v0-v3-v7 (left)\r\n], dtype=np.int)\r\n\r\nvbo_vertices = vbo.VBO(vertices)\r\nvbo_vertices.bind()\r\nglInterleavedArrays(GL_V3F, 0, None)\r\n\r\nvbo_indices = vbo.VBO(indices, target=GL_ELEMENT_ARRAY_BUFFER)\r\nvbo_indices.bind()\r\n\r\n\r\ndef draw():\r\n glDrawElements(GL_QUADS, int(vbo_indices .size/4), GL_UNSIGNED_INT, None)\r\n glutSwapBuffers() # 切换缓冲区,以显示绘制内容\r\n\r\n\r\nif __name__ == '__main__':\r\n glutDisplayFunc(draw)\r\n glutMainLoop()\r\n", "repo_name": "listenzcc/CT-Display", "sub_path": "opengl-examples/example3.py", "file_name": "example3.py", "file_ext": "py", "file_size_in_byte": 1359, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 44, "usage_type": "attribute"}, {"api_name": "OpenGL.arrays.vbo.VBO", "line_number": 46, "usage_type": "call"}, {"api_name": "OpenGL.arrays.vbo", "line_number": 46, "usage_type": "name"}, {"api_name": "OpenGL.arrays.vbo.VBO", "line_number": 50, "usage_type": "call"}, {"api_name": "OpenGL.arrays.vbo", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "32265486136", "text": "import numpy as np\r\nimport sklearn.mixture as skm\r\nimport cv2\r\n\r\ndef update_data_loss(data_loss, pixels, pcoors, gmms):\r\n for i in range(2):\r\n _ds = -gmms[i].score_samples(pixels)\r\n for j, coors in enumerate(pcoors):\r\n data_loss[coors[0], coors[1], i] = _ds[j]\r\n\r\ndef update_ks(ks, im, gmms, alphas):\r\n for 
i in range(2):\r\n _coors = np.where(alphas == i)\r\n _pxs = im[_coors]\r\n _ks = gmms[i].predict(_pxs)\r\n for j, _k in enumerate(_ks):\r\n ks[_coors[0][j], _coors[1][j]] = _k\r\n\r\ndef update_rows(alphas, data_loss, ws_hor, ws_ver, flag, s, h, w):\r\n for i in range(h):\r\n if flag[i]:\r\n ds_temp = np.zeros((w, 2))\r\n #es_temp = np.zeros((w, 2))\r\n pre = np.zeros((w, 2))\r\n ds_temp[:, 0] += data_loss[i, :, 0]\r\n ds_temp[:, 1] += data_loss[i, :, 1]\r\n if i > 0:\r\n ds_temp[:, 0] += ws_ver[i - 1, :] * s[(alphas[i - 1, :], np.zeros(w).astype(np.int))]\r\n ds_temp[:, 1] += ws_ver[i - 1, :] * s[(alphas[i - 1, :], np.ones(w).astype(np.int))]\r\n if i < h - 1:\r\n ds_temp[:, 0] += ws_ver[i, :] * s[(alphas[i + 1, :], np.zeros(w).astype(np.int))]\r\n ds_temp[:, 1] += ws_ver[i, :] * s[(alphas[i + 1, :], np.ones(w).astype(np.int))]\r\n #es_temp = ds_temp[:]\r\n for j in range(1, w):\r\n if ds_temp[j - 1, 0] < ds_temp[j - 1, 1]:\r\n ds_temp[j, 0] += ds_temp[j - 1, 0]\r\n pre[j, 0] = 0\r\n smth = ds_temp[j - 1, 0] + ws_hor[i, j - 1]\r\n if ds_temp[j - 1, 1] < smth:\r\n ds_temp[j, 1] += ds_temp[j - 1, 1]\r\n pre[j, 1] = 1\r\n else:\r\n ds_temp[j, 1] += smth\r\n pre[j, 1] = 0\r\n else:\r\n ds_temp[j, 1] += ds_temp[j - 1, 1]\r\n pre[j, 1] = 1\r\n smth = ds_temp[j - 1, 1] + ws_hor[i, j - 1]\r\n if ds_temp[j - 1, 0] < smth:\r\n ds_temp[j, 0] += ds_temp[j - 1, 0]\r\n pre[j, 0] = 0\r\n else:\r\n ds_temp[j, 0] += smth\r\n pre[j, 0] = 1\r\n new_alphas = np.array([-1]*w)\r\n if ds_temp[-1, 0] < ds_temp[-1, 1]:\r\n new_alphas[-1] = 0\r\n else:\r\n new_alphas[-1] = 1\r\n for col in range(2, w + 1):\r\n new_alphas[-col] = pre[-col + 1, new_alphas[-col + 1]]\r\n alphas[i] = new_alphas[:]\r\n\r\ndef update_cols(alphas, data_loss, ws_hor, ws_ver, flag, s, h, w):\r\n for i in range(w):\r\n if flag[i]:\r\n ds_temp = np.zeros((h, 2))\r\n #es_temp = np.zeros((w, 2))\r\n pre = np.zeros((h, 2))\r\n ds_temp[:, 0] += data_loss[:, i, 0]\r\n ds_temp[:, 1] += data_loss[:, i, 1]\r\n if i > 0:\r\n ds_temp[:, 0] += ws_hor[:, i - 1] * s[(alphas[:, i - 1], np.zeros(h).astype(np.int))]\r\n ds_temp[:, 1] += ws_hor[:, i - 1] * s[(alphas[:, i - 1], np.ones(h).astype(np.int))]\r\n if i < w - 1:\r\n ds_temp[:, 0] += ws_hor[:, i] * s[(alphas[:, i + 1], np.zeros(h).astype(np.int))]\r\n ds_temp[:, 1] += ws_hor[:, i] * s[(alphas[:, i + 1], np.ones(h).astype(np.int))]\r\n #es_temp = ds_temp[:]\r\n for j in range(1, h):\r\n if ds_temp[j - 1, 0] < ds_temp[j - 1, 1]:\r\n ds_temp[j, 0] += ds_temp[j - 1, 0]\r\n pre[j, 0] = 0\r\n smth = ds_temp[j - 1, 0] + ws_ver[j - 1, i]\r\n if ds_temp[j - 1, 1] < smth:\r\n ds_temp[j, 1] += ds_temp[j - 1, 1]\r\n pre[j, 1] = 1\r\n else:\r\n ds_temp[j, 1] += smth\r\n pre[j, 1] = 0\r\n else:\r\n ds_temp[j, 1] += ds_temp[j - 1, 1]\r\n pre[j, 1] = 1\r\n smth = ds_temp[j - 1, 1] + ws_ver[j - 1, i]\r\n if ds_temp[j - 1, 0] < smth:\r\n ds_temp[j, 0] += ds_temp[j - 1, 0]\r\n pre[j, 0] = 0\r\n else:\r\n ds_temp[j, 0] += smth\r\n pre[j, 0] = 1\r\n new_alphas = np.array([-1]*h)\r\n if ds_temp[-1, 0] < ds_temp[-1, 1]:\r\n new_alphas[-1] = 0\r\n else:\r\n new_alphas[-1] = 1\r\n for row in range(2, h + 1):\r\n new_alphas[-row] = pre[-row + 1, new_alphas[-row + 1]]\r\n alphas[:, i] = new_alphas[:]\r\n\r\ndef energy(alphas, data_loss, ws_hor, ws_ver, s, h, w):\r\n de = 0.\r\n se = 0.\r\n for i in range(h):\r\n for j in range(w):\r\n de += data_loss[i, j, alphas[i, j]]\r\n if i < h - 1:\r\n se += s[alphas[i, j], alphas[i + 1, j]] * ws_ver[i, j]\r\n if j < w - 1:\r\n se += s[alphas[i, j], alphas[i, j + 1]] * ws_hor[i, j]\r\n 
return de, se\r\n\r\ndef update_gmms(gmms, im, ks, alphas, n_compo):\r\n pis = np.zeros((2, n_compo))\r\n for a in range(2):\r\n for k in range(n_compo):\r\n k_pixels = im[np.logical_and(ks == k, alphas == a)]\r\n pis[a, k] = k_pixels.shape[0]\r\n gmms[a].means_[k] = np.mean(k_pixels, axis=0)\r\n gmms[a].covariances_[k] = np.cov(k_pixels.T)\r\n pis /= np.tile(np.sum(pis, axis=-1, keepdims=True), (1, n_compo))\r\n gmms[0].weights_ = pis[0]\r\n gmms[1].weights_ = pis[1]\r\n\r\nn_compo = 5\r\nmax_iter = 100\r\nlmbd = 10\r\nlmbd2 = 10\r\nsamples = ['flower', 'sponge', 'person']\r\n\r\nfor sample in samples:\r\n print(sample)\r\n im = cv2.imread('{}.png'.format(sample)).astype(np.float)\r\n height, width = im.shape[0], im.shape[1]\r\n fg_mask = cv2.imread('{}_fg.png'.format(sample))\r\n bg_mask = cv2.imread('{}_bg.png'.format(sample))\r\n fg_coors = (np.where(fg_mask > 0)[0][::3], np.where(fg_mask > 0)[1][::3])\r\n fg_coors_zipped = list(zip(fg_coors[0], fg_coors[1]))\r\n bg_coors = (np.where(bg_mask > 0)[0][::3], np.where(bg_mask > 0)[1][::3])\r\n bg_coors_zipped = list(zip(bg_coors[0], bg_coors[1]))\r\n fg_pixels = im[fg_coors] #np.reshape(im[fg_coors], (-1, 3))\r\n bg_pixels = im[bg_coors] #np.reshape(im[bg_coors], (-1, 3))\r\n fg_gmm = skm.GaussianMixture(n_components=n_compo, max_iter=200)\r\n bg_gmm = skm.GaussianMixture(n_components=n_compo, max_iter=200)\r\n fg_gmm.fit(fg_pixels)\r\n bg_gmm.fit(bg_pixels)\r\n ks = np.empty((height, width))\r\n ks_fg_prior = fg_gmm.predict(fg_pixels)\r\n ks_bg_prior = bg_gmm.predict(bg_pixels)\r\n for i, k in enumerate(ks_fg_prior):\r\n ks[fg_coors_zipped[i][0], fg_coors_zipped[i][1]] = k\r\n for i, k in enumerate(ks_bg_prior):\r\n ks[bg_coors_zipped[i][0], bg_coors_zipped[i][1]] = k\r\n alphas = np.array([[-1] * width] * height)\r\n alphas[fg_coors] = 0\r\n alphas[bg_coors] = 1\r\n\r\n uninit_coors = [(i, j) for i in range(height) for j in range(width) if alphas[i, j] == -1]\r\n uninit = np.array([im[coors[0], coors[1], :] for coors in uninit_coors])\r\n\r\n ks_fg = fg_gmm.predict(uninit)\r\n ds_fg = -fg_gmm.score_samples(uninit)\r\n ks_bg = bg_gmm.predict(uninit)\r\n ds_bg = -bg_gmm.score_samples(uninit)\r\n ks_fg_bg = np.array([ks_fg, ks_bg])\r\n fg_or_bg = [0 if ds_fg[i] < ds_bg[i] else 1 for i in range(len(uninit))]\r\n\r\n for i, fob in enumerate(fg_or_bg):\r\n alphas[uninit_coors[i][0], uninit_coors[i][1]] = fob\r\n ks[uninit_coors[i][0], uninit_coors[i][1]] = ks_fg_bg[fob, i]\r\n #dataLoss[uninit_coors[i][0], uninit_coors[i][1], 0] = ds_fg[i]\r\n #dataLoss[uninit_coors[i][0], uninit_coors[i][1], 1] = ds_bg[i]\r\n bayes_seg = (np.ones_like(alphas) - alphas) * 255\r\n cv2.imwrite('{}_bayes.png'.format(sample), bayes_seg.astype(np.uint8))\r\n\r\n ws_hor = np.zeros((height, width))\r\n ws_ver = np.zeros((height, width))\r\n temp = 0.\r\n count = 0\r\n for i in range(height):\r\n for j in range(width):\r\n if i + 1 < height:\r\n temp += np.sum((im[i, j] - im[i + 1, j]) ** 2)\r\n count += 1\r\n if j + 1 < width:\r\n temp += np.sum((im[i, j] - im[i, j + 1]) ** 2)\r\n count += 1\r\n beta = (2 * temp / count) ** -1\r\n ws_hor[:, :-1] = (np.exp(-beta * np.sum((im[:, 1:, :] - im[:, :-1, :]) ** 2, axis=-1)) + lmbd2) * lmbd\r\n ws_ver[:-1, :] = (np.exp(-beta * np.sum((im[1:, ...] 
- im[:-1, ...]) ** 2, axis=-1)) + lmbd2) * lmbd\r\n\r\n sij = np.ones((2,2)) - np.eye(2)\r\n\r\n update_gmms([fg_gmm, bg_gmm], im, ks, alphas, n_compo)\r\n\r\n data_loss = np.empty((height, width, 2))\r\n data_loss[fg_coors + (np.zeros(len(fg_coors_zipped)).astype(np.int),)] = 0.\r\n data_loss[fg_coors + (np.ones(len(fg_coors_zipped)).astype(np.int),)] = np.inf\r\n data_loss[bg_coors + (np.zeros(len(bg_coors_zipped)).astype(np.int),)] = np.inf\r\n data_loss[bg_coors + (np.ones(len(bg_coors_zipped)).astype(np.int),)] = 0.\r\n\r\n update_data_loss(data_loss, uninit, uninit_coors, [fg_gmm, bg_gmm])\r\n\r\n flag_rows = np.array([True] * height)\r\n flag_cols = np.array([True] * width)\r\n\r\n old_alphas = alphas[:]\r\n\r\n update_rows(alphas, data_loss, ws_hor, ws_ver, flag_rows, sij, height, width)\r\n update_cols(alphas, data_loss, ws_hor, ws_ver, flag_cols, sij, height, width)\r\n\r\n new_de, new_se = energy(alphas, data_loss, ws_hor, ws_ver, sij, height, width)\r\n new_energy = new_de + new_se\r\n print('Iteration 0 --- Energy: {} (DE={}, SE={})'.format(new_energy, new_de, new_se))\r\n\r\n #tolerance = 0\r\n\r\n for i in range(1, max_iter):\r\n old_energy = new_energy\r\n\r\n #'''\r\n old_flag_rows = flag_rows[:]\r\n old_flag_cols = flag_cols[:]\r\n flag_rows[:] = False\r\n flag_cols[:] = False\r\n for j in range(height):\r\n if old_flag_rows[j]:\r\n if np.any(old_alphas[j] != alphas[j]):\r\n if j > 0:\r\n flag_rows[j - 1] = True\r\n if j < height - 1:\r\n flag_rows[j + 1] = True\r\n for j in range(width):\r\n if old_flag_cols[j]:\r\n if np.any(old_alphas[:, j] != alphas[:, j]):\r\n if j > 0:\r\n flag_cols[j - 1] = True\r\n if j < width - 1:\r\n flag_cols[j + 1] = True\r\n #'''\r\n\r\n update_ks(ks, im, [fg_gmm, bg_gmm], alphas)\r\n update_gmms([fg_gmm, bg_gmm], im, ks, alphas, n_compo)\r\n update_data_loss(data_loss, uninit, uninit_coors, [fg_gmm, bg_gmm])\r\n\r\n old_alphas = alphas[:]\r\n\r\n update_rows(alphas, data_loss, ws_hor, ws_ver, flag_rows, sij, height, width)\r\n update_cols(alphas, data_loss, ws_hor, ws_ver, flag_cols, sij, height, width)\r\n\r\n new_de, new_se = energy(alphas, data_loss, ws_hor, ws_ver, sij, height, width)\r\n new_energy = new_de + new_se\r\n print('Iteration {} --- Energy: {} (DE={}, SE={})'.format(i, new_energy, new_de, new_se))\r\n\r\n if new_energy >= old_energy:\r\n #tolerance += 1\r\n #if tolerance > 3:\r\n break\r\n\r\n final_segs = (np.ones_like(alphas) - alphas) * 255\r\n cv2.imwrite('{}_seg.png'.format(sample), final_segs.astype(np.uint8))\r\n\r\n gt = cv2.imread('{}_gt.png'.format(sample)).astype(np.float)\r\n acc = float(np.sum(gt[..., 0]==final_segs)) / height / width\r\n print('acc: {}'.format(acc))\r\n print('\\n')", "repo_name": "zehzhang/MRF_BCD", "sub_path": "mrfBCD.py", "file_name": "mrfBCD.py", "file_ext": "py", "file_size_in_byte": 11595, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.where", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.int", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 76, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 141, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 151, "usage_type": "call"}, {"api_name": "sklearn.mixture", "line_number": 151, "usage_type": "name"}, {"api_name": "sklearn.mixture.GaussianMixture", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.mixture", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 181, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 182, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 205, 
"usage_type": "call"}, {"api_name": "numpy.int", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 206, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 207, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 207, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 268, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 269, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 271, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 272, "usage_type": "call"}]} +{"seq_id": "32029347095", "text": "from datetime import timedelta\n\n\nclass BusinessPeriod(object):\n\n def __init__(self, period='', years=0, quarters=0, months=0, weeks=0, days=0, businessdays=0):\n \"\"\" class to store and calculate date periods as combinations of days, weeks, years etc.\n\n :param str period: encoding a business period.\n Such is given by a sequence of digits as :class:`int` followed by a :class:`char` -\n indicating the number of\n years **Y**,\n quarters **Q** (which is equivalent to 3 month),\n month **M**,\n weeks **W** (which is equivalent to 7 days),\n days **D**,\n business days **B**.\n E.g. **1Y2W3D** what gives a period of 1 year plus 2 weeks and 3 days\n (see :doc:`tutorial ` for details).\n\n :param int years: number of years in the period (equivalent to 12 months)\n :param int quarters: number of quarters in the period (equivalent to 3 months)\n :param int months: number of month in the period\n :param int weeks: number of weeks in the period (equivalent to 7 days)\n :param int days: number of days in the period\n :param int businessdays: number of business days,\n i.e. 
days which are neither weekend nor :class:`holidays `, in the period.\n Only either `businessdays` or the others can be given.\n Both at the same time is not allowed.\n\n \"\"\"\n if period and any((years, months, days, businessdays)):\n raise ValueError(\n \"Either string or argument input only for %s\" % self.__class__.__name__)\n\n super(BusinessPeriod, self).__init__()\n if isinstance(period, BusinessPeriod):\n years = period.years\n months = period.months\n days = period.days\n businessdays = period.businessdays\n elif isinstance(period, timedelta):\n days = period.days\n elif period is None:\n pass\n elif isinstance(period, str):\n if period.upper() == '':\n pass\n elif period.upper() == '0D':\n pass\n elif period.upper() == 'ON':\n businessdays = 1\n elif period.upper() == 'TN':\n businessdays = 2\n elif period.upper() == 'DD':\n businessdays = 3\n else:\n s, y, q, m, w, d, f = BusinessPeriod._parse_ymd(period)\n # no final businesdays allowed\n if f:\n raise ValueError(\"Unable to parse %s as %s\" % (period, self.__class__.__name__))\n # except the first non vanishing of y,q,m,w,d must have positive sign\n sgn = [int(x / abs(x)) for x in (y, q, m, w, d) if x]\n if [x for x in sgn[1:] if x < 0]:\n raise ValueError(\n \"Except at the beginning no signs allowed in %s as %s\" % (str(period), self.__class__.__name__))\n y, q, m, w, d = (abs(x) for x in (y, q, m, w, d))\n # use sign of first non vanishing of y,q,m,w,d\n sgn = sgn[0] if sgn else 1\n businessdays, years, quarters, months, weeks, days = s, sgn * y, sgn * q, sgn * m, sgn * w, sgn * d\n else:\n raise TypeError(\n \"%s of Type %s not valid to create BusinessPeriod.\" %(str(period), period.__class__.__name__))\n\n self._months = 12 * years + 3 * quarters + months\n self._days = 7 * weeks + days\n self._businessdays = businessdays\n\n if businessdays and (self._months or self._days):\n raise ValueError(\n \"Either (years,months,days) or businessdays must be zero for %s\" % self.__class__.__name__)\n if self._months and not self._days / self._months >= 0:\n ymd = self.years, self.months, self.days\n raise ValueError(\n \"(years, months, days)=%s must have equal sign for %s\" % (str(ymd), self.__class__.__name__))\n\n @property\n def years(self):\n return int(-1 * (-1 * self._months // 12) if self._months < 0 else self._months // 12)\n\n @property\n def months(self):\n return int(-1 * (-1 * self._months % 12) if self._months < 0 else self._months % 12)\n\n @property\n def days(self):\n return int(self._days)\n\n @property\n def businessdays(self):\n return int(self._businessdays)\n\n # --- validation and information methods ---------------------------------\n\n @classmethod\n def _parse_ymd(cls, period):\n # can even parse strings like '-1B-2Y-4Q+5M' but also '0B', '-1Y2M3D' as well.\n period = period.upper().replace(' ', '')\n period = period.replace('BUSINESSDAYS', 'B')\n period = period.replace('YEARS', 'Y')\n period = period.replace('QUARTERS', 'Q')\n period = period.replace('MONTHS', 'M')\n period = period.replace('WEEKS', 'W')\n period = period.replace('DAYS', 'D')\n\n def _parse(p, letter):\n if p.find(letter) >= 0:\n s, p = p.split(letter, 1)\n s = s[1:] if s.startswith('+') else s\n sgn, s = (-1, s[1:]) if s.startswith('-') else (1, s)\n if not s.isdigit():\n raise ValueError(\"Unable to parse %s in %s as %s\" % (s, p, cls.__name__))\n return sgn * int(s), p\n return 0, p\n\n p = period.upper()\n\n # p[-1] is not 'B', p.strip('0123456789+-B')==''\n s, p = _parse(p, 'B') if not p[-1]=='B' else (0, p)\n s, p = _parse(p, 
'B') if not p.strip('0123456789+-B') else (s, p)\n s, p = _parse(p, 'B') if p.count('B') > 1 else (s, p)\n y, p = _parse(p, 'Y')\n q, p = _parse(p, 'Q')\n m, p = _parse(p, 'M')\n w, p = _parse(p, 'W')\n d, p = _parse(p, 'D')\n f, p = _parse(p, 'B')\n if not p == '':\n raise ValueError(\"Unable to parse %s as %s\" % (p, cls.__name__))\n return s, y, q, m, w, d, f\n\n @classmethod\n def is_businessperiod(cls, period):\n \"\"\" returns true if the argument can be understood as :class:`BusinessPeriod` \"\"\"\n if period is None:\n return False\n if isinstance(period, (int, float, list, set, dict, tuple)):\n return False\n if isinstance(period, (timedelta, BusinessPeriod)):\n return True\n if period in ('', '0D', 'ON', 'TN', 'DD'):\n return True\n if isinstance(period, str):\n if period.isdigit():\n return False\n #if period.upper().strip('+-0123456789BYQMWD'):\n # return False\n try: # to be removed\n BusinessPeriod._parse_ymd(period)\n except ValueError:\n return False\n return True\n return False\n\n # --- operator methods ---------------------------------------------------\n\n def __repr__(self):\n return self.__class__.__name__ + \"('%s')\" % str(self)\n\n def __str__(self):\n\n if self.businessdays:\n period_str = str(self.businessdays) + 'B'\n else:\n period_str = '-' if self.years < 0 or self.months < 0 or self.days < 0 else ''\n if self.years:\n period_str += str(abs(self.years)) + 'Y'\n if self.months:\n period_str += str(abs(self.months)) + 'M'\n if self.days:\n period_str += str(abs(self.days)) + 'D'\n\n if not period_str:\n period_str = '0D'\n return period_str\n\n def __abs__(self):\n ymdb = self.years, self.months, self.days, self.businessdays\n y,m,d,b = tuple(map(abs, ymdb))\n return self.__class__(years=y, months=m, days=d, businessdays=b)\n\n def __cmp__(self, other):\n other = self.__class__() if other == 0 else other\n if not isinstance(other, BusinessPeriod):\n other = BusinessPeriod(other)\n if self.businessdays:\n if other and not other.businessdays:\n # log warning on non compatible pair\n return None\n return self.businessdays - other.businessdays\n m = 12 * (self.years - other.years) + self.months - other.months\n d = self.days - other.days\n if m * 28 < -d < m * 31:\n p = self.__class__(months=m)\n if p.min_days() <= -d <= p.max_days():\n # log warning on non orderable pair\n return None\n return m * 30.5 + d\n\n def __eq__(self, other):\n if isinstance(other, type(self)):\n attr = 'years', 'months', 'days', 'businessdays'\n return all(getattr(self, a) == getattr(other, a) for a in attr)\n return False\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __le__(self, other):\n cmp = self.__cmp__(other)\n cmp = self.__cmp__(other + '1D') if cmp is None else cmp\n return cmp if cmp is None else cmp <= 0\n\n def __lt__(self, other):\n cmp = self.__cmp__(other)\n return cmp if cmp is None else cmp < 0\n\n def __ge__(self, other):\n lt = self.__lt__(other)\n return None if lt is None else not lt\n\n def __gt__(self, other):\n le = self.__le__(other)\n return None if le is None else not le\n\n def __hash__(self):\n return hash(repr(self))\n\n def __nonzero__(self):\n # return any((self.years, self.months, self.days, self.businessdays))\n return self.__bool__()\n\n def __bool__(self):\n # return self.__nonzero__()\n return any((self._months, self._days, self._businessdays))\n\n def __add__(self, other):\n if isinstance(other, (list, tuple)):\n return [self + o for o in other]\n if BusinessPeriod.is_businessperiod(other):\n p = BusinessPeriod(other)\n y = 
self.years + p.years\n m = self.months + p.months\n d = self.days + p.days\n b = self.businessdays + p.businessdays\n return self.__class__(years=y, months=m, days=d, businessdays=b)\n raise TypeError('addition of BusinessPeriod cannot handle objects of type %s.' % other.__class__.__name__)\n\n def __sub__(self, other):\n if isinstance(other, (list, tuple)):\n return [self - o for o in other]\n if BusinessPeriod.is_businessperiod(other):\n return self + (-1 * BusinessPeriod(other))\n raise TypeError('subtraction of BusinessPeriod cannot handle objects of type %s.' % other.__class__.__name__)\n\n def __mul__(self, other):\n if isinstance(other, (list, tuple)):\n return [self * o for o in other]\n if isinstance(other, int):\n m = other * self._months\n d = other * self._days\n b = other * self._businessdays\n return BusinessPeriod(months=m, days=d, businessdays=b)\n raise TypeError(\"expected int type but got %s\" % other.__class__.__name__)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def max_days(self):\n if self._months < 0:\n sgn = -1\n days_in_month = 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 28 # days from mar to feb forwards\n else:\n sgn = 1\n days_in_month = 31, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 28\n m = sgn * self._months\n # days from jan to feb backwards\n days = 0\n for i in range(m):\n days += days_in_month[int(i % 12)]\n days += 1 if int(i % 48) == 11 else 0\n return sgn * days + self._days\n\n def min_days(self):\n if self._months < 0 :\n sgn = -1\n days_in_month = 28, 31, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 # days from feb to jan backwards\n else:\n sgn = 1\n days_in_month = 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31 # days from feb to jan forwards\n m = sgn * self._months\n days = 0\n for i in range(m):\n days += days_in_month[int(i % 12)]\n days += 1 if int(i % 48) == 36 else 0\n return sgn * days + self._days\n", "repo_name": "sonntagsgesicht/businessdate", "sub_path": "businessdate/businessperiod.py", "file_name": "businessperiod.py", "file_ext": "py", "file_size_in_byte": 12099, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datetime.timedelta", "line_number": 42, "usage_type": "argument"}, {"api_name": "datetime.timedelta", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "39690873246", "text": "from discord.ext import commands\nimport discord\nimport time\nimport requests\nimport aiohttp\nimport sys\nimport os\nfrom textwrap import dedent\nfrom utils import presence, settings\nimport psutil\nimport logging\nimport traceback\n\n\nclass General(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n def get_bot_uptime(self, start_time):\n t = time.gmtime(time.time() - start_time)\n return f\"{t.tm_mday - 1} days, {t.tm_hour} hours, and {t.tm_min} minutes\"\n\n @commands.cooldown(rate=1, per=3)\n @commands.command()\n async def info(self, ctx):\n async with ctx.channel.typing():\n e = discord.Embed(\n color=discord.Color(0x6441A4),\n title=\"Luna Bot | Discord Bot For Gaming Community\"\n )\n uptime = self.get_bot_uptime(self.bot.uptime)\n mem = psutil.virtual_memory()\n e.add_field(\n name=\"Uptime\",\n value=uptime,\n inline=False\n )\n e.add_field(\n name=\"Version\",\n value=dedent(f\"\"\"\\\n **·** Python {sys.version.split(' ')[0]}\n **·** discord.py {discord.__version__}\n **·** Luna Bot {settings.Version}\n \"\"\")\n )\n if ctx.guild is None:\n e.add_field(\n name=\"Shard Info\",\n value=dedent(f\"\"\"\\\n **·** Shard latency: 
{round(self.bot.latency*1000)}ms\n **·** Total shards: {self.bot.shard_count}\n \"\"\")\n )\n else:\n e.add_field(\n name=\"Shard Info\",\n value=dedent(f\"\"\"\\\n **·** Current shard: {ctx.guild.shard_id}\n **·** Shard latency: {round(self.bot.latency*1000)}ms\n **·** Total shards: {self.bot.shard_count}\n \"\"\")\n )\n e.add_field(\n name=\"System\",\n value=dedent(f\"\"\"\\\n **·** {psutil.cpu_percent(interval=1)}% CPU\n **·** {round(mem.used/1000000)}/{round(mem.total/1000000)}MB RAM used\n \"\"\")\n )\n e.add_field(\n name=\"Developer\",\n value=\"MetaLuna#1999\",\n inline=False\n )\n await ctx.send(embed=e)\n\n @commands.cooldown(rate=1, per=3)\n @commands.command(pass_context=True)\n async def ping(self, ctx):\n t = time.time()\n await ctx.trigger_typing()\n t2 = round((time.time() - t) * 1000)\n await ctx.send(\"Pong! {}ms\".format(t2))\n\n @commands.command(pass_context=True)\n async def invite(self, ctx):\n await ctx.send(\"https://discordapp.com/api/oauth2/authorize?scope=bot&permissions=8&client_id=272451861636841482\")\n\n\ndef setup(bot):\n bot.add_cog(General(bot))\n", "repo_name": "metalun4/LunaBot-v0.0.3", "sub_path": "cogs/general.py", "file_name": "general.py", "file_ext": "py", "file_size_in_byte": 3006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 15, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 15, "usage_type": "name"}, {"api_name": "time.gmtime", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 20, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 28, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 32, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.version.split", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.version", "line_number": 41, "usage_type": "attribute"}, {"api_name": "discord.__version__", "line_number": 42, "usage_type": "attribute"}, {"api_name": "utils.settings.Version", "line_number": 43, "usage_type": "attribute"}, {"api_name": "utils.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 49, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 57, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 65, "usage_type": "call"}, {"api_name": "psutil.cpu_percent", "line_number": 66, "usage_type": "call"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 23, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 24, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 24, "usage_type": "name"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 82, "usage_type": "call"}, {"api_name": "discord.ext.commands.cooldown", "line_number": 77, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 77, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 78, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 78, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 85, "usage_type": "call"}, 
{"api_name": "discord.ext.commands", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "71384830235", "text": "''' Script to dump contents from Ntuples into CSV format '''\n\n# -- Import libraries -- #\nimport ROOT as r\nimport os, re, time, sys, pickle\nfrom optparse import OptionParser\nfrom root_numpy import tree2array\nimport numpy as np\nimport pandas as pd\nimport multiprocessing as mp\nimport utils.baseDumper as bd\nfrom concentrator import combine_dataframes\nimport time\n\ndef addbaseDumperOptions(pr):\n pr.add_option(\"-v\",\"--verbose\" , dest=\"verbose\" , action=\"store_true\", default=False, help=\"If activated, print verbose output\")\n pr.add_option('--outpath', '-o', type=\"string\", dest = \"outpath\", default = \"./results/\")\n pr.add_option('--inputFolder', '-i', type=\"string\", dest = \"inputFolder\", default = \".\")\n pr.add_option('--nevents', '-n', type=\"int\", metavar=\"nevents\", dest=\"nevents\", default = 100)\n pr.add_option(\"--njobs\", dest=\"njobs\", type=\"int\", help=\"Number of cores to use for multiprocess mode.\", default = 1)\n return \n\ndef submit(args):\n ''' Function to submit jobs '''\n DTdumper = bd.baseDumper(args[0], args[1])\n DTdumper.run()\n return\n\ndef main_run(opts, classtype): \n # Function for job submission \n\n files_ = [f for f in os.listdir(options.inputFolder) if \".root\" in f] \n t0 = time.time()\n with mp.Pool(processes = opts.njobs) as pool:\n pool.map( submit, ((opts, f) for f in files_))\n tf = time.time()\n print(\"I has taken %3.2f minutes (%3.2f seconds) to write these files.\"%(abs(t0-tf)/60, abs(t0-tf)))\n return\n\nif __name__ == \"__main__\":\n # parser inputs\n pr = OptionParser(usage=\"%prog [options]\")\n\n addbaseDumperOptions(pr)\n (options,args) = pr.parse_args()\n\n main_run(options, bd.baseDumper)\n", "repo_name": "Cvico/DTPatternRecognition", "sub_path": "dumpDTDigisFromNtuples.py", "file_name": "dumpDTDigisFromNtuples.py", "file_ext": "py", "file_size_in_byte": 1645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "utils.baseDumper.baseDumper", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.baseDumper", "line_number": 25, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 33, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.baseDumper.baseDumper", "line_number": 47, "usage_type": "attribute"}, {"api_name": "utils.baseDumper", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "21830007651", "text": "from hashlib import sha1\nimport hmac\nimport os\nfrom time import time\n\nfrom libcloud.utils.py3 import httplib\nfrom libcloud.utils.py3 import urlencode\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nfrom libcloud.utils.py3 import PY3\nfrom libcloud.utils.py3 import b\nfrom libcloud.utils.py3 import urlquote\n\nif PY3:\n from io import FileIO as file\n\nfrom libcloud.utils.files import read_in_chunks\nfrom libcloud.common.types import MalformedResponseError, LibcloudError\nfrom libcloud.common.base import Response, RawResponse\n\nfrom libcloud.storage.providers import Provider\nfrom libcloud.storage.base import Object, Container, StorageDriver\nfrom libcloud.storage.types 
import ContainerAlreadyExistsError\nfrom libcloud.storage.types import ContainerDoesNotExistError\nfrom libcloud.storage.types import ContainerIsNotEmptyError\nfrom libcloud.storage.types import ObjectDoesNotExistError\nfrom libcloud.storage.types import ObjectHashMismatchError\nfrom libcloud.storage.types import InvalidContainerNameError\nfrom libcloud.common.types import LazyList\nfrom libcloud.common.openstack import OpenStackBaseConnection\nfrom libcloud.common.openstack import OpenStackDriverMixin\n\nfrom libcloud.common.rackspace import (\n AUTH_URL_US, AUTH_URL_UK)\n\nCDN_HOST = 'cdn.clouddrive.com'\nAPI_VERSION = 'v1.0'\n\n\nclass CloudFilesResponse(Response):\n valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT]\n\n def success(self):\n i = int(self.status)\n return i >= 200 and i <= 299 or i in self.valid_response_codes\n\n def parse_body(self):\n if not self.body:\n return None\n\n if 'content-type' in self.headers:\n key = 'content-type'\n elif 'Content-Type' in self.headers:\n key = 'Content-Type'\n else:\n raise LibcloudError('Missing content-type header')\n\n content_type = self.headers[key]\n if content_type.find(';') != -1:\n content_type = content_type.split(';')[0]\n\n if content_type == 'application/json':\n try:\n data = json.loads(self.body)\n except:\n raise MalformedResponseError('Failed to parse JSON',\n body=self.body,\n driver=CloudFilesStorageDriver)\n elif content_type == 'text/plain':\n data = self.body\n else:\n data = self.body\n\n return data\n\n\nclass CloudFilesRawResponse(CloudFilesResponse, RawResponse):\n pass\n\n\nclass CloudFilesConnection(OpenStackBaseConnection):\n \"\"\"\n Base connection class for the Cloudfiles driver.\n \"\"\"\n\n auth_url = AUTH_URL_US\n responseCls = CloudFilesResponse\n rawResponseCls = CloudFilesRawResponse\n\n def __init__(self, user_id, key, secure=True, **kwargs):\n super(CloudFilesConnection, self).__init__(user_id, key, secure=secure,\n **kwargs)\n self.api_version = API_VERSION\n self.accept_format = 'application/json'\n self.cdn_request = False\n\n if self._ex_force_service_region:\n self.service_region = self._ex_force_service_region\n\n def get_endpoint(self):\n # First, we parse out both files and cdn endpoints\n # for each auth version\n if '2.0' in self._auth_version:\n eps = self.service_catalog.get_endpoints(\n service_type='object-store',\n name='cloudFiles')\n cdn_eps = self.service_catalog.get_endpoints(\n service_type='object-store',\n name='cloudFilesCDN')\n elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):\n eps = self.service_catalog.get_endpoints(name='cloudFiles')\n cdn_eps = self.service_catalog.get_endpoints(name='cloudFilesCDN')\n\n # if this is a CDN request, return the cdn url instead\n if self.cdn_request:\n eps = cdn_eps\n\n if self.service_region:\n _eps = []\n for ep in eps:\n if ep['region'].lower() == self.service_region.lower():\n _eps.append(ep)\n eps = _eps\n\n if len(eps) == 0:\n raise LibcloudError('Could not find specified endpoint')\n\n ep = eps[0]\n if 'publicURL' in ep:\n return ep['publicURL']\n else:\n raise LibcloudError('Could not find specified endpoint')\n\n def request(self, action, params=None, data='', headers=None, method='GET',\n raw=False, cdn_request=False):\n if not headers:\n headers = {}\n if not params:\n params = {}\n\n self.cdn_request = cdn_request\n params['format'] = 'json'\n\n if method in ['POST', 'PUT'] and 'Content-Type' not in headers:\n headers.update({'Content-Type': 'application/json; charset=UTF-8'})\n\n return 
super(CloudFilesConnection, self).request(\n action=action,\n params=params, data=data,\n method=method, headers=headers,\n raw=raw)\n\n\nclass CloudFilesUSConnection(CloudFilesConnection):\n \"\"\"\n Connection class for the Cloudfiles US endpoint.\n \"\"\"\n\n auth_url = AUTH_URL_US\n\n\nclass CloudFilesUKConnection(CloudFilesConnection):\n \"\"\"\n Connection class for the Cloudfiles UK endpoint.\n \"\"\"\n\n auth_url = AUTH_URL_UK\n\n\nclass CloudFilesSwiftConnection(CloudFilesConnection):\n \"\"\"\n Connection class for the Cloudfiles Swift endpoint.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.region_name = kwargs.pop('ex_region_name', None)\n super(CloudFilesSwiftConnection, self).__init__(*args, **kwargs)\n\n def get_endpoint(self, *args, **kwargs):\n if '2.0' in self._auth_version:\n endpoint = self.service_catalog.get_endpoint(\n service_type='object-store',\n name='swift',\n region=self.region_name)\n elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):\n endpoint = self.service_catalog.get_endpoint(\n name='swift', region=self.region_name)\n\n if 'publicURL' in endpoint:\n return endpoint['publicURL']\n else:\n raise LibcloudError('Could not find specified endpoint')\n\n\nclass CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin):\n \"\"\"\n Base CloudFiles driver.\n\n You should never create an instance of this class directly but use US/UK\n class.\n \"\"\"\n name = 'CloudFiles'\n website = 'http://www.rackspace.com/'\n\n connectionCls = CloudFilesConnection\n hash_type = 'md5'\n supports_chunked_encoding = True\n\n def __init__(self, *args, **kwargs):\n OpenStackDriverMixin.__init__(self, *args, **kwargs)\n super(CloudFilesStorageDriver, self).__init__(*args, **kwargs)\n\n def list_containers(self):\n response = self.connection.request('')\n\n if response.status == httplib.NO_CONTENT:\n return []\n elif response.status == httplib.OK:\n return self._to_container_list(json.loads(response.body))\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def list_container_objects(self, container):\n value_dict = {'container': container}\n return LazyList(get_more=self._get_more, value_dict=value_dict)\n\n def get_container(self, container_name):\n response = self.connection.request('/%s' % (container_name),\n method='HEAD')\n\n if response.status == httplib.NO_CONTENT:\n container = self._headers_to_container(\n container_name, response.headers)\n return container\n elif response.status == httplib.NOT_FOUND:\n raise ContainerDoesNotExistError(None, self, container_name)\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def get_object(self, container_name, object_name):\n container = self.get_container(container_name)\n response = self.connection.request('/%s/%s' % (container_name,\n object_name),\n method='HEAD')\n if response.status in [httplib.OK, httplib.NO_CONTENT]:\n obj = self._headers_to_object(\n object_name, container, response.headers)\n return obj\n elif response.status == httplib.NOT_FOUND:\n raise ObjectDoesNotExistError(None, self, object_name)\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def get_container_cdn_url(self, container):\n container_name = container.name\n response = self.connection.request('/%s' % (container_name),\n method='HEAD',\n cdn_request=True)\n\n if response.status == httplib.NO_CONTENT:\n cdn_url = response.headers['x-cdn-uri']\n return cdn_url\n elif response.status == httplib.NOT_FOUND:\n raise 
ContainerDoesNotExistError(value='',\n container_name=container_name,\n driver=self)\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def get_object_cdn_url(self, obj):\n container_cdn_url = self.get_container_cdn_url(container=obj.container)\n return '%s/%s' % (container_cdn_url, obj.name)\n\n def enable_container_cdn(self, container, ex_ttl=None):\n \"\"\"\n @inherits: L{StorageDriver.enable_container_cdn}\n\n @param ex_ttl: cache time to live\n @type ex_ttl: C{int}\n \"\"\"\n container_name = container.name\n headers = {'X-CDN-Enabled': 'True'}\n\n if ex_ttl:\n headers['X-TTL'] = ex_ttl\n\n response = self.connection.request('/%s' % (container_name),\n method='PUT',\n headers=headers,\n cdn_request=True)\n\n return response.status in [httplib.CREATED, httplib.ACCEPTED]\n\n def create_container(self, container_name):\n container_name = self._clean_container_name(container_name)\n response = self.connection.request(\n '/%s' % (container_name), method='PUT')\n\n if response.status == httplib.CREATED:\n # Accepted means that the container is not yet created but it will\n # be eventually\n extra = {'object_count': 0}\n container = Container(name=container_name,\n extra=extra, driver=self)\n\n return container\n elif response.status == httplib.ACCEPTED:\n error = ContainerAlreadyExistsError(None, self, container_name)\n raise error\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def delete_container(self, container):\n name = self._clean_container_name(container.name)\n\n # Only an empty container can be deleted\n response = self.connection.request('/%s' % (name), method='DELETE')\n\n if response.status == httplib.NO_CONTENT:\n return True\n elif response.status == httplib.NOT_FOUND:\n raise ContainerDoesNotExistError(value='',\n container_name=name, driver=self)\n elif response.status == httplib.CONFLICT:\n # @TODO: Add \"delete_all_objects\" parameter?\n raise ContainerIsNotEmptyError(value='',\n container_name=name, driver=self)\n\n def download_object(self, obj, destination_path, overwrite_existing=False,\n delete_on_failure=True):\n container_name = obj.container.name\n object_name = obj.name\n response = self.connection.request('/%s/%s' % (container_name,\n object_name),\n method='GET', raw=True)\n\n return self._get_object(\n obj=obj, callback=self._save_object, response=response,\n callback_kwargs={'obj': obj,\n 'response': response.response,\n 'destination_path': destination_path,\n 'overwrite_existing': overwrite_existing,\n 'delete_on_failure': delete_on_failure},\n success_status_code=httplib.OK)\n\n def download_object_as_stream(self, obj, chunk_size=None):\n container_name = obj.container.name\n object_name = obj.name\n response = self.connection.request('/%s/%s' % (container_name,\n object_name),\n method='GET', raw=True)\n\n return self._get_object(obj=obj, callback=read_in_chunks,\n response=response,\n callback_kwargs={'iterator': response.response,\n 'chunk_size': chunk_size},\n success_status_code=httplib.OK)\n\n def upload_object(self, file_path, container, object_name, extra=None,\n verify_hash=True):\n \"\"\"\n Upload an object.\n\n Note: This will overwrite a file with the same name if it already exists.\n \"\"\"\n upload_func = self._upload_file\n upload_func_kwargs = {'file_path': file_path}\n\n return self._put_object(container=container, object_name=object_name,\n upload_func=upload_func,\n upload_func_kwargs=upload_func_kwargs,\n extra=extra, file_path=file_path,\n verify_hash=verify_hash)\n\n def 
upload_object_via_stream(self, iterator,\n container, object_name, extra=None):\n if isinstance(iterator, file):\n iterator = iter(iterator)\n\n upload_func = self._stream_data\n upload_func_kwargs = {'iterator': iterator}\n\n return self._put_object(container=container, object_name=object_name,\n upload_func=upload_func,\n upload_func_kwargs=upload_func_kwargs,\n extra=extra, iterator=iterator)\n\n def delete_object(self, obj):\n container_name = self._clean_container_name(obj.container.name)\n object_name = self._clean_object_name(obj.name)\n\n response = self.connection.request(\n '/%s/%s' % (container_name, object_name), method='DELETE')\n\n if response.status == httplib.NO_CONTENT:\n return True\n elif response.status == httplib.NOT_FOUND:\n raise ObjectDoesNotExistError(value='', object_name=object_name,\n driver=self)\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def ex_get_meta_data(self):\n \"\"\"\n Get meta data\n\n @rtype: C{dict}\n \"\"\"\n response = self.connection.request('', method='HEAD')\n\n if response.status == httplib.NO_CONTENT:\n container_count = response.headers.get(\n 'x-account-container-count', 'unknown')\n object_count = response.headers.get(\n 'x-account-object-count', 'unknown')\n bytes_used = response.headers.get(\n 'x-account-bytes-used', 'unknown')\n temp_url_key = response.headers.get(\n 'x-account-meta-temp-url-key', None)\n\n return {'container_count': int(container_count),\n 'object_count': int(object_count),\n 'bytes_used': int(bytes_used),\n 'temp_url_key': temp_url_key}\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def ex_multipart_upload_object(self, file_path, container, object_name,\n chunk_size=33554432, extra=None,\n verify_hash=True):\n object_size = os.path.getsize(file_path)\n if object_size < chunk_size:\n return self.upload_object(file_path, container, object_name,\n extra=extra, verify_hash=verify_hash)\n\n iter_chunk_reader = FileChunkReader(file_path, chunk_size)\n\n for index, iterator in enumerate(iter_chunk_reader):\n self._upload_object_part(container=container,\n object_name=object_name,\n part_number=index,\n iterator=iterator,\n verify_hash=verify_hash)\n\n return self._upload_object_manifest(container=container,\n object_name=object_name,\n extra=extra,\n verify_hash=verify_hash)\n\n def ex_enable_static_website(self, container, index_file='index.html'):\n \"\"\"\n Enable serving a static website.\n\n @param container: Container instance\n @type container: L{Container}\n\n @param index_file: Name of the object which becomes an index page for\n every sub-directory in this container.\n @type index_file: C{str}\n\n @rtype: C{bool}\n \"\"\"\n container_name = container.name\n headers = {'X-Container-Meta-Web-Index': index_file}\n\n response = self.connection.request('/%s' % (container_name),\n method='POST',\n headers=headers,\n cdn_request=False)\n\n return response.status in [httplib.CREATED, httplib.ACCEPTED]\n\n def ex_set_error_page(self, container, file_name='error.html'):\n \"\"\"\n Set a custom error page which is displayed if file is not found and\n serving of a static website is enabled.\n\n @param container: Container instance\n @type container: L{Container}\n\n @param file_name: Name of the object which becomes the error page.\n @type file_name: C{str}\n\n @rtype: C{bool}\n \"\"\"\n container_name = container.name\n headers = {'X-Container-Meta-Web-Error': file_name}\n\n response = self.connection.request('/%s' % (container_name),\n method='POST',\n 
headers=headers,\n cdn_request=False)\n\n return response.status in [httplib.CREATED, httplib.ACCEPTED]\n\n def ex_set_account_metadata_temp_url_key(self, key):\n \"\"\"\n Set the metadata header X-Account-Meta-Temp-URL-Key on your Cloud\n Files account.\n\n @param key: X-Account-Meta-Temp-URL-Key\n @type key: C{str}\n\n @rtype: C{bool}\n \"\"\"\n headers = {'X-Account-Meta-Temp-URL-Key': key}\n\n response = self.connection.request('',\n method='POST',\n headers=headers,\n cdn_request=False)\n\n return response.status in [httplib.OK, httplib.NO_CONTENT,\n httplib.CREATED, httplib.ACCEPTED]\n\n def ex_get_object_temp_url(self, obj, method='GET', timeout=60):\n \"\"\"\n Create a temporary URL to allow others to retrieve or put objects\n in your Cloud Files account for as long or as short a time as you\n wish. This method is specifically for allowing users to retrieve\n or update an object.\n\n @param obj: The object that you wish to make temporarily public\n @type obj: L{Object}\n\n @param method: Which method you would like to allow, 'PUT' or 'GET'\n @type method: C{str}\n\n @param timeout: Time (in seconds) after which you want the TempURL\n to expire.\n @type timeout: C{int}\n\n @rtype: C{bool}\n \"\"\"\n self.connection._populate_hosts_and_request_paths()\n expires = int(time() + timeout)\n path = '%s/%s/%s' % (self.connection.request_path,\n obj.container.name, obj.name)\n try:\n key = self.ex_get_meta_data()['temp_url_key']\n assert key is not None\n except Exception:\n raise KeyError('You must first set the ' +\n 'X-Account-Meta-Temp-URL-Key header on your ' +\n 'Cloud Files account using ' +\n 'ex_set_account_metadata_temp_url_key before ' +\n 'you can use this method.')\n hmac_body = '%s\\n%s\\n%s' % (method, expires, path)\n sig = hmac.new(b(key), b(hmac_body), sha1).hexdigest()\n params = urlencode({'temp_url_sig': sig,\n 'temp_url_expires': expires})\n\n temp_url = 'https://%s/%s/%s?%s' %\\\n (self.connection.host + self.connection.request_path,\n obj.container.name, obj.name, params)\n\n return temp_url\n\n def _upload_object_part(self, container, object_name, part_number,\n iterator, verify_hash=True):\n upload_func = self._stream_data\n upload_func_kwargs = {'iterator': iterator}\n part_name = object_name + '/%08d' % part_number\n extra = {'content_type': 'application/octet-stream'}\n\n self._put_object(container=container,\n object_name=part_name,\n upload_func=upload_func,\n upload_func_kwargs=upload_func_kwargs,\n extra=extra, iterator=iterator,\n verify_hash=verify_hash)\n\n def _upload_object_manifest(self, container, object_name, extra=None,\n verify_hash=True):\n extra = extra or {}\n meta_data = extra.get('meta_data')\n\n container_name_cleaned = self._clean_container_name(container.name)\n object_name_cleaned = self._clean_object_name(object_name)\n request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned)\n\n headers = {'X-Auth-Token': self.connection.auth_token,\n 'X-Object-Manifest': '%s/%s/' %\n (container_name_cleaned,\n object_name_cleaned)}\n\n data = ''\n response = self.connection.request(request_path,\n method='PUT', data=data,\n headers=headers, raw=True)\n\n object_hash = None\n\n if verify_hash:\n hash_function = self._get_hash_function()\n hash_function.update(b(data))\n data_hash = hash_function.hexdigest()\n object_hash = response.headers.get('etag')\n\n if object_hash != data_hash:\n raise ObjectHashMismatchError(\n value=('MD5 hash checksum does not match (expected=%s, ' +\n 'actual=%s)') %\n (data_hash, object_hash),\n 
object_name=object_name, driver=self)\n\n obj = Object(name=object_name, size=0, hash=object_hash, extra=None,\n meta_data=meta_data, container=container, driver=self)\n\n return obj\n\n def _get_more(self, last_key, value_dict):\n container = value_dict['container']\n params = {}\n\n if last_key:\n params['marker'] = last_key\n\n response = self.connection.request('/%s' % (container.name),\n params=params)\n\n if response.status == httplib.NO_CONTENT:\n # Empty or inexistent container\n return [], None, True\n elif response.status == httplib.OK:\n objects = self._to_object_list(json.loads(response.body),\n container)\n\n # TODO: Is this really needed?\n if len(objects) == 0:\n return [], None, True\n\n return objects, objects[-1].name, False\n\n raise LibcloudError('Unexpected status code: %s' % (response.status))\n\n def _put_object(self, container, object_name, upload_func,\n upload_func_kwargs, extra=None, file_path=None,\n iterator=None, verify_hash=True):\n extra = extra or {}\n container_name_cleaned = self._clean_container_name(container.name)\n object_name_cleaned = self._clean_object_name(object_name)\n content_type = extra.get('content_type', None)\n meta_data = extra.get('meta_data', None)\n\n headers = {}\n if meta_data:\n for key, value in list(meta_data.items()):\n key = 'X-Object-Meta-%s' % (key)\n headers[key] = value\n\n request_path = '/%s/%s' % (container_name_cleaned, object_name_cleaned)\n result_dict = self._upload_object(\n object_name=object_name, content_type=content_type,\n upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,\n request_path=request_path, request_method='PUT',\n headers=headers, file_path=file_path, iterator=iterator)\n\n response = result_dict['response'].response\n bytes_transferred = result_dict['bytes_transferred']\n server_hash = result_dict['response'].headers.get('etag', None)\n\n if response.status == httplib.EXPECTATION_FAILED:\n raise LibcloudError(value='Missing content-type header',\n driver=self)\n elif verify_hash and not server_hash:\n raise LibcloudError(value='Server didn\'t return etag',\n driver=self)\n elif (verify_hash and result_dict['data_hash'] != server_hash):\n raise ObjectHashMismatchError(\n value=('MD5 hash checksum does not match (expected=%s, ' +\n 'actual=%s)') % (result_dict['data_hash'], server_hash),\n object_name=object_name, driver=self)\n elif response.status == httplib.CREATED:\n obj = Object(\n name=object_name, size=bytes_transferred, hash=server_hash,\n extra=None, meta_data=meta_data, container=container,\n driver=self)\n\n return obj\n else:\n # @TODO: Add test case for this condition (probably 411)\n raise LibcloudError('status_code=%s' % (response.status),\n driver=self)\n\n def _clean_container_name(self, name):\n \"\"\"\n Clean container name.\n \"\"\"\n if name.startswith('/'):\n name = name[1:]\n name = urlquote(name)\n\n if name.find('/') != -1:\n raise InvalidContainerNameError(value='Container name cannot'\n ' contain slashes',\n container_name=name, driver=self)\n\n if len(name) > 256:\n raise InvalidContainerNameError(\n value='Container name cannot be longer than 256 bytes',\n container_name=name, driver=self)\n\n return name\n\n def _clean_object_name(self, name):\n name = urlquote(name)\n return name\n\n def _to_container_list(self, response):\n # @TODO: Handle more than 10k containers - use \"lazy list\"?\n containers = []\n\n for container in response:\n extra = {'object_count': int(container['count']),\n 'size': int(container['bytes'])}\n 
containers.append(Container(name=container['name'], extra=extra,\n driver=self))\n\n return containers\n\n def _to_object_list(self, response, container):\n objects = []\n\n for obj in response:\n name = obj['name']\n size = int(obj['bytes'])\n hash = obj['hash']\n extra = {'content_type': obj['content_type'],\n 'last_modified': obj['last_modified']}\n objects.append(Object(\n name=name, size=size, hash=hash, extra=extra,\n meta_data=None, container=container, driver=self))\n\n return objects\n\n def _headers_to_container(self, name, headers):\n size = int(headers.get('x-container-bytes-used', 0))\n object_count = int(headers.get('x-container-object-count', 0))\n\n extra = {'object_count': object_count,\n 'size': size}\n container = Container(name=name, extra=extra, driver=self)\n return container\n\n def _headers_to_object(self, name, container, headers):\n size = int(headers.pop('content-length', 0))\n last_modified = headers.pop('last-modified', None)\n etag = headers.pop('etag', None)\n content_type = headers.pop('content-type', None)\n\n meta_data = {}\n for key, value in list(headers.items()):\n if key.find('x-object-meta-') != -1:\n key = key.replace('x-object-meta-', '')\n meta_data[key] = value\n\n extra = {'content_type': content_type, 'last_modified': last_modified}\n\n obj = Object(name=name, size=size, hash=etag, extra=extra,\n meta_data=meta_data, container=container, driver=self)\n return obj\n\n def _ex_connection_class_kwargs(self):\n return self.openstack_connection_kwargs()\n\n\nclass CloudFilesUSStorageDriver(CloudFilesStorageDriver):\n \"\"\"\n Cloudfiles storage driver for the US endpoint.\n \"\"\"\n\n type = Provider.CLOUDFILES_US\n name = 'CloudFiles (US)'\n connectionCls = CloudFilesUSConnection\n\n\nclass CloudFilesSwiftStorageDriver(CloudFilesStorageDriver):\n \"\"\"\n Cloudfiles storage driver for the OpenStack Swift.\n \"\"\"\n type = Provider.CLOUDFILES_SWIFT\n name = 'CloudFiles (SWIFT)'\n connectionCls = CloudFilesSwiftConnection\n\n def __init__(self, *args, **kwargs):\n self._ex_region_name = kwargs.get('ex_region_name', 'RegionOne')\n super(CloudFilesSwiftStorageDriver, self).__init__(*args, **kwargs)\n\n def openstack_connection_kwargs(self):\n rv = super(CloudFilesSwiftStorageDriver,\n self).openstack_connection_kwargs()\n rv['ex_region_name'] = self._ex_region_name\n return rv\n\n\nclass CloudFilesUKStorageDriver(CloudFilesStorageDriver):\n \"\"\"\n Cloudfiles storage driver for the UK endpoint.\n \"\"\"\n\n type = Provider.CLOUDFILES_UK\n name = 'CloudFiles (UK)'\n connectionCls = CloudFilesUKConnection\n\n\nclass FileChunkReader(object):\n def __init__(self, file_path, chunk_size):\n self.file_path = file_path\n self.total = os.path.getsize(file_path)\n self.chunk_size = chunk_size\n self.bytes_read = 0\n self.stop_iteration = False\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.stop_iteration:\n raise StopIteration\n\n start_block = self.bytes_read\n end_block = start_block + self.chunk_size\n if end_block >= self.total:\n end_block = self.total\n self.stop_iteration = True\n self.bytes_read += end_block - start_block\n return ChunkStreamReader(file_path=self.file_path,\n start_block=start_block,\n end_block=end_block,\n chunk_size=8192)\n\n def __next__(self):\n return self.next()\n\n\nclass ChunkStreamReader(object):\n def __init__(self, file_path, start_block, end_block, chunk_size):\n self.fd = open(file_path, 'rb')\n self.fd.seek(start_block)\n self.start_block = start_block\n self.end_block = end_block\n self.chunk_size = 
chunk_size\n self.bytes_read = 0\n self.stop_iteration = False\n\n def __iter__(self):\n return self\n\n def next(self):\n if self.stop_iteration:\n self.fd.close()\n raise StopIteration\n\n block_size = self.chunk_size\n if self.bytes_read + block_size >\\\n self.end_block - self.start_block:\n block_size = self.end_block - self.start_block - self.bytes_read\n self.stop_iteration = True\n\n block = self.fd.read(block_size)\n self.bytes_read += block_size\n return block\n\n def __next__(self):\n return self.next()\n", "repo_name": "ConPaaS-team/conpaas", "sub_path": "conpaas-director/cpsdirector/iaas/libcloud/storage/drivers/cloudfiles.py", "file_name": "cloudfiles.py", "file_ext": "py", "file_size_in_byte": 32660, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "libcloud.utils.py3.PY3", "line_number": 18, "usage_type": "name"}, {"api_name": "libcloud.common.base.Response", "line_number": 44, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 45, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 45, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.CONFLICT", "line_number": 45, "usage_type": "attribute"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 60, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "libcloud.common.types.MalformedResponseError", "line_number": 70, "usage_type": "call"}, {"api_name": "libcloud.common.base.RawResponse", "line_number": 81, "usage_type": "name"}, {"api_name": "libcloud.common.openstack.OpenStackBaseConnection", "line_number": 85, "usage_type": "name"}, {"api_name": "libcloud.common.rackspace.AUTH_URL_US", "line_number": 90, "usage_type": "name"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 130, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 136, "usage_type": "call"}, {"api_name": "libcloud.common.rackspace.AUTH_URL_US", "line_number": 163, "usage_type": "name"}, {"api_name": "libcloud.common.rackspace.AUTH_URL_UK", "line_number": 171, "usage_type": "name"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 196, "usage_type": "call"}, {"api_name": "libcloud.storage.base.StorageDriver", "line_number": 199, "usage_type": "name"}, {"api_name": "libcloud.common.openstack.OpenStackDriverMixin", "line_number": 199, "usage_type": "name"}, {"api_name": "libcloud.common.openstack.OpenStackDriverMixin.__init__", "line_number": 214, "usage_type": "call"}, {"api_name": "libcloud.common.openstack.OpenStackDriverMixin", "line_number": 214, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 220, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 220, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 222, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 222, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 223, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 225, "usage_type": "call"}, {"api_name": "libcloud.common.types.LazyList", "line_number": 229, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 235, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 235, 
"usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 239, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 239, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ContainerDoesNotExistError", "line_number": 240, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 242, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 249, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 249, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 249, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 253, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 253, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ObjectDoesNotExistError", "line_number": 254, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 256, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 264, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 264, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 267, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 267, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ContainerDoesNotExistError", "line_number": 268, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 272, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 296, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 296, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.ACCEPTED", "line_number": 296, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 303, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 303, "usage_type": "name"}, {"api_name": "libcloud.storage.base.Container", "line_number": 307, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.ACCEPTED", "line_number": 311, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 311, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ContainerAlreadyExistsError", "line_number": 312, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 315, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 323, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 323, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 325, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 325, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ContainerDoesNotExistError", "line_number": 326, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.CONFLICT", "line_number": 328, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 328, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ContainerIsNotEmptyError", "line_number": 330, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 348, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 348, 
"usage_type": "name"}, {"api_name": "libcloud.utils.files.read_in_chunks", "line_number": 357, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 361, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 361, "usage_type": "name"}, {"api_name": "io.FileIO", "line_number": 381, "usage_type": "argument"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 399, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 399, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NOT_FOUND", "line_number": 401, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 401, "usage_type": "name"}, {"api_name": "libcloud.storage.types.ObjectDoesNotExistError", "line_number": 402, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 405, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 415, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 415, "usage_type": "name"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 430, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 435, "usage_type": "call"}, {"api_name": "os.path", "line_number": 435, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 475, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 475, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.ACCEPTED", "line_number": 475, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 498, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 498, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.ACCEPTED", "line_number": 498, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 517, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 517, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 517, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 518, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 518, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.ACCEPTED", "line_number": 518, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 540, "usage_type": "call"}, {"api_name": "hmac.new", "line_number": 553, "usage_type": "call"}, {"api_name": "hashlib.sha1", "line_number": 553, "usage_type": "argument"}, {"api_name": "libcloud.utils.py3.b", "line_number": 553, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.urlencode", "line_number": 554, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.b", "line_number": 600, "usage_type": "call"}, {"api_name": "libcloud.storage.types.ObjectHashMismatchError", "line_number": 605, "usage_type": "call"}, {"api_name": "libcloud.storage.base.Object", "line_number": 611, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.NO_CONTENT", "line_number": 626, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 626, "usage_type": "name"}, {"api_name": "libcloud.utils.py3.httplib.OK", "line_number": 629, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 629, "usage_type": "name"}, {"api_name": 
"json.loads", "line_number": 630, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 639, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.EXPECTATION_FAILED", "line_number": 667, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 667, "usage_type": "name"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 668, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 671, "usage_type": "call"}, {"api_name": "libcloud.storage.types.ObjectHashMismatchError", "line_number": 674, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.httplib.CREATED", "line_number": 678, "usage_type": "attribute"}, {"api_name": "libcloud.utils.py3.httplib", "line_number": 678, "usage_type": "name"}, {"api_name": "libcloud.storage.base.Object", "line_number": 679, "usage_type": "call"}, {"api_name": "libcloud.common.types.LibcloudError", "line_number": 687, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.urlquote", "line_number": 696, "usage_type": "call"}, {"api_name": "libcloud.storage.types.InvalidContainerNameError", "line_number": 699, "usage_type": "call"}, {"api_name": "libcloud.storage.types.InvalidContainerNameError", "line_number": 704, "usage_type": "call"}, {"api_name": "libcloud.utils.py3.urlquote", "line_number": 711, "usage_type": "call"}, {"api_name": "libcloud.storage.base.Container", "line_number": 721, "usage_type": "call"}, {"api_name": "libcloud.storage.base.Object", "line_number": 735, "usage_type": "call"}, {"api_name": "libcloud.storage.base.Container", "line_number": 747, "usage_type": "call"}, {"api_name": "libcloud.storage.base.Object", "line_number": 764, "usage_type": "call"}, {"api_name": "libcloud.storage.providers.Provider.CLOUDFILES_US", "line_number": 777, "usage_type": "attribute"}, {"api_name": "libcloud.storage.providers.Provider", "line_number": 777, "usage_type": "name"}, {"api_name": "libcloud.storage.providers.Provider.CLOUDFILES_SWIFT", "line_number": 786, "usage_type": "attribute"}, {"api_name": "libcloud.storage.providers.Provider", "line_number": 786, "usage_type": "name"}, {"api_name": "libcloud.storage.providers.Provider.CLOUDFILES_UK", "line_number": 806, "usage_type": "attribute"}, {"api_name": "libcloud.storage.providers.Provider", "line_number": 806, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 814, "usage_type": "call"}, {"api_name": "os.path", "line_number": 814, "usage_type": "attribute"}]} +{"seq_id": "7507196809", "text": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nrequirements = ['Click>=7.0', ]\n\ntest_requirements = ['pytest>=3', ]\n\nsetup(\n author=\"Robert G Hennessy\",\n author_email='robertghennessy@gmail.com',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"Send out notifications for delays and next bbus or train arrival for transit operators using SIRI API (ex. 
Caltrain and SFMTA)\",\n    entry_points={\n        'console_scripts': [\n            'transit_notification=transit_notification.cli:main',\n        ],\n    },\n    install_requires=requirements,\n    license=\"GNU General Public License v3\",\n    long_description=readme + '\\n\\n' + history,\n    include_package_data=True,\n    keywords='transit_notification',\n    name='transit_notification',\n    packages=find_packages(include=['transit_notification', 'transit_notification.*']),\n    test_suite='tests',\n    tests_require=test_requirements,\n    url='https://github.com/robertghennessy/transit_notification',\n    version='0.1.0',\n    zip_safe=False,\n)\n", "repo_name": "robertghennessy/transit_notification", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1617, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "setuptools.setup", "line_number": 17, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "9469632474", "text": "from docplex.mp.model import Model\nfrom collections import namedtuple\nfrom sys import stdout\n\n# Initializing data\n# Cloud (c0) and cloudlets\n# units: storage(MB), cpu(MIPS), RAM(MB)\nCLOUDLETS = [\n    (\"C0\", 500000, 3000000, 120000),\n    (\"C1\", 510, 3100, 420),\n    (\"C2\", 520, 3100, 420),\n    (\"C3\", 530, 5300, 680)\n]\n\n# units: storage(MB), cpu(MIPS), RAM(MB), delayThreshold(ms)\nVMs = [\n    (\"V1\", 'U1', 150, 2100, 120, 5000),\n    (\"V2\", 'U1', 150, 2200, 220, 5000),\n    (\"V3\", 'U1', 170, 2300, 320, 5000),\n    (\"V4\", 'U1', 180, 2400, 420, 5000),\n    (\"V5\", 'U1', 190, 2500, 520, 5000)\n]\n\n# Data tuple\nCloudlet = namedtuple(\"Cloudlet\", [\"c_name\", \"c_storage\", \"c_CPU\", \"c_RAM\"])\nVM = namedtuple(\"VM\", [\"v_name\", \"v_user\",\"v_storage\", \"v_CPU\", \"v_RAM\", \"v_delayTreshold\"])\n\n# Connecting the data with the tuples\ncloudlets = [Cloudlet(*c) for c in CLOUDLETS]\nvms = [VM(*v) for v in VMs]\nidx = [(n.c_name,v.v_name) for n in cloudlets for v in vms]\n\nmodl = Model(\"Cloudlet-VM Allocation\")\n\n# creating variables\nx = modl.binary_var_dict(idx, name=\"allocate\")\n\n# storage constraint\nfor n in range(0, len(cloudlets)):\n    modl.add_constraint(\n        modl.sum(\n            vms[v].v_storage*x[cloudlets[n].c_name, vms[v].v_name] for v in range(0, len(vms))\n        ) <= cloudlets[n].c_storage\n    , ctname=\"storageConstr\")\n\n# cpu constraint\nfor n in range(0, len(cloudlets)):\n    modl.add_constraint(\n        modl.sum(\n            vms[v].v_CPU*x[cloudlets[n].c_name, vms[v].v_name] for v in range(0, len(vms))\n        ) <= cloudlets[n].c_CPU\n    , ctname=\"cpuConstr\")\n\n# ram constraint\nfor n in range(0, len(cloudlets)):\n    modl.add_constraint(\n        modl.sum(\n            vms[v].v_RAM*x[cloudlets[n].c_name, vms[v].v_name] for v in range(0, len(vms))\n        ) <= cloudlets[n].c_RAM\n    , ctname=\"ramConstr\")\n\n# allocation constraint: a VM must be allocated (even in cloud), \n# but only in one place (i.e., a VM must not be allocated in two places)\nfor v in range(0, len(vms)):\n    modl.add_constraint(\n        modl.sum(\n            x[cloudlets[n].c_name, vms[v].v_name] for n in range(0, len(cloudlets))\n        ) == 1\n    , ctname=\"AllocationConst\")\n\n# objective function\nmodl.minimize(modl.sum(x[cloudlets[0].c_name, vms[v].v_name] for v in range(0, len(vms))))\n\n# export the Linear Program model and solve\nmodl.export_as_lp(\"/home/jps/pli/cplex\")\nsolution = modl.solve()\n\n# print solution\nmodl.print_information()\nmodl.print_solution()", "repo_name": "jpfsilvaa/AllocationILP", "sub_path": "cplex/p1_cplex.py", 
"file_name": "p1_cplex.py", "file_ext": "py", "file_size_in_byte": 2492, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "collections.namedtuple", "line_number": 25, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 26, "usage_type": "call"}, {"api_name": "docplex.mp.model.Model", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "20058510734", "text": "\nimport numpy as np\nimport argparse\nimport cv2\nimport os\n\n# Bağımsız komut satırı argümanları belirlenir\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"programa girilen resim dizinine giden yol\")\nap.add_argument(\"-f\", \"--face\", required=True, help=\"yüz dedektörü model dizinine giden yol\")\nap.add_argument(\"-a\", \"--age\", required=True, help=\"yaş dedektörü model dizinine giden yol\")\nap.add_argument(\"-g\", \"--gender\", required=True, help=\"cinsiyet dedektörü model dizinine giden yol\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5, help=\"zayıf tespitleri filtreleme için minimum olasılık\")\nargs = vars(ap.parse_args())\n\n# Dedektörün tahmin edeceği yaş sınıfları tanımlanır\nAGE_BUCKETS = [\"(0-2)\", \"(4-6)\", \"(8-12)\", \"(15-20)\", \"(25-32)\", \"(38-43)\", \"(48-53)\", \"(60-100)\"]\nGENDER_BUCKETS = [\"Kadin\", \"Erkek\"]\n\n# Modeller yüklenir\nprint(\"[BILGI] Yüz dedektör modeli yükleniyor...\")\nprototxtPath = os.path.sep.join([args[\"face\"], \"deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"face\"], \"res10_300x300_ssd_iter_140000.caffemodel\"])\nfaceNet = cv2.dnn.readNet(prototxtPath, weightsPath)\nprint(\"[BILGI] Yaş dedektör modeli yükleniyor...\")\nprototxtPath = os.path.sep.join([args[\"age\"], \"age_deploy.prototxt\"])\nweightsPath = os.path.sep.join([args[\"age\"], \"age_net.caffemodel\"])\nageNet = cv2.dnn.readNet(prototxtPath, weightsPath)\nprint(\"[BILGI] Cinsiyet dedektör modeli yükleniyor...\")\nprototxtPath = os.path.sep.join([args[\"gender\"], \"gender.prototxt\"])\nweightsPath = os.path.sep.join([args[\"gender\"], \"gender_net.caffemodel\"])\ngenderNet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n# Resim yüklenir ve resim için bir input blobu oluşturulur\nimage = cv2.imread(args[\"image\"])\n(h, w) = image.shape[:2]\nblob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n# Blob ağa verilir ve yüz tespit edilir\nprint(\"[BILGI] Yüz algılama...\")\nfaceNet.setInput(blob)\ndetections = faceNet.forward()\n\n# Bulununan yüz döngüye verilir\nfor i in range(0, detections.shape[2]):\n\t# tahminle ilişkili güven (yani olasılık) çıkarılır\n\tconfidence = detections[0, 0, i, 2]\n\n\t# Güvenin(confidence) minimum güvenden daha yüksek olması sağlanarak zayıf algılamalar filtrelenir\n\tif confidence > args[\"confidence\"]:\n\t\t# Nesne için sınırla kutunun (x, y) koordinatlarını hesaplayın\n\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t# Yüzün ROI'sini çıkarılır ve yüz ROI'sinden bir blob oluşturulur\n\t\tface = image[startY:endY, startX:endX]\n\t\tfaceBlob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), (78.4263377603, 87.7689143744, 114.895847746), swapRB=False)\n\t\tglobal j\n\t\tj = i\n\t\t# Sınıflara ait tahminler bulunur ve en büyük olasılığa sahip yaş ve cinsiyet sınıfları belirlenir\n\t\tageNet.setInput(faceBlob)\n\t\tpredsA = ageNet.forward()\n\t\ti = predsA[0].argmax()\n\t\tage = 
AGE_BUCKETS[i]\n\t\tageConfidence = predsA[0][i]\n\t\tgenderNet.setInput(faceBlob)\n\t\tpredsG = genderNet.forward()\n\t\tj = predsG[0].argmax()\n\t\tgender = GENDER_BUCKETS[j]\n\t\tgenderConfidence = predsG[0][j]\n\n\t\t# Tahminler terminale yazdırılır\n\t\ttext = \"{}: {:.2f}%\".format(age, ageConfidence * 100) + \" \" + \"{}: {:.2f}%\".format(gender, genderConfidence * 100)\n\t\tprint(\"[BILGI] {}\".format(text))\n\n\t\t# İlgili sınıflar yazdırılır ve bulunan yüzün kutu sınırları çizilir\n\t\ty = startY - 20 if startY - 20 > 20 else startY + 20\n\t\tcv2.rectangle(image, (startX, startY), (endX, endY), (255, 255, 0), 3)\n\t\tcv2.putText(image, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.85, (255, 255, 0), 2)\n\n# Çıktı(output) resmi ekranda gösterilir\ncv2.imshow(\"Image\", image)\ncv2.waitKey(0)", "repo_name": "fadimanakilci/Age-And-Gender-Prediction", "sub_path": "detect_age.py", "file_name": "detect_age.py", "file_ext": "py", "file_size_in_byte": 3739, "program_lang": "python", "lang": "tr", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.sep.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.dnn.readNet", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "cv2.dnn.readNet", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.sep.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cv2.dnn.readNet", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "38325039237", "text": "# -*- coding: utf-8 -*-\n#\n# This file is part of the OpenHandWrite project software.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as 
published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\n\nfrom psychopy import gui\nfrom psychopy.iohub.client import launchHubServer\nfrom constants import *\n\n#\n### Misc. utility functions used by the python experiment template.\n#\n\ndef getImageFilePath(file_name):\n    '''\n    Returns the full, absolute, path to the image named file_name. file_name\n    must be an image file located in the resources\image folder of the project.\n    If the file can not be found, None is returned.\n\n    :param file_name: image file name\n    :return: full path to image file in project, or None if file not found.\n    '''\n    pth = os.path.join(IMAGE_FOLDER,file_name)\n    if os.path.exists(pth):\n        return pth\n    return None\n\ndef getAudioFilePath(file_name):\n    '''\n    Returns the full, absolute, path to the audio file named file_name.\n    file_name must be an audio file located in the resources\audio\n    folder of the project. If the file can not be found, None is returned.\n\n    :param file_name: audio file name\n    :return: full path to audio file in project, or None if file not found.\n    '''\n    pth = os.path.join(AUDIO_FOLDER,file_name)\n    if os.path.exists(pth):\n        return pth\n    return None\n\ndef getAvailableConditionsFileNames():\n    '''\n    Return a list of all .xlsx experiment condition file names that are in the\n    project's conditions subfolder.\n    :return: list of condition file name str\n    '''\n    if os.path.exists(CONDITIONS_FOLDER):\n        import glob\n        cvfile_paths = glob.glob(CONDITIONS_FOLDER+os.path.sep+'*.xlsx')\n        return [ os.path.split(fpath)[1] for fpath in glob.glob(CONDITIONS_FOLDER+os.path.sep+'*.xlsx')]\n    return []\n\ndef isImageFileCandidate(file_name):\n    '''\n    Returns True if the file_name str should be considered an image file name\n    for use by an image stim graphic in the experiment. Otherwise returns False.\n    :param file_name: candidate image name string\n    :return: boolean\n    '''\n    try:\n        fname, fext = file_name.rsplit('.')\n        if fext in ACCEPTED_IMAGE_FORMATS:\n            return True\n        return False\n    except:\n        return False\n\ndef showSessionInfoDialog():\n    '''\n    Display a dialog to collect session or participant level information\n    at the start of an experiment.\n\n    If the dialog OK button is pressed, a dictionary with the values entered\n    for each dialog input is returned. If the dialog's Cancel button is pressed,\n    None is returned.\n    :return: dict of session info, or None if dialog was cancelled\n    '''\n    info = OrderedDict()\n    info['Session Code'] = DEFAULT_SESSION_CODE\n    info['Conditions File'] = getAvailableConditionsFileNames()\n#    info['ExpName'] =EXP_NAME\n#    info['ExpVersion'] = EXP_VERSION\n    infoDlg = gui.DlgFromDict(dictionary=info,\n                              title='{} (v{})'.format(EXP_NAME, EXP_VERSION),\n                              order = info.keys(),\n                              )\n    #                          fixed=['ExpName','ExpVersion'])\n    if infoDlg.OK:\n        return info\n    return None\n\ndef start_iohub(sess_info):\n    '''\n    Starts the iohub server process, using data from the dict returned by\n    showSessionInfoDialog() to create the hdf5 file name. If the file
If the file\n already exists, the existing file is renamed so that it is not\n overwritten by the current sessions data.\n\n iohub device configuration information is read from an\n 'iohub_config.yaml' file which must be in the same folder as this file.\n\n The created ioHubConnection object is returned after the iohub\n server has started and is ready for interaction with the experiment\n runtime.\n\n :param sess_info: dict returned from showSessionInfoDialog()\n :return: ioHubConnection object\n '''\n import os, shutil\n\n save_to = os.path.join(os.path.dirname(__file__),u'results',\n sess_info['Session Code'])\n save_to = os.path.normpath(save_to)\n if not save_to.endswith('.hdf5'):\n save_to = save_to+u'.hdf5'\n\n fdir, sess_code = os.path.split(save_to)\n if not os.path.exists(fdir):\n os.mkdir(fdir)\n\n #TODO: Ask if file should be overwritten, or new session code entered.\n si = 1\n save_dest = save_to\n while os.path.exists(save_dest):\n sname, sext = sess_code.rsplit(u'.',1)\n save_dest = os.path.join(fdir, u\"{}_{}.{}\".format(sname,si,sext))\n si+=1\n\n if save_dest is not save_to:\n shutil.move(save_to,save_dest)\n\n sess_code=sess_code[0:min(len(sess_code),24)]\n if sess_code.endswith(u'.hdf5'):\n sess_code = sess_code[:-5]\n if save_to.endswith(u'.hdf5'):\n save_to = save_to[:-5]\n\n kwargs={'experiment_code':EXP_NAME,\n 'session_code':sess_code,\n 'datastore_name':save_to,\n 'iohub_config_name': 'iohub_config.yaml'\n }\n return launchHubServer(**kwargs)\n\ndef saveWintabDeviceHardwareInfo(io):\n '''\n Save all available wintab device hardware information to the sessions .hdf5\n file as a series of experiment message events. This function is called at\n the start of the experiment, after the start_iohub() function has returned\n the created iohub connection object.\n\n The following areas of information are saved:\n\n * wintab device hardware model information\n * the availability, data range, etc, for each axis of the wintab device\n * wintab context values read from the C CONTEXT struct at device init\n\n :param io: ioHubConnection instance\n :return: None\n '''\n wtdev = io.devices.tablet\n\n io.sendMessageEvent(text=\"START WINTAB HW MODEL INFO\")\n for k, v in wtdev.model.items():\n io.sendMessageEvent(text=\"{}: {}\".format(k,v))\n io.sendMessageEvent(text=\"STOP WINTAB HW MODEL INFO\")\n\n io.sendMessageEvent(text=\"START WINTAB AXIS INFO\")\n for axname, axinfo in wtdev.axis.items():\n io.sendMessageEvent(text=\"{} Axis:\".format(axname))\n for k, v in axinfo.items():\n io.sendMessageEvent(text=\"{}: {}\".format(k,v))\n io.sendMessageEvent(text=\"END WINTAB AXIS INFO\")\n\n io.sendMessageEvent(text=\"START WINTAB CONTEXT INFO\")\n for k, v in wtdev.context.items():\n io.sendMessageEvent(text=\"{}: {}\".format(k,v))\n io.sendMessageEvent(text=\"END WINTAB CONTEXT INFO\")\n\n", "repo_name": "isolver/OpenHandWrite", "sub_path": "distribution/getwrite/experiments/ExperimentTemplate/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 7017, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 11, "dataset": "github-code", "pt": "50", "api": [{"api_name": "glob.glob", "line_number": 66, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 67, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 95, "usage_type": "call"}, {"api_name": "psychopy.gui.DlgFromDict", "line_number": 100, "usage_type": "call"}, {"api_name": "psychopy.gui", "line_number": 100, "usage_type": "name"}, {"api_name": "os.path.join", 
"line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 147, "usage_type": "call"}, {"api_name": "psychopy.iohub.client.launchHubServer", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "6312371323", "text": "import demistomock as demisto # noqa: F401\nfrom CommonServerPython import * # noqa: F401\nfrom typing import Tuple\n\nBELOW_THRESHOLD_ITEMS_CONTEXT_PATH = 'LowerSimilarityIncidents'\nABOVE_THE_THRESHOLD_ITEMS_CONTEXT_PATH = 'incidents'\n\n\ndef save_to_context(items: list, context_path: str, delete_existing: bool = False, is_sub_playbook: str = 'auto',\n table_header='Incidents Result'):\n if delete_existing:\n res = demisto.executeCommand('DeleteContext', {\"key\": context_path, \"subplaybook\": is_sub_playbook})\n if is_error(res):\n return_error('Failed to delete current context. Error details:\\n{}'.format(get_error(res)))\n\n return CommandResults(\n outputs_prefix=context_path,\n outputs=items,\n readable_output=tableToMarkdown(table_header, items))\n\n\ndef _get_incident_campaign(_id: int):\n res = demisto.executeCommand('getIncidents', {'id': _id})\n\n if is_error(res):\n return\n\n res_custom_fields = res[0]['Contents']['data'][0]['CustomFields']\n return res_custom_fields['partofcampaign'] if 'partofcampaign' in res_custom_fields else None\n\n\ndef filter_by_threshold(context: list, threshold: float) -> Tuple[list, list]:\n low = []\n high = []\n for item in context:\n if item.get('similarity') >= threshold:\n high.append(item)\n else:\n campaign = _get_incident_campaign(item['id'])\n if campaign:\n high.append(item)\n else:\n low.append(item)\n return low, high\n\n\ndef main():\n input_args = demisto.args()\n # If user did not provide a lower threshold then split is not needed.\n threshold = input_args.get('similarity_threshold')\n\n try:\n threshold = float(threshold)\n except ValueError as e:\n raise DemistoException(f'Could not use threshold: {threshold}. 
Error: {e}')\n\n    root_context_path = 'EmailCampaign'\n    above_threshold_context_path = f'{root_context_path}.{ABOVE_THE_THRESHOLD_ITEMS_CONTEXT_PATH}'\n    below_threshold_context_path = f'{root_context_path}.{BELOW_THRESHOLD_ITEMS_CONTEXT_PATH}'\n    context = demisto.get(demisto.context(), f'{above_threshold_context_path}')\n\n    # If there are no incidents to split\n    if not context:\n        return\n    only_lower_values, only_higher_values = filter_by_threshold(context, threshold)\n    result = []\n    result.append(save_to_context(only_lower_values, below_threshold_context_path,\n                                  table_header='Low Similarity Incidents Result'))\n    result.append(save_to_context(only_higher_values, above_threshold_context_path, True,\n                                  table_header='High Similarity Incidents Result'))\n    return_results(result)\n\n\nif __name__ in ['__main__', '__builtin__', 'builtins']:\n    main()\n", "repo_name": "demisto/content", "sub_path": "Packs/Campaign/Scripts/SplitCampaignContext/SplitCampaignContext.py", "file_name": "SplitCampaignContext.py", "file_ext": "py", "file_size_in_byte": 2801, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1023, "dataset": "github-code", "pt": "50", "api": [{"api_name": "demistomock.executeCommand", "line_number": 12, "usage_type": "call"}, {"api_name": "demistomock.executeCommand", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 32, "usage_type": "name"}, {"api_name": "demistomock.args", "line_number": 48, "usage_type": "call"}, {"api_name": "demistomock.get", "line_number": 60, "usage_type": "call"}, {"api_name": "demistomock.context", "line_number": 60, "usage_type": "call"}]}
+{"seq_id": "4390221181", "text": "#!/usr/bin/env python\n# coding: utf-8\n\nimport paramiko\nimport numpy as np\nimport pandas as pd\nimport os\nimport time\nimport io\n\nprint(paramiko.__version__)\n\n\ndef waitStrems(chan): \n    time.sleep(1) \n    outdata=errdata = \"\" \n    while chan.recv_ready(): \n        outdata += str(chan.recv(1000).decode('utf-8', 'ignore')) \n    while chan.recv_stderr_ready(): \n        errdata += str(chan.recv_stderr(1000).decode('utf-8', 'ignore')) \n    return outdata, errdata\n\n\ntry :\n    ssh = paramiko.SSHClient()\n    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n    ssh.connect('IP', username='ID', password='Password'\n                , key_filename='pub file Path' , look_for_keys=False, allow_agent=False)\n    print('ssh connected.')\n    \n    # Use a shell session\n    se1 = ssh.invoke_shell()\n    se1.settimeout(9999)\n    se1.send('cd bin\\n')\n    se1.send('pwd\\n')\n    se1.send('ls\\n')\n    outdata, errdata = waitStrems(se1)\n#    print(outdata)\n    \n    se1.send(conv_str)\n    status='Normal'\n    while status!='End':\n        time.sleep(1)\n        resp = str(se1.recv(100000).decode('utf-8', 'ignore'))\n        print(resp)\n        if resp.count(input_file) > 1:\n            status='End'\n\n    print('Thank you!') \n\n    ssh.close()\nexcept Exception as err:\n    print(err)\n\n# Use ftp (file upload)\nimport ftplib\n\nftp = ftplib.FTP()\nftp.connect(host='IP')\nftp.encoding='encoding'\nftp.decode='decoding'\nftp.login(user='ID', passwd = 'Password')\nftp.retrlines('LIST')\nfiles = ftp.nlst()\nprint(files)\n\n", "repo_name": "Ockddak/studyPython", "sub_path": "기타/sshConnect.py", "file_name": "sshConnect.py", "file_ext": "py", "file_size_in_byte": 1511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "paramiko.__version__", "line_number": 11, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 25, 
"usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "ftplib.FTP", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "23983158860", "text": "from django.test import SimpleTestCase, TestCase\nfrom django.test.client import Client\nfrom .views import job_detail\nfrom .models import Job\nfrom user.models import CustomUser\nfrom django.urls import reverse\n\n\nclass SetupClass(TestCase):\n\t\"\"\"generate testuser for the test client\"\"\"\n\tdef setUp(self):\n\t\tself.user = CustomUser.objects.create_user(\n\t\t\t'testuser1',\n\t\t\t'test@email.com',\n\t\t\t'testpassword1'\n\t\t)\n\t\tself.client = Client()\n\t\tself.client.login(username = 'testuser1', password = 'testpassword1')\n\nclass JobViewsTest(SetupClass):\n\t\"\"\" Test if Job views return the correct views \n\tand templates \"\"\"\n\tdef test_job_list_view(self):\n\t\tresponse = self.client.get(reverse('job_list'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_list.html')\n\n\tdef test_job_detail_view(self):\n\t\tjob_detail = Job.objects.create()\n\t\tresponse = self.client.get(reverse('job_detail', args=[job_detail.id]))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_detail.html')\n\n\tdef test_job_list_open(self):\n\t\tresponse = self.client.get(reverse('job_list_open'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_list_open.html')\n\n\tdef test_job_list_closed(self):\n\t\tresponse = self.client.get(reverse('job_list_closed'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_list_closed.html')\n\n\tdef test_job_create(self):\n\t\tresponse = self.client.get(reverse('job_create'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_create.html')\n\n\tdef test_job_update(self):\n\t\t\"\"\"View contains if job.author != request.user\"\"\"\n\t\t# create job object with self,user instance as author\n\t\tjob = Job.objects.create(author = self.user)\n\t\tresponse = self.client.get(reverse('job_update', args=[job.id]))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertTemplateUsed(response, 'jobs/job_update.html')\n\n", "repo_name": "DominicH247/bbm-job-tracker", "sub_path": "jobs/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1985, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.test.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "user.models.CustomUser.objects.create_user", "line_number": 12, "usage_type": "call"}, {"api_name": "user.models.CustomUser.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "user.models.CustomUser", "line_number": 12, "usage_type": "name"}, {"api_name": "django.test.client.Client", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 24, "usage_type": "call"}, {"api_name": "views.job_detail", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Job.objects.create", "line_number": 29, "usage_type": "call"}, {"api_name": "models.Job.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Job", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 30, 
"usage_type": "call"}, {"api_name": "views.job_detail.id", "line_number": 30, "usage_type": "attribute"}, {"api_name": "views.job_detail", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Job.objects.create", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Job.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Job", "line_number": 52, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "31032634812", "text": "import argparse\nimport datetime as dt\nimport os\nimport warnings\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport soundfile as sf\nimport torch\n\nfrom matcha.hifigan.config import v1\nfrom matcha.hifigan.denoiser import Denoiser\nfrom matcha.hifigan.env import AttrDict\nfrom matcha.hifigan.models import Generator as HiFiGAN\nfrom matcha.models.matcha_tts import MatchaTTS\nfrom matcha.text import sequence_to_text, text_to_sequence\nfrom matcha.utils.utils import assert_model_downloaded, get_user_data_dir, intersperse\n\nMATCHA_URLS = {\n \"matcha_ljspeech\": \"https://drive.google.com/file/d/1BBzmMU7k3a_WetDfaFblMoN18GqQeHCg/view?usp=drive_link\",\n \"matcha_vctk\": \"https://drive.google.com/file/d/1enuxmfslZciWGAl63WGh2ekVo00FYuQ9/view?usp=drive_link\",\n}\n\nVOCODER_URLS = {\n \"hifigan_T2_v1\": \"https://drive.google.com/file/d/14NENd4equCBLyyCSke114Mv6YR_j_uFs/view?usp=drive_link\",\n \"hifigan_univ_v1\": \"https://drive.google.com/file/d/1qpgI41wNXFcH-iKq1Y42JlBC9j0je8PW/view?usp=drive_link\",\n}\n\nMULTISPEAKER_MODEL = {\n \"matcha_vctk\": {\"vocoder\": \"hifigan_univ_v1\", \"speaking_rate\": 0.85, \"spk\": 0, \"spk_range\": (0, 107)}\n}\n\nSINGLESPEAKER_MODEL = {\"matcha_ljspeech\": {\"vocoder\": \"hifigan_T2_v1\", \"speaking_rate\": 0.95, \"spk\": None}}\n\n\ndef plot_spectrogram_to_numpy(spectrogram, filename):\n fig, ax = plt.subplots(figsize=(12, 3))\n im = ax.imshow(spectrogram, aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.colorbar(im, ax=ax)\n plt.xlabel(\"Frames\")\n plt.ylabel(\"Channels\")\n plt.title(\"Synthesised Mel-Spectrogram\")\n fig.canvas.draw()\n plt.savefig(filename)\n\n\ndef process_text(i: int, text: str, device: torch.device):\n print(f\"[{i}] - Input text: {text}\")\n x = torch.tensor(\n intersperse(text_to_sequence(text, [\"english_cleaners2\"]), 0),\n dtype=torch.long,\n device=device,\n )[None]\n x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device)\n x_phones = sequence_to_text(x.squeeze(0).tolist())\n print(f\"[{i}] - Phonetised text: {x_phones[1::2]}\")\n\n return {\"x_orig\": text, \"x\": x, \"x_lengths\": x_lengths, \"x_phones\": x_phones}\n\n\ndef get_texts(args):\n if args.text:\n texts = [args.text]\n else:\n with open(args.file) as f:\n texts = f.readlines()\n return texts\n\n\ndef assert_required_models_available(args):\n save_dir = get_user_data_dir()\n if not hasattr(args, \"checkpoint_path\") and args.checkpoint_path is None:\n model_path = args.checkpoint_path\n else:\n model_path = save_dir / f\"{args.model}.ckpt\"\n assert_model_downloaded(model_path, MATCHA_URLS[args.model])\n\n vocoder_path = save_dir / f\"{args.vocoder}\"\n assert_model_downloaded(vocoder_path, VOCODER_URLS[args.vocoder])\n return 
{\"matcha\": model_path, \"vocoder\": vocoder_path}\n\n\ndef load_hifigan(checkpoint_path, device):\n    h = AttrDict(v1)\n    hifigan = HiFiGAN(h).to(device)\n    hifigan.load_state_dict(torch.load(checkpoint_path, map_location=device)[\"generator\"])\n    _ = hifigan.eval()\n    hifigan.remove_weight_norm()\n    return hifigan\n\n\ndef load_vocoder(vocoder_name, checkpoint_path, device):\n    print(f\"[!] Loading {vocoder_name}!\")\n    vocoder = None\n    if vocoder_name in (\"hifigan_T2_v1\", \"hifigan_univ_v1\"):\n        vocoder = load_hifigan(checkpoint_path, device)\n    else:\n        raise NotImplementedError(\n            f\"Vocoder {vocoder_name} not implemented! define a load_<> method for it\"\n        )\n\n    denoiser = Denoiser(vocoder, mode=\"zeros\")\n    print(f\"[+] {vocoder_name} loaded!\")\n    return vocoder, denoiser\n\n\ndef load_matcha(model_name, checkpoint_path, device):\n    print(f\"[!] Loading {model_name}!\")\n    model = MatchaTTS.load_from_checkpoint(checkpoint_path, map_location=device)\n    _ = model.eval()\n\n    print(f\"[+] {model_name} loaded!\")\n    return model\n\n\ndef to_waveform(mel, vocoder, denoiser=None):\n    audio = vocoder(mel).clamp(-1, 1)\n    if denoiser is not None:\n        audio = denoiser(audio.squeeze(), strength=0.00025).cpu().squeeze()\n\n    return audio.cpu().squeeze()\n\n\ndef save_to_folder(filename: str, output: dict, folder: str):\n    folder = Path(folder)\n    folder.mkdir(exist_ok=True, parents=True)\n    plot_spectrogram_to_numpy(np.array(output[\"mel\"].squeeze().float().cpu()), f\"{filename}.png\")\n    np.save(folder / f\"{filename}\", output[\"mel\"].cpu().numpy())\n    sf.write(folder / f\"{filename}.wav\", output[\"waveform\"], 22050, \"PCM_24\")\n    return folder.resolve() / f\"{filename}.wav\"\n\n\ndef validate_args(args):\n    assert (\n        args.text or args.file\n    ), \"Either text or file must be provided; Matcha-T(ea)TTS needs some text to whisk the waveforms.\"\n    assert args.temperature >= 0, \"Sampling temperature cannot be negative\"\n    assert args.steps > 0, \"Number of ODE steps must be greater than 0\"\n\n    if args.checkpoint_path is None:\n        # When using pretrained models\n        if args.model in SINGLESPEAKER_MODEL:\n            args = validate_args_for_single_speaker_model(args)\n\n        if args.model in MULTISPEAKER_MODEL:\n            args = validate_args_for_multispeaker_model(args)\n    else:\n        # When using a custom model\n        if args.vocoder != \"hifigan_univ_v1\":\n            warn_ = \"[-] Using custom model checkpoint! I would suggest passing --vocoder hifigan_univ_v1, unless the custom model is trained on LJ Speech.\"\n            warnings.warn(warn_, UserWarning)\n        if args.speaking_rate is None:\n            args.speaking_rate = 1.0\n\n    if args.batched:\n        assert args.batch_size > 0, \"Batch size must be greater than 0\"\n    assert args.speaking_rate > 0, \"Speaking rate must be greater than 0\"\n\n    return args\n\n\ndef validate_args_for_multispeaker_model(args):\n    if args.vocoder is not None:\n        if args.vocoder != MULTISPEAKER_MODEL[args.model][\"vocoder\"]:\n            warn_ = f\"[-] Using {args.model} model! 
I would suggest passing --vocoder {MULTISPEAKER_MODEL[args.model]['vocoder']}\"\n            warnings.warn(warn_, UserWarning)\n    else:\n        args.vocoder = MULTISPEAKER_MODEL[args.model][\"vocoder\"]\n\n    if args.speaking_rate is None:\n        args.speaking_rate = MULTISPEAKER_MODEL[args.model][\"speaking_rate\"]\n\n    spk_range = MULTISPEAKER_MODEL[args.model][\"spk_range\"]\n    if args.spk is not None:\n        assert (\n            args.spk >= spk_range[0] and args.spk <= spk_range[-1]\n        ), f\"Speaker ID must be between {spk_range} for this model.\"\n    else:\n        available_spk_id = MULTISPEAKER_MODEL[args.model][\"spk\"]\n        warn_ = f\"[!] Speaker ID not provided! Using speaker ID {available_spk_id}\"\n        warnings.warn(warn_, UserWarning)\n        args.spk = available_spk_id\n\n    return args\n\n\ndef validate_args_for_single_speaker_model(args):\n    if args.vocoder is not None:\n        if args.vocoder != SINGLESPEAKER_MODEL[args.model][\"vocoder\"]:\n            warn_ = f\"[-] Using {args.model} model! I would suggest passing --vocoder {SINGLESPEAKER_MODEL[args.model]['vocoder']}\"\n            warnings.warn(warn_, UserWarning)\n    else:\n        args.vocoder = SINGLESPEAKER_MODEL[args.model][\"vocoder\"]\n\n    if args.speaking_rate is None:\n        args.speaking_rate = SINGLESPEAKER_MODEL[args.model][\"speaking_rate\"]\n\n    if args.spk != SINGLESPEAKER_MODEL[args.model][\"spk\"]:\n        warn_ = f\"[-] Ignoring speaker id {args.spk} for {args.model}\"\n        warnings.warn(warn_, UserWarning)\n        args.spk = SINGLESPEAKER_MODEL[args.model][\"spk\"]\n\n    return args\n\n\n@torch.inference_mode()\ndef cli():\n    parser = argparse.ArgumentParser(\n        description=\" 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching\"\n    )\n    parser.add_argument(\n        \"--model\",\n        type=str,\n        default=\"matcha_ljspeech\",\n        help=\"Model to use\",\n        choices=MATCHA_URLS.keys(),\n    )\n\n    parser.add_argument(\n        \"--checkpoint_path\",\n        type=str,\n        default=None,\n        help=\"Path to the custom model checkpoint\",\n    )\n\n    parser.add_argument(\n        \"--vocoder\",\n        type=str,\n        default=None,\n        help=\"Vocoder to use (default: will use the one suggested with the pretrained model))\",\n        choices=VOCODER_URLS.keys(),\n    )\n    parser.add_argument(\"--text\", type=str, default=None, help=\"Text to synthesize\")\n    parser.add_argument(\"--file\", type=str, default=None, help=\"Text file to synthesize\")\n    parser.add_argument(\"--spk\", type=int, default=None, help=\"Speaker ID\")\n    parser.add_argument(\n        \"--temperature\",\n        type=float,\n        default=0.667,\n        help=\"Variance of the x0 noise (default: 0.667)\",\n    )\n    parser.add_argument(\n        \"--speaking_rate\",\n        type=float,\n        default=None,\n        help=\"change the speaking rate, a higher value means slower speaking rate (default: 1.0)\",\n    )\n    parser.add_argument(\"--steps\", type=int, default=10, help=\"Number of ODE steps (default: 10)\")\n    parser.add_argument(\"--cpu\", action=\"store_true\", help=\"Use CPU for inference (default: use GPU if available)\")\n    parser.add_argument(\n        \"--denoiser_strength\",\n        type=float,\n        default=0.00025,\n        help=\"Strength of the vocoder bias denoiser (default: 0.00025)\",\n    )\n    parser.add_argument(\n        \"--output_folder\",\n        type=str,\n        default=os.getcwd(),\n        help=\"Output folder to save results (default: current dir)\",\n    )\n    parser.add_argument(\"--batched\", action=\"store_true\", help=\"Batched inference (default: False)\")\n    parser.add_argument(\n        \"--batch_size\", type=int, default=32, help=\"Batch size only useful when --batched (default: 32)\"\n    )\n\n    args = parser.parse_args()\n\n    args = validate_args(args)\n    device = get_device(args)\n    print_config(args)\n    
paths = assert_required_models_available(args)\n\n    if args.checkpoint_path is not None:\n        print(f\"[🍵] Loading custom model from {args.checkpoint_path}\")\n        paths[\"matcha\"] = args.checkpoint_path\n        args.model = \"custom_model\"\n\n    model = load_matcha(args.model, paths[\"matcha\"], device)\n    vocoder, denoiser = load_vocoder(args.vocoder, paths[\"vocoder\"], device)\n\n    texts = get_texts(args)\n\n    spk = torch.tensor([args.spk], device=device, dtype=torch.long) if args.spk is not None else None\n    if len(texts) == 1 or not args.batched:\n        unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk)\n    else:\n        batched_synthesis(args, device, model, vocoder, denoiser, texts, spk)\n\n\nclass BatchedSynthesisDataset(torch.utils.data.Dataset):\n    def __init__(self, processed_texts):\n        self.processed_texts = processed_texts\n\n    def __len__(self):\n        return len(self.processed_texts)\n\n    def __getitem__(self, idx):\n        return self.processed_texts[idx]\n\n\ndef batched_collate_fn(batch):\n    x = []\n    x_lengths = []\n\n    for b in batch:\n        x.append(b[\"x\"].squeeze(0))\n        x_lengths.append(b[\"x_lengths\"])\n\n    x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True)\n    x_lengths = torch.concat(x_lengths, dim=0)\n    return {\"x\": x, \"x_lengths\": x_lengths}\n\n\ndef batched_synthesis(args, device, model, vocoder, denoiser, texts, spk):\n    total_rtf = []\n    total_rtf_w = []\n    processed_text = [process_text(i, text, \"cpu\") for i, text in enumerate(texts)]\n    dataloader = torch.utils.data.DataLoader(\n        BatchedSynthesisDataset(processed_text),\n        batch_size=args.batch_size,\n        collate_fn=batched_collate_fn,\n        num_workers=8,\n    )\n    for i, batch in enumerate(dataloader):\n        i = i + 1\n        start_t = dt.datetime.now()\n        output = model.synthesise(\n            batch[\"x\"].to(device),\n            batch[\"x_lengths\"].to(device),\n            n_timesteps=args.steps,\n            temperature=args.temperature,\n            spks=spk,\n            length_scale=args.speaking_rate,\n        )\n\n        output[\"waveform\"] = to_waveform(output[\"mel\"], vocoder, denoiser)\n        t = (dt.datetime.now() - start_t).total_seconds()\n        rtf_w = t * 22050 / (output[\"waveform\"].shape[-1])\n        print(f\"[🍵-Batch: {i}] Matcha-TTS RTF: {output['rtf']:.4f}\")\n        print(f\"[🍵-Batch: {i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}\")\n        total_rtf.append(output[\"rtf\"])\n        total_rtf_w.append(rtf_w)\n        for j in range(output[\"mel\"].shape[0]):\n            base_name = f\"utterance_{j:03d}_speaker_{args.spk:03d}\" if args.spk is not None else f\"utterance_{j:03d}\"\n            length = output[\"mel_lengths\"][j]\n            new_dict = {\"mel\": output[\"mel\"][j][:, :length], \"waveform\": output[\"waveform\"][j][: length * 256]}\n            location = save_to_folder(base_name, new_dict, args.output_folder)\n            print(f\"[🍵-{j}] Waveform saved: {location}\")\n\n    print(\"\".join([\"=\"] * 100))\n    print(f\"[🍵] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf)}\")\n    print(f\"[🍵] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w)}\")\n    print(\"[🍵] Enjoy the freshly whisked 🍵 Matcha-TTS!\")\n\n\ndef unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk):\n    total_rtf = []\n    total_rtf_w = []\n    for i, text in enumerate(texts):\n        i = i + 1\n        base_name = f\"utterance_{i:03d}_speaker_{args.spk:03d}\" if args.spk is not None else f\"utterance_{i:03d}\"\n\n        print(\"\".join([\"=\"] * 100))\n        text = text.strip()\n        text_processed = process_text(i, text, device)\n\n        print(f\"[🍵] Whisking Matcha-T(ea)TS for: {i}\")\n        start_t = dt.datetime.now()\n        output = model.synthesise(\n            text_processed[\"x\"],\n            
text_processed[\"x_lengths\"],\n            n_timesteps=args.steps,\n            temperature=args.temperature,\n            spks=spk,\n            length_scale=args.speaking_rate,\n        )\n        output[\"waveform\"] = to_waveform(output[\"mel\"], vocoder, denoiser)\n        # RTF with HiFiGAN\n        t = (dt.datetime.now() - start_t).total_seconds()\n        rtf_w = t * 22050 / (output[\"waveform\"].shape[-1])\n        print(f\"[🍵-{i}] Matcha-TTS RTF: {output['rtf']:.4f}\")\n        print(f\"[🍵-{i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}\")\n        total_rtf.append(output[\"rtf\"])\n        total_rtf_w.append(rtf_w)\n\n        location = save_to_folder(base_name, output, args.output_folder)\n        print(f\"[+] Waveform saved: {location}\")\n\n    print(\"\".join([\"=\"] * 100))\n    print(f\"[🍵] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf)}\")\n    print(f\"[🍵] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w)}\")\n    print(\"[🍵] Enjoy the freshly whisked 🍵 Matcha-TTS!\")\n\n\ndef print_config(args):\n    print(\"[!] Configurations: \")\n    print(f\"\\t- Model: {args.model}\")\n    print(f\"\\t- Vocoder: {args.vocoder}\")\n    print(f\"\\t- Temperature: {args.temperature}\")\n    print(f\"\\t- Speaking rate: {args.speaking_rate}\")\n    print(f\"\\t- Number of ODE steps: {args.steps}\")\n    print(f\"\\t- Speaker: {args.spk}\")\n\n\ndef get_device(args):\n    if torch.cuda.is_available() and not args.cpu:\n        print(\"[+] GPU Available! Using GPU\")\n        device = torch.device(\"cuda\")\n    else:\n        print(\"[-] GPU not available or forced CPU run! Using CPU\")\n        device = torch.device(\"cpu\")\n    return device\n\n\nif __name__ == \"__main__\":\n    cli()\n", "repo_name": "shivammehta25/Matcha-TTS", "sub_path": "matcha/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 15278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 212, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 50, "usage_type": "call"}, {"api_name": "matcha.utils.utils.intersperse", "line_number": 51, "usage_type": "call"}, {"api_name": "matcha.text.text_to_sequence", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 55, "usage_type": "attribute"}, {"api_name": "matcha.text.sequence_to_text", "line_number": 56, "usage_type": "call"}, {"api_name": "matcha.utils.utils.get_user_data_dir", "line_number": 72, "usage_type": "call"}, 
{"api_name": "matcha.utils.utils.assert_model_downloaded", "line_number": 77, "usage_type": "call"}, {"api_name": "matcha.utils.utils.assert_model_downloaded", "line_number": 80, "usage_type": "call"}, {"api_name": "matcha.hifigan.env.AttrDict", "line_number": 85, "usage_type": "call"}, {"api_name": "matcha.hifigan.config.v1", "line_number": 85, "usage_type": "argument"}, {"api_name": "matcha.hifigan.models.Generator", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 87, "usage_type": "call"}, {"api_name": "matcha.hifigan.denoiser.Denoiser", "line_number": 103, "usage_type": "call"}, {"api_name": "matcha.models.matcha_tts.MatchaTTS.load_from_checkpoint", "line_number": 110, "usage_type": "call"}, {"api_name": "matcha.models.matcha_tts.MatchaTTS", "line_number": 110, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 129, "usage_type": "call"}, {"api_name": "soundfile.write", "line_number": 130, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 152, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 167, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 182, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 192, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 201, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 209, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 285, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 285, "usage_type": "attribute"}, {"api_name": "torch.inference_mode", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 292, "usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 311, "usage_type": "attribute"}, {"api_name": "torch.concat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 320, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 328, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 328, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 339, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 339, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 353, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 354, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 370, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 370, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 381, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 381, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 393, "usage_type": "call"}, 
{"api_name": "torch.cuda.is_available", "line_number": 408, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 408, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 410, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 413, "usage_type": "call"}]} +{"seq_id": "16047269010", "text": "from __future__ import division\nimport sys\nfrom pathlib import Path\nimport pandas as pd\nimport selfUtils as su\nimport csv\nfrom math import log\nfrom scipy.stats import laplace\nimport queue\nimport numpy as np\nimport os\n\n\ndef calculate_ratio(packets):\n s = r = 0\n for p in packets:\n s += p[1]\n for i,p in enumerate(packets):\n if i == 0:\n r = p[1] / s\n p.append(r)\n continue\n r = (p[1] / s) + packets[i - 1][2]\n p.append(r)\n\n\ndef info_stat(eps, trace_name, ori_size, real_ap_overhead, et_ap_overhead, ap_overall_overhead, real_lap_overhead, et_lap_overhead, lap_overall_overhead, ori_end, overall_delay, unfinished):\n\n with open('stats/overhead_list_' + str(eps) + '.csv','a') as build:\n writer = csv.writer(build)\n writer.writerow([trace_name, ori_size, real_ap_overhead, et_ap_overhead, ap_overall_overhead,\n real_lap_overhead, et_lap_overhead, lap_overall_overhead, ori_end, overall_delay, unfinished])\n\n\ndef lap_trace(packets, lap_list, eps):\n # lap_list = []\n g = 0\n r = 0\n num = -1\n i = len(lap_list)\n\n g = su.cal_g(i)\n if i == 1 or i == su.cal_d(i):\n # r = int(np.random.laplace(0, 1/eps))\n r = int(laplace.rvs(0, 1/eps))\n else:\n num = int(log(i, 2))\n # r = int(np.random.laplace(0, num/eps))\n r = int(laplace.rvs(0, num/eps))\n x = lap_list[g] + (packets[i] - packets[g]) + r\n # print(g, i)\n # if x > 1500:\n # x = 1500\n if x < 0:\n x = 0\n\n n = x\n return n, x - packets[i]\n\n\ndef dp_bin(ori_packets, eps_light, eps_heavy, selected_idxes):\n\n buffer_q = queue.Queue()\n buffer_p = [] # [time, index, size]\n proc_q = queue.Queue() # [buffered_time, buffered_index, size, cleaned_time, cleaned_index, real_n]\n return_q = queue.Queue()\n lap_overhead = 0\n n = 0\n ori_packets = [n] + ori_packets\n lap_list = [n]\n while len(lap_list) != len(ori_packets):\n idx = len(lap_list)\n lap_p = 0\n if (idx+1) not in selected_idxes:\n lap_p, diff_o = lap_trace(ori_packets, lap_list, eps_light)\n lap_overhead+= diff_o\n else:\n lap_p, diff_o = lap_trace(ori_packets, lap_list, eps_heavy)\n lap_overhead+= diff_o\n # continue\n # else:\n lap_list.append(lap_p)\n\n\n return lap_list, proc_q, lap_overhead\n\n\ndef main():\n\n b = float(sys.argv[1])\n r = int(sys.argv[2])\n eps_heavy = float(sys.argv[3])\n eps_light = 0.000005\n\n # method = 'mi'\n # b = 0.25\n # r = 720\n # eps_heavy = 0.000005\n # eps_light = 0.000005\n\n\n l = int(180/b)\n csv_path = 'datafiles/video/video_bin_'+ str(b) +'.csv'\n packets = pd.read_csv(csv_path)\n packets = packets.values.tolist()\n\n selected_indexes = [i for i in range(120,120+r)]\n # selected_indexes.sort()\n s = 0\n overhead = 0\n ori_size = 0\n for trace in packets:\n incoming = trace[1:]\n s+=1\n idx = s%200\n incoming_lap_list, incoming_proc_q, in_overhead = dp_bin(incoming, eps_light, eps_heavy, selected_indexes)\n incoming_lap_list[0] = trace[1]\n lap_list = incoming_lap_list\n with open('/home/lhp/Documents/pfi_' + str(b) + '_test_' + str(r) + '_' + str(eps_heavy) +'_' + str(eps_light) +'.csv', 'a') as w:\n writer = csv.writer(w)\n writer.writerow(lap_list)\n overhead += in_overhead\n ori_size += sum(incoming)\n\n if s%200 == 0:\n print(str(int(s)) + ' ' + str(b) + ' ' +str(selected_indexes[-1]) + 
' ' + str(eps_heavy) + ' is finished')\n # with open('overhead_selected/' + str(b) +'overhead_' + str(r) + '_' + method + '_' + str(eps) + '.csv', 'a') as w:\n\n with open('pfi_weighted_overhead_test.csv', 'a') as w:\n writer = csv.writer(w)\n writer.writerow([b, r, eps_heavy, eps_light, ori_size, overhead])\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "li2haipeng/feature_analysis", "sub_path": "laplace_test.py", "file_name": "laplace_test.py", "file_ext": "py", "file_size_in_byte": 3956, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.writer", "line_number": 30, "usage_type": "call"}, {"api_name": "selfUtils.cal_g", "line_number": 42, "usage_type": "call"}, {"api_name": "selfUtils.cal_d", "line_number": 43, "usage_type": "call"}, {"api_name": "scipy.stats.laplace.rvs", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.stats.laplace", "line_number": 45, "usage_type": "name"}, {"api_name": "math.log", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.stats.laplace.rvs", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.stats.laplace", "line_number": 49, "usage_type": "name"}, {"api_name": "queue.Queue", "line_number": 63, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 65, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 66, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 104, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 120, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "8218034568", "text": "# -*- coding:utf-8 -*-\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport datetime\nfrom nlp.relation_extract.model import GRU\nfrom nlp.relation_extract.dataset.utils import handleData\n\n\ndef train(args):\n handleData(args.data_dir)\n\n print('reading wordembedding')\n wordembedding = np.load(os.path.join(args.data_dir, 'vec.npy'))\n\n print('reading training data')\n train_y = np.load(os.path.join(args.data_dir, 'train_y.npy'))\n train_word = np.load(os.path.join(args.data_dir, 'train_word.npy'))\n train_pos1 = np.load(os.path.join(args.data_dir, 'train_pos1.npy'))\n train_pos2 = np.load(os.path.join(args.data_dir, 'train_pos2.npy'))\n\n args.vocab_size = len(wordembedding)\n args.num_classes = len(train_y[0])\n\n with tf.Graph().as_default():\n sess = tf.Session()\n with sess.as_default():\n initializer = tf.contrib.layers.xavier_initializer()\n with tf.variable_scope(\"model\", reuse=None, initializer=initializer):\n m = GRU(is_training=True, word_embeddings=wordembedding, args=args)\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(0.0005)\n\n train_op = optimizer.minimize(m.final_loss, global_step=global_step)\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(max_to_keep=None)\n\n merged_summary = tf.summary.merge_all()\n summary_writer = tf.summary.FileWriter(args.summary_dir + '/train_loss', sess.graph)\n\n def train_step(word_batch, pos1_batch, pos2_batch, y_batch, big_num):\n feed_dict = {}\n total_shape = []\n total_num = 0\n total_word = []\n total_pos1 = []\n total_pos2 = []\n for i in range(len(word_batch)):\n 
total_shape.append(total_num)\n                    total_num += len(word_batch[i])\n                    for word in word_batch[i]:\n                        total_word.append(word)\n                    for pos1 in pos1_batch[i]:\n                        total_pos1.append(pos1)\n                    for pos2 in pos2_batch[i]:\n                        total_pos2.append(pos2)\n                total_shape.append(total_num)\n                total_shape = np.array(total_shape)\n                total_word = np.array(total_word)\n                total_pos1 = np.array(total_pos1)\n                total_pos2 = np.array(total_pos2)\n\n                feed_dict[m.total_shape] = total_shape\n                feed_dict[m.input_word] = total_word\n                feed_dict[m.input_pos1] = total_pos1\n                feed_dict[m.input_pos2] = total_pos2\n                feed_dict[m.input_y] = y_batch\n\n                temp, step, loss, accuracy, summary, l2_loss, final_loss = sess.run(\n                    [train_op, global_step, m.total_loss, m.accuracy, merged_summary, m.l2_loss, m.final_loss],\n                    feed_dict)\n                time_str = datetime.datetime.now().isoformat()\n                accuracy = np.reshape(np.array(accuracy), (big_num))\n                acc = np.mean(accuracy)\n                summary_writer.add_summary(summary, step)\n\n                if step % 50 == 0:\n                    tempstr = \"{}: step {}, softmax_loss {:g}, acc {:g}\".format(time_str, step, loss, acc)\n                    print(tempstr)\n\n            for one_epoch in range(args.num_epochs):\n                print(\"epoch num %d\" % one_epoch)\n                temp_order = list(range(len(train_word)))\n                np.random.shuffle(temp_order)\n                for i in range(int(len(temp_order) / float(args.big_num))):\n                    temp_word = []\n                    temp_pos1 = []\n                    temp_pos2 = []\n                    temp_y = []\n\n                    temp_input = temp_order[i * args.big_num:(i + 1) * args.big_num]\n                    for k in temp_input:\n                        temp_word.append(train_word[k])\n                        temp_pos1.append(train_pos1[k])\n                        temp_pos2.append(train_pos2[k])\n                        temp_y.append(train_y[k])\n                    num = 0\n                    for single_word in temp_word:\n                        num += len(single_word)\n\n                    if num > 1500:\n                        print('out of range')\n                        continue\n\n                    temp_word = np.array(temp_word)\n                    temp_pos1 = np.array(temp_pos1)\n                    temp_pos2 = np.array(temp_pos2)\n                    temp_y = np.array(temp_y)\n\n                    train_step(temp_word, temp_pos1, temp_pos2, temp_y, args.big_num)\n\n                    current_step = tf.train.global_step(sess, global_step)\n                    if current_step % 100 == 0:\n                        print('saving model')\n                        path = saver.save(sess, os.path.join(args.train_dir, 'ATT_GRU_model'), global_step=current_step)\n                        tempstr = 'have saved model to ' + path\n                        print(tempstr)\n", "repo_name": "xrick/tensorflow_nlp", "sub_path": "nlp/relation_extract/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "nlp.relation_extract.dataset.utils.handleData", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.Graph", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.contrib.layers.xavier_initializer", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 30, "usage_type": "call"}, {"api_name": "nlp.relation_extract.model.GRU", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.merge_all", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.train.global_step", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "4885772885", "text": "import pytest\nfrom app.android.ut.common import *\n\nfrom frame.po.App import App\nclass TestSearchPage(object):\n @classmethod\n def setup_class(cls):\n cls.searchpage=App.tohome().to_search()\n\n\n # @pytest.mark.parametrize(\"name,excepted\",[(\"PDD\",\"PDD\"),(\"alibaba\",\"BABA\")])\n # def test_search(self,name,excepted):\n # realvalue=''\n # self.driver=self.searchpage.stock_search(name)\n # realvalue=self.searchpage.get_stock_name()\n # assert realvalue in excepted\n # self.searchpage.stock_back()\n\n @pytest.mark.parametrize(\"name,excepted\", [(\"拼多多\", \"已添加\"), (\"阿里巴巴\", \"已添加\")])\n def test_add_stock(self,name,excepted):\n button_text=''\n 
self.driver=self.searchpage.stock_search(name)\n        self.driver=self.searchpage.add_stock()\n        button_text=self.searchpage.get_add_button_status()\n        assert button_text == excepted\n        self.searchpage.cancel_stock()  # remove the stock from the watchlist\n\n\n\n    # def teardown_method(self):\n    #     self.searchpage.stock_back()\n", "repo_name": "susanaochaonan/UI", "sub_path": "frame/testcase/test_search.py", "file_name": "test_search.py", "file_ext": "py", "file_size_in_byte": 1044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "frame.po.App.App.tohome", "line_number": 8, "usage_type": "call"}, {"api_name": "frame.po.App.App", "line_number": 8, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}]}
+{"seq_id": "9304635865", "text": "from __future__ import absolute_import\n\nfrom contextlib import contextmanager\nimport copy\n\nimport dbt.compat\nimport dbt.deprecations\nimport dbt.exceptions\nimport dbt.schema\nimport dbt.flags as flags\nimport dbt.clients.gcloud\nimport dbt.clients.agate_helper\n\nfrom dbt.adapters.postgres import PostgresAdapter\nfrom dbt.adapters.bigquery.relation import BigQueryRelation\nfrom dbt.contracts.connection import Connection\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nimport google.auth\nimport google.api_core\nimport google.oauth2\nimport google.cloud.exceptions\nimport google.cloud.bigquery\n\nimport time\nimport agate\n\n\nclass BigQueryAdapter(PostgresAdapter):\n\n    config_functions = [\n        # deprecated -- use versions that take relations instead\n        \"query_for_existing\",\n        \"execute_model\",\n        \"create_temporary_table\",\n        \"drop\",\n        \"execute\",\n        \"quote_schema_and_table\",\n        \"make_date_partitioned_table\",\n        \"already_exists\",\n        \"expand_target_column_types\",\n        \"load_dataframe\",\n        \"get_missing_columns\",\n        \"cache_new_relation\",\n\n        \"create_schema\",\n        \"alter_table_add_columns\",\n\n        # versions of adapter functions that take / return Relations\n        \"get_relation\",\n        \"drop_relation\",\n        \"rename_relation\",\n\n        \"get_columns_in_table\",\n\n        # formerly profile functions\n        \"add_query\",\n    ]\n\n    SCOPE = ('https://www.googleapis.com/auth/bigquery',\n             'https://www.googleapis.com/auth/cloud-platform',\n             'https://www.googleapis.com/auth/drive')\n\n    RELATION_TYPES = {\n        'TABLE': BigQueryRelation.Table,\n        'VIEW': BigQueryRelation.View,\n        'EXTERNAL': BigQueryRelation.External\n    }\n\n    QUERY_TIMEOUT = 300\n    Relation = BigQueryRelation\n    Column = dbt.schema.BigQueryColumn\n\n    @classmethod\n    def handle_error(cls, error, message, sql):\n        logger.debug(message.format(sql=sql))\n        logger.debug(error)\n        error_msg = \"\\n\".join(\n            [item['message'] for item in error.errors])\n\n        raise dbt.exceptions.DatabaseException(error_msg)\n\n    @contextmanager\n    def exception_handler(self, sql, model_name=None,\n                          connection_name='master'):\n        try:\n            yield\n\n        except google.cloud.exceptions.BadRequest as e:\n            message = \"Bad request while running:\\n{sql}\"\n            self.handle_error(e, message, sql)\n\n        except google.cloud.exceptions.Forbidden as e:\n            message = \"Access denied while running:\\n{sql}\"\n            self.handle_error(e, message, sql)\n\n        except Exception as e:\n            logger.debug(\"Unhandled error while running:\\n{}\".format(sql))\n            logger.debug(e)\n            raise dbt.exceptions.RuntimeException(dbt.compat.to_string(e))\n\n    @classmethod\n    def type(cls):\n        return 'bigquery'\n\n    @classmethod\n    def date_function(cls):\n        return 
'CURRENT_TIMESTAMP()'\n\n    def begin(self, name):\n        pass\n\n    def commit(self, connection):\n        pass\n\n    @classmethod\n    def get_status(cls, cursor):\n        raise dbt.exceptions.NotImplementedException(\n            '`get_status` is not implemented for this adapter!')\n\n    @classmethod\n    def get_bigquery_credentials(cls, profile_credentials):\n        method = profile_credentials.method\n        creds = google.oauth2.service_account.Credentials\n\n        if method == 'oauth':\n            credentials, project_id = google.auth.default(scopes=cls.SCOPE)\n            return credentials\n\n        elif method == 'service-account':\n            keyfile = profile_credentials.keyfile\n            return creds.from_service_account_file(keyfile, scopes=cls.SCOPE)\n\n        elif method == 'service-account-json':\n            details = profile_credentials.keyfile_json\n            return creds.from_service_account_info(details, scopes=cls.SCOPE)\n\n        error = ('Invalid `method` in profile: \"{}\"'.format(method))\n        raise dbt.exceptions.FailedToConnectException(error)\n\n    @classmethod\n    def get_bigquery_client(cls, profile_credentials):\n        project_name = profile_credentials.project\n        creds = cls.get_bigquery_credentials(profile_credentials)\n\n        return google.cloud.bigquery.Client(project_name, creds)\n\n    @classmethod\n    def open_connection(cls, connection):\n        if connection.state == 'open':\n            logger.debug('Connection is already open, skipping open.')\n            return connection\n\n        try:\n            handle = cls.get_bigquery_client(connection.credentials)\n\n        except google.auth.exceptions.DefaultCredentialsError as e:\n            logger.info(\"Please log into GCP to continue\")\n            dbt.clients.gcloud.setup_default_credentials()\n\n            handle = cls.get_bigquery_client(connection.credentials)\n\n        except Exception as e:\n            logger.debug(\"Got an error when attempting to create a bigquery \"\n                        \"client: '{}'\".format(e))\n\n            connection.handle = None\n            connection.state = 'fail'\n\n            raise dbt.exceptions.FailedToConnectException(str(e))\n\n        connection.handle = handle\n        connection.state = 'open'\n        return connection\n\n    @classmethod\n    def close(cls, connection):\n        connection.state = 'closed'\n\n        return connection\n\n    def _link_cached_relations(self, manifest, schemas):\n        pass\n\n    def _list_relations(self, schema, model_name=None):\n        connection = self.get_connection(model_name)\n        client = connection.handle\n\n        bigquery_dataset = self.get_dataset(schema, model_name)\n\n        all_tables = client.list_tables(\n            bigquery_dataset,\n            # BigQuery paginates tables by alphabetizing them, and using\n            # the name of the last table on a page as the key for the\n            # next page. If that key table gets dropped before we run\n            # list_relations, then this will 404. So, we avoid this\n            # situation by making the page size sufficiently large.\n            # see: https://github.com/fishtown-analytics/dbt/issues/726\n            # TODO: cache the list of relations up front, and then we\n            # won't need to do this\n            max_results=100000)\n\n        # This will 404 if the dataset does not exist. 
This behavior mirrors\n # the implementation of list_relations for other adapters\n try:\n return [self._bq_table_to_relation(table) for table in all_tables]\n except google.api_core.exceptions.NotFound as e:\n return []\n\n def get_relation(self, schema, identifier, model_name=None):\n if self._schema_is_cached(schema, model_name):\n # if it's in the cache, use the parent's model of going through\n # the relations cache and picking out the relation\n return super(BigQueryAdapter, self).get_relation(\n schema=schema,\n identifier=identifier,\n model_name=model_name\n )\n\n table = self._get_bq_table(schema, identifier)\n return self._bq_table_to_relation(table)\n\n def drop_relation(self, relation, model_name=None):\n if self._schema_is_cached(relation.schema, model_name):\n self.cache.drop(relation)\n\n conn = self.get_connection(model_name)\n client = conn.handle\n\n dataset = self.get_dataset(relation.schema, model_name)\n relation_object = dataset.table(relation.identifier)\n client.delete_table(relation_object)\n\n def rename(self, schema, from_name, to_name, model_name=None):\n raise dbt.exceptions.NotImplementedException(\n '`rename` is not implemented for this adapter!')\n\n def rename_relation(self, from_relation, to_relation, model_name=None):\n raise dbt.exceptions.NotImplementedException(\n '`rename_relation` is not implemented for this adapter!')\n\n @classmethod\n def get_timeout(cls, conn):\n credentials = conn['credentials']\n return credentials.get('timeout_seconds', cls.QUERY_TIMEOUT)\n\n def materialize_as_view(self, dataset, model):\n model_name = model.get('name')\n model_alias = model.get('alias')\n model_sql = model.get('injected_sql')\n\n conn = self.get_connection(model_name)\n client = conn.handle\n\n view_ref = dataset.table(model_alias)\n view = google.cloud.bigquery.Table(view_ref)\n view.view_query = model_sql\n view.view_use_legacy_sql = False\n\n logger.debug(\"Model SQL ({}):\\n{}\".format(model_name, model_sql))\n\n with self.exception_handler(model_sql, model_name, model_name):\n client.create_table(view)\n\n return \"CREATE VIEW\"\n\n @classmethod\n def poll_until_job_completes(cls, job, timeout):\n retry_count = timeout\n\n while retry_count > 0 and job.state != 'DONE':\n retry_count -= 1\n time.sleep(1)\n job.reload()\n\n if job.state != 'DONE':\n raise dbt.exceptions.RuntimeException(\"BigQuery Timeout Exceeded\")\n\n elif job.error_result:\n raise job.exception()\n\n def make_date_partitioned_table(self, dataset_name, identifier,\n model_name=None):\n conn = self.get_connection(model_name)\n client = conn.handle\n\n dataset = self.get_dataset(dataset_name, identifier)\n table_ref = dataset.table(identifier)\n table = google.cloud.bigquery.Table(table_ref)\n table.partitioning_type = 'DAY'\n\n return client.create_table(table)\n\n def materialize_as_table(self, dataset, model, model_sql,\n decorator=None):\n model_name = model.get('name')\n model_alias = model.get('alias')\n\n conn = self.get_connection(model_name)\n client = conn.handle\n\n if decorator is None:\n table_name = model_alias\n else:\n table_name = \"{}${}\".format(model_alias, decorator)\n\n table_ref = dataset.table(table_name)\n job_config = google.cloud.bigquery.QueryJobConfig()\n job_config.destination = table_ref\n job_config.write_disposition = 'WRITE_TRUNCATE'\n\n logger.debug(\"Model SQL ({}):\\n{}\".format(table_name, model_sql))\n query_job = client.query(model_sql, job_config=job_config)\n\n # this waits for the job to complete\n with self.exception_handler(model_sql, 
model_alias,\n model_name):\n query_job.result(timeout=self.get_timeout(conn))\n\n return \"CREATE TABLE\"\n\n def execute_model(self, model,\n materialization, sql_override=None,\n decorator=None, model_name=None):\n\n if sql_override is None:\n sql_override = model.get('injected_sql')\n\n if flags.STRICT_MODE:\n connection = self.get_connection(model.get('name'))\n Connection(**connection)\n\n model_name = model.get('name')\n model_schema = model.get('schema')\n\n dataset = self.get_dataset(model_schema, model_name)\n\n if materialization == 'view':\n res = self.materialize_as_view(dataset, model)\n elif materialization == 'table':\n res = self.materialize_as_table(\n dataset, model,\n sql_override, decorator)\n else:\n msg = \"Invalid relation type: '{}'\".format(materialization)\n raise dbt.exceptions.RuntimeException(msg, model)\n\n return res\n\n def raw_execute(self, sql, model_name=None, fetch=False, **kwargs):\n conn = self.get_connection(model_name)\n client = conn.handle\n\n logger.debug('On %s: %s', model_name, sql)\n\n job_config = google.cloud.bigquery.QueryJobConfig()\n job_config.use_legacy_sql = False\n query_job = client.query(sql, job_config)\n\n # this blocks until the query has completed\n with self.exception_handler(sql, model_name):\n iterator = query_job.result()\n\n return query_job, iterator\n\n def create_temporary_table(self, sql, model_name=None, **kwargs):\n\n # BQ queries always return a temp table with their results\n query_job, _ = self.raw_execute(sql, model_name)\n bq_table = query_job.destination\n\n return self.Relation.create(\n project=bq_table.project,\n schema=bq_table.dataset_id,\n identifier=bq_table.table_id,\n quote_policy={\n 'schema': True,\n 'identifier': True\n },\n type=BigQueryRelation.Table)\n\n def alter_table_add_columns(self, relation, columns, model_name=None):\n\n logger.debug('Adding columns ({}) to table {}\".'.format(\n columns, relation))\n\n conn = self.get_connection(model_name)\n client = conn.handle\n\n dataset = self.get_dataset(relation.schema, model_name)\n\n table_ref = dataset.table(relation.name)\n table = client.get_table(table_ref)\n\n new_columns = [col.to_bq_schema_object() for col in columns]\n new_schema = table.schema + new_columns\n\n new_table = google.cloud.bigquery.Table(table_ref, schema=new_schema)\n client.update_table(new_table, ['schema'])\n\n def execute(self, sql, model_name=None, fetch=None, **kwargs):\n _, iterator = self.raw_execute(sql, model_name, fetch, **kwargs)\n\n if fetch:\n res = self.get_table_from_response(iterator)\n else:\n res = dbt.clients.agate_helper.empty_table()\n\n # If we get here, the query succeeded\n status = 'OK'\n return status, res\n\n def execute_and_fetch(self, sql, model_name, auto_begin=None):\n status, table = self.execute(sql, model_name, fetch=True)\n return status, table\n\n @classmethod\n def get_table_from_response(cls, resp):\n column_names = [field.name for field in resp.schema]\n rows = [dict(row.items()) for row in resp]\n return dbt.clients.agate_helper.table_from_data(rows, column_names)\n\n # BigQuery doesn't support BEGIN/COMMIT, so stub these out.\n\n def add_begin_query(self, name):\n pass\n\n def add_commit_query(self, name):\n pass\n\n def create_schema(self, schema, model_name=None):\n logger.debug('Creating schema \"%s\".', schema)\n\n conn = self.get_connection(model_name)\n client = conn.handle\n\n dataset = self.get_dataset(schema, model_name)\n\n # Emulate 'create schema if not exists ...'\n try:\n client.get_dataset(dataset)\n except 
google.api_core.exceptions.NotFound:\n            with self.exception_handler('create dataset', model_name):\n                client.create_dataset(dataset)\n\n    def drop_tables_in_schema(self, dataset):\n        conn = self.get_connection()\n        client = conn.handle\n\n        for table in client.list_tables(dataset):\n            client.delete_table(table.reference)\n\n    def drop_schema(self, schema, model_name=None):\n        logger.debug('Dropping schema \"%s\".', schema)\n\n        if not self.check_schema_exists(schema, model_name):\n            return\n\n        conn = self.get_connection(model_name)\n        client = conn.handle\n\n        dataset = self.get_dataset(schema, model_name)\n        with self.exception_handler('drop dataset', model_name):\n            self.drop_tables_in_schema(dataset)\n            client.delete_dataset(dataset)\n\n    def get_existing_schemas(self, model_name=None):\n        conn = self.get_connection(model_name)\n        client = conn.handle\n\n        with self.exception_handler('list dataset', model_name):\n            all_datasets = client.list_datasets(include_all=True)\n            return [ds.dataset_id for ds in all_datasets]\n\n    def get_columns_in_table(self, schema_name, table_name,\n                             database=None, model_name=None):\n\n        # BigQuery does not have databases -- the database parameter is here\n        # for consistency with the base implementation\n\n        conn = self.get_connection(model_name)\n        client = conn.handle\n\n        try:\n            dataset_ref = client.dataset(schema_name)\n            table_ref = dataset_ref.table(table_name)\n            table = client.get_table(table_ref)\n            return self.get_dbt_columns_from_bq_table(table)\n\n        except (ValueError, google.cloud.exceptions.NotFound) as e:\n            logger.debug(\"get_columns_in_table error: {}\".format(e))\n            return []\n\n    def get_dbt_columns_from_bq_table(self, table):\n        \"Translates BQ SchemaField dicts into dbt BigQueryColumn objects\"\n\n        columns = []\n        for col in table.schema:\n            # BigQuery returns type labels that are not valid type specifiers\n            dtype = self.Column.translate_type(col.field_type)\n            column = self.Column(\n                col.name, dtype, col.fields, col.mode)\n            columns.append(column)\n\n        return columns\n\n    def check_schema_exists(self, schema, model_name=None):\n        conn = self.get_connection(model_name)\n        client = conn.handle\n\n        with self.exception_handler('get dataset', model_name):\n            all_datasets = client.list_datasets(include_all=True)\n            return any([ds.dataset_id == schema for ds in all_datasets])\n\n    def get_dataset(self, dataset_name, model_name=None):\n        conn = self.get_connection(model_name)\n        dataset_ref = conn.handle.dataset(dataset_name)\n        return google.cloud.bigquery.Dataset(dataset_ref)\n\n    def _bq_table_to_relation(self, bq_table):\n        if bq_table is None:\n            return None\n\n        return self.Relation.create(\n            project=bq_table.project,\n            schema=bq_table.dataset_id,\n            identifier=bq_table.table_id,\n            quote_policy={\n                'schema': True,\n                'identifier': True\n            },\n            type=self.RELATION_TYPES.get(bq_table.table_type))\n\n    def _get_bq_table(self, dataset_name, identifier, model_name=None):\n        conn = self.get_connection(model_name)\n\n        dataset = self.get_dataset(dataset_name, model_name)\n\n        table_ref = dataset.table(identifier)\n\n        try:\n            return conn.handle.get_table(table_ref)\n        except google.cloud.exceptions.NotFound:\n            return None\n\n    @classmethod\n    def warning_on_hooks(cls, hook_type):\n        msg = \"{} is not supported in bigquery and will be ignored\"\n        dbt.ui.printer.print_timestamped_line(msg.format(hook_type),\n                                              dbt.ui.printer.COLOR_FG_YELLOW)\n\n    def add_query(self, sql, model_name=None, auto_begin=True,\n                  bindings=None, abridge_sql_log=False):\n        if model_name in ['on-run-start', 'on-run-end']:\n            
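# hook SQL is never executed on BigQuery: log a warning and return instead of failing\n            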
self.warning_on_hooks(model_name)\n        else:\n            raise dbt.exceptions.NotImplementedException(\n                '`add_query` is not implemented for this adapter!')\n\n    @classmethod\n    def is_cancelable(cls):\n        return False\n\n    @classmethod\n    def quote(cls, identifier):\n        return '`{}`'.format(identifier)\n\n    def quote_schema_and_table(self, schema, table, model_name=None):\n        return self.render_relation(self.quote(schema), self.quote(table))\n\n    def render_relation(self, schema, table):\n        connection = self.get_connection()\n        project = connection.credentials.project\n        return '{}.{}.{}'.format(self.quote(project), schema, table)\n\n    @classmethod\n    def convert_text_type(cls, agate_table, col_idx):\n        return \"string\"\n\n    @classmethod\n    def convert_number_type(cls, agate_table, col_idx):\n        decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))\n        return \"float64\" if decimals else \"int64\"\n\n    @classmethod\n    def convert_boolean_type(cls, agate_table, col_idx):\n        return \"bool\"\n\n    @classmethod\n    def convert_datetime_type(cls, agate_table, col_idx):\n        return \"datetime\"\n\n    @classmethod\n    def _agate_to_schema(cls, agate_table, column_override):\n        bq_schema = []\n        for idx, col_name in enumerate(agate_table.column_names):\n            inferred_type = cls.convert_agate_type(agate_table, idx)\n            type_ = column_override.get(col_name, inferred_type)\n            bq_schema.append(\n                google.cloud.bigquery.SchemaField(col_name, type_))\n        return bq_schema\n\n    def load_dataframe(self, schema, table_name, agate_table,\n                       column_override, model_name=None):\n        bq_schema = self._agate_to_schema(agate_table, column_override)\n        dataset = self.get_dataset(schema, None)\n        table = dataset.table(table_name)\n        conn = self.get_connection(None)\n        client = conn.handle\n\n        load_config = google.cloud.bigquery.LoadJobConfig()\n        load_config.skip_leading_rows = 1\n        load_config.schema = bq_schema\n\n        with open(agate_table.original_abspath, \"rb\") as f:\n            job = client.load_table_from_file(f, table, rewind=True,\n                                              job_config=load_config)\n\n        with self.exception_handler(\"LOAD TABLE\"):\n            self.poll_until_job_completes(job, self.get_timeout(conn))\n\n    def expand_target_column_types(self, temp_table,\n                                   to_schema, to_table, model_name=None):\n        # This is a no-op on BigQuery\n        pass\n\n    def _flat_columns_in_table(self, table):\n        \"\"\"An iterator over the flattened columns for a given schema and table.\n        Resolves child columns as having the name \"parent.child\".\n        \"\"\"\n        for col in self.get_dbt_columns_from_bq_table(table):\n            flattened = col.flatten()\n            for subcol in flattened:\n                yield subcol\n\n    @classmethod\n    def _get_stats_column_names(cls):\n        \"\"\"Construct a tuple of the column names for stats. 
Each stat has 4\n columns of data.\n \"\"\"\n columns = []\n stats = ('num_bytes', 'num_rows', 'location', 'partitioning_type',\n 'clustering_fields')\n stat_components = ('label', 'value', 'description', 'include')\n for stat_id in stats:\n for stat_component in stat_components:\n columns.append('stats:{}:{}'.format(stat_id, stat_component))\n return tuple(columns)\n\n @classmethod\n def _get_stats_columns(cls, table, relation_type):\n \"\"\"Given a table, return an iterator of key/value pairs for stats\n column names/values.\n \"\"\"\n column_names = cls._get_stats_column_names()\n\n # agate does not handle the array of column names gracefully\n clustering_value = None\n if table.clustering_fields is not None:\n clustering_value = ','.join(table.clustering_fields)\n # cast num_bytes/num_rows to str before they get to agate, or else\n # agate will incorrectly decide they are booleans.\n column_values = (\n 'Number of bytes',\n str(table.num_bytes),\n 'The number of bytes this table consumes',\n relation_type == 'table',\n\n 'Number of rows',\n str(table.num_rows),\n 'The number of rows in this table',\n relation_type == 'table',\n\n 'Location',\n table.location,\n 'The geographic location of this table',\n True,\n\n 'Partitioning Type',\n table.partitioning_type,\n 'The partitioning type used for this table',\n relation_type == 'table',\n\n 'Clustering Fields',\n clustering_value,\n 'The clustering fields for this table',\n relation_type == 'table',\n )\n return zip(column_names, column_values)\n\n def get_catalog(self, manifest):\n connection = self.get_connection('catalog')\n client = connection.handle\n\n schemas = {\n node.to_dict()['schema']\n for node in manifest.nodes.values()\n }\n\n column_names = (\n 'table_schema',\n 'table_name',\n 'table_type',\n 'table_comment',\n # does not exist in bigquery, but included for consistency\n 'table_owner',\n 'column_name',\n 'column_index',\n 'column_type',\n 'column_comment',\n )\n all_names = column_names + self._get_stats_column_names()\n columns = []\n\n for schema_name in schemas:\n relations = self.list_relations(schema_name)\n for relation in relations:\n\n # This relation contains a subset of the info we care about.\n # Fetch the full table object here\n dataset_ref = client.dataset(relation.schema)\n table_ref = dataset_ref.table(relation.identifier)\n table = client.get_table(table_ref)\n\n flattened = self._flat_columns_in_table(table)\n relation_stats = dict(self._get_stats_columns(table,\n relation.type))\n\n for index, column in enumerate(flattened, start=1):\n column_data = (\n relation.schema,\n relation.name,\n relation.type,\n None,\n None,\n column.name,\n index,\n column.data_type,\n None,\n )\n column_dict = dict(zip(column_names, column_data))\n column_dict.update(copy.deepcopy(relation_stats))\n\n columns.append(column_dict)\n\n return dbt.clients.agate_helper.table_from_data(columns, all_names)\n", "repo_name": "alexpatton/dbt", "sub_path": "dbt/adapters/bigquery/impl.py", "file_name": "impl.py", "file_ext": "py", "file_size_in_byte": 25792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "50", "api": [{"api_name": "dbt.adapters.postgres.PostgresAdapter", "line_number": 29, "usage_type": "name"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation.Table", "line_number": 65, "usage_type": "attribute"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation", "line_number": 65, "usage_type": "name"}, {"api_name": 
"dbt.adapters.bigquery.relation.BigQueryRelation.View", "line_number": 66, "usage_type": "attribute"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation", "line_number": 66, "usage_type": "name"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation.External", "line_number": 67, "usage_type": "attribute"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation", "line_number": 67, "usage_type": "name"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation", "line_number": 71, "usage_type": "name"}, {"api_name": "dbt.compat.schema", "line_number": 72, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 72, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 76, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 77, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 77, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.DatabaseException", "line_number": 81, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 81, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 81, "usage_type": "name"}, {"api_name": "google.auth.cloud", "line_number": 89, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 89, "usage_type": "name"}, {"api_name": "google.auth.cloud", "line_number": 93, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 93, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 98, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 98, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 99, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 99, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.RuntimeException", "line_number": 100, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 100, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 100, "usage_type": "name"}, {"api_name": "dbt.compat.compat.to_string", "line_number": 100, "usage_type": "call"}, {"api_name": "dbt.compat.compat", "line_number": 100, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 83, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.NotImplementedException", "line_number": 118, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 118, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 118, "usage_type": "name"}, {"api_name": "google.auth.oauth2", "line_number": 124, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 124, "usage_type": "name"}, {"api_name": "google.auth.auth.default", "line_number": 127, "usage_type": "call"}, {"api_name": "google.auth.auth", "line_number": 127, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 127, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.FailedToConnectException", "line_number": 139, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 139, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 139, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.Client", "line_number": 146, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 146, "usage_type": "attribute"}, {"api_name": 
"google.auth", "line_number": 146, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 151, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 151, "usage_type": "name"}, {"api_name": "google.auth.auth", "line_number": 157, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 157, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.info", "line_number": 158, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 158, "usage_type": "name"}, {"api_name": "dbt.compat.clients.gcloud.setup_default_credentials", "line_number": 159, "usage_type": "call"}, {"api_name": "dbt.compat.clients", "line_number": 159, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 159, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 165, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 165, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.FailedToConnectException", "line_number": 171, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 171, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 171, "usage_type": "name"}, {"api_name": "google.auth.api_core", "line_number": 208, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 208, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.NotImplementedException", "line_number": 236, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 236, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 236, "usage_type": "name"}, {"api_name": "dbt.compat.exceptions.NotImplementedException", "line_number": 240, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 240, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 240, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.Table", "line_number": 257, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 257, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 257, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 261, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 261, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 274, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions.RuntimeException", "line_number": 278, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 278, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 278, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.Table", "line_number": 290, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 290, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 290, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.QueryJobConfig", "line_number": 309, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 309, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 309, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 313, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 313, "usage_type": "name"}, {"api_name": "dbt.flags.STRICT_MODE", "line_number": 330, "usage_type": "attribute"}, {"api_name": "dbt.flags", "line_number": 330, "usage_type": "name"}, {"api_name": 
"dbt.contracts.connection.Connection", "line_number": 332, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions.RuntimeException", "line_number": 347, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 347, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 347, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 355, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 355, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.QueryJobConfig", "line_number": 357, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 357, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 357, "usage_type": "name"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation.Table", "line_number": 381, "usage_type": "attribute"}, {"api_name": "dbt.adapters.bigquery.relation.BigQueryRelation", "line_number": 381, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 385, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 385, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.Table", "line_number": 399, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 399, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 399, "usage_type": "name"}, {"api_name": "dbt.compat.clients.agate_helper.empty_table", "line_number": 408, "usage_type": "call"}, {"api_name": "dbt.compat.clients", "line_number": 408, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 408, "usage_type": "name"}, {"api_name": "dbt.compat.clients.agate_helper.table_from_data", "line_number": 422, "usage_type": "call"}, {"api_name": "dbt.compat.clients", "line_number": 422, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 422, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 433, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 433, "usage_type": "name"}, {"api_name": "google.auth.api_core", "line_number": 443, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 443, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 455, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 455, "usage_type": "name"}, {"api_name": "google.auth.cloud", "line_number": 491, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 491, "usage_type": "name"}, {"api_name": "dbt.logger.GLOBAL_LOGGER.debug", "line_number": 492, "usage_type": "call"}, {"api_name": "dbt.logger.GLOBAL_LOGGER", "line_number": 492, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.Dataset", "line_number": 519, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 519, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 519, "usage_type": "name"}, {"api_name": "google.auth.cloud", "line_number": 544, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 544, "usage_type": "name"}, {"api_name": "dbt.compat.ui.printer.print_timestamped_line", "line_number": 550, "usage_type": "call"}, {"api_name": "dbt.compat.ui", "line_number": 550, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 550, "usage_type": "name"}, {"api_name": "dbt.compat.ui", "line_number": 551, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 551, "usage_type": "name"}, 
{"api_name": "dbt.compat.exceptions.NotImplementedException", "line_number": 558, "usage_type": "call"}, {"api_name": "dbt.compat.exceptions", "line_number": 558, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 558, "usage_type": "name"}, {"api_name": "agate.MaxPrecision", "line_number": 583, "usage_type": "call"}, {"api_name": "google.auth.cloud.bigquery.SchemaField", "line_number": 601, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 601, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 601, "usage_type": "name"}, {"api_name": "google.auth.cloud.bigquery.LoadJobConfig", "line_number": 612, "usage_type": "call"}, {"api_name": "google.auth.cloud", "line_number": 612, "usage_type": "attribute"}, {"api_name": "google.auth", "line_number": 612, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 743, "usage_type": "call"}, {"api_name": "dbt.compat.clients.agate_helper.table_from_data", "line_number": 747, "usage_type": "call"}, {"api_name": "dbt.compat.clients", "line_number": 747, "usage_type": "attribute"}, {"api_name": "dbt.compat", "line_number": 747, "usage_type": "name"}]} +{"seq_id": "74668010085", "text": "import praw\nimport random\nfrom random import randint\nimport requests\nimport shutil\nimport os\n\n\n\n\nreddit = praw.Reddit(client_id = \"*\",\n client_secret= \"*\",\n username=\"DaT1dUdE05\",\n password= \"*\",\n user_agent=\"pythonmeme\",\n check_for_async=False\n )\n\nall_subred = []\n\nsubreddit1 = reddit.subreddit(\"dankmemes\")\nsubreddit2 = reddit.subreddit(\"meme\")\nsubreddit3 = reddit.subreddit(\"memes\")\nsubreddit4 = reddit.subreddit(\"HistoryMemes\")\nall_subred.append(subreddit1)\nall_subred.append(subreddit2)\nall_subred.append(subreddit3)\nall_subred.append(subreddit4)\n\nsubreddit = random.choice(all_subred)\n\nall_subs= []\n\nfile_path1 = r'C:\\Memes\\meme1.gif'\nfile_path2 = r'C:\\Memes\\meme1.jpg'\nfile_path3 = r'C:\\Memes\\meme2.gif'\nfile_path4 = r'C:\\Memes\\meme2.jpg'\nfile_path5 = r'C:\\Memes\\meme3.gif'\nfile_path6 = r'C:\\Memes\\meme3.jpg'\nfile_path7 = r'C:\\Memes\\meme4.gif'\nfile_path8 = r'C:\\Memes\\meme4.jpg'\nfile_path9 = r'C:\\Memes\\meme5.gif'\nfile_path10 = r'C:\\Memes\\meme5.jpg'\nif os.path.exists(file_path1):\n os.remove(file_path1)\nif os.path.exists(file_path2):\n os.remove(file_path2)\nif os.path.exists(file_path3):\n os.remove(file_path3)\nif os.path.exists(file_path4):\n os.remove(file_path4)\nif os.path.exists(file_path5):\n os.remove(file_path5)\nif os.path.exists(file_path6):\n os.remove(file_path6)\nif os.path.exists(file_path7):\n os.remove(file_path7)\nif os.path.exists(file_path8):\n os.remove(file_path8)\nif os.path.exists(file_path9):\n os.remove(file_path9)\nif os.path.exists(file_path10):\n os.remove(file_path10)\n\ncount = 1\nfor i in range(5):\n\n\n top = subreddit.hot(limit = 100)\n\n for submission in top:\n all_subs.append(submission)\n\n random_sub = random.choice(all_subs)\n\n print(random_sub.url[-3:])\n\n res = requests.get(random_sub.url, stream = True)\n\n if res.status_code == 200:\n if random_sub.url[-3:] == \"gif\":\n with open(\"C:\\Memes\\meme\"+str(count)+\".gif\",'wb') as f:\n shutil.copyfileobj(res.raw, f)\n elif (random_sub.url[-3:] == \"jpg\") or (random_sub.url[-3:] == \"png\"):\n with open(\"C:\\Memes\\meme\"+str(count)+\".jpg\",'wb') as f:\n shutil.copyfileobj(res.raw, f)\n else:\n continue\n count+=1\n\n print(random_sub.url)\n print(\"Done.\")\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by 
import By\nfrom selenium.webdriver.common.keys import Keys\n\nfrom selenium.webdriver.chrome.service import Service\n\n\n\n \npath=Service(\"C:\\chromedriver_win32 (1)\\chromedriver.exe\")\ndriver = webdriver.Chrome(service=path)\n\ndriver.get('https://app.clipchamp.com/login')\n\n\n\n\n\n", "repo_name": "aravindanew/YoutubeBot", "sub_path": "memeUploader.py", "file_name": "memeUploader.py", "file_ext": "py", "file_size_in_byte": 2731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "praw.Reddit", "line_number": 11, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 63, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 74, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 78, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 83, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 86, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 104, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 105, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "38767719074", "text": "import datetime\nimport time\nimport aiosqlite\nfrom builtins import bot, slash\nimport Utils.utils as utils\nimport Cogs.languages as 
languages\nfrom Cogs import council\nfrom Cogs import get_scp\nimport discord\nimport datetime\nimport random\nfrom discord.ext import commands\nfrom dislash import Option, Button, ActionRow, ButtonStyle\n\n\n\nclass SCP_Slash(commands.Cog):\n place = 0\n def __init__(self):\n self.bot = bot\n\n @slash.command(name=\"scp\", description=\"Get Info on ANY SCP of your Choice\" , options=[Option(\"scp_number\", \"Enter the SCP Number (001 - 5999)\")])\n async def scp(self, ctx, scp_number):\n if isinstance(ctx.channel, discord.DMChannel):\n language = \"english\"\n else:\n language = await languages.get_language(ctx.guild.id)\n\n scp_int = int(scp_number)\n\n if 100 > scp_int >= 10:\n scp_number = f\"0{scp_int}\"\n elif 1 < scp_int < 10:\n scp_number = f\"00{scp_int}\"\n elif scp_int == 1:\n return\n\n x = get_scp.get_scp(scp_number, language)\n\n text_lists = []\n scp_len = len(x)\n scp_count = 0\n \n while scp_count < scp_len:\n scp_string = x[scp_count:scp_count + 2031]\n text_lists.append(scp_string)\n scp_count += 2031\n\n embed_list = []\n\n\n for x in range(0, len(text_lists)):\n embed_scp = None\n if x != len(text_lists) - 1:\n embed_scp = discord.Embed(\n title=f'SCP-{scp_number}', url=fr\"{utils.langauge_to_website[language]}scp-{scp_number}\",\n description=text_lists[x] + '... **Read More**',\n colour=discord.Colour(0x992d22))\n else:\n embed_scp = discord.Embed(\n title=f'SCP-{scp_number}', url=fr\"{utils.langauge_to_website[language]}scp-{scp_number}\", description=text_lists[x],\n colour=discord.Colour(0x992d22))\n\n embed_list.append(embed_scp)\n\n row = ActionRow(\n Button(\n style=ButtonStyle.green,\n label=\"Back\",\n custom_id=\"back_page\"\n ),\n Button(\n style=ButtonStyle.red,\n label=\"Reset\",\n custom_id=\"restart\"\n ),\n Button(\n style=ButtonStyle.green,\n label=\"Next\",\n custom_id=\"next_page\"\n )\n )\n\n\n message = await ctx.send(embed=embed_list[0], components=[row])\n\n on_click = message.create_click_listener(timeout=60)\n\n @on_click.not_from_user(ctx.author, cancel_others=True, reset_timeout=False)\n async def on_wrong_user(inter):\n\n # Reply with a hidden message\n await inter.reply(\"You're not the author\", ephemeral=True)\n\n @on_click.matching_id(\"back_page\")\n async def on_back_page(ctx):\n if SCP_Slash.place > 0:\n SCP_Slash.place -= 1\n await ctx.reply(embed=embed_list[SCP_Slash.place], type=7)\n\n \n @on_click.matching_id(\"restart\")\n async def on_reset(ctx):\n SCP_Slash.place = 0\n await ctx.reply(embed=embed_list[SCP_Slash.place], type=7)\n \n @on_click.matching_id(\"next_page\")\n async def on_next_page(ctx):\n if SCP_Slash.place < len(embed_list) - 1:\n SCP_Slash.place += 1\n await ctx.reply(embed=embed_list[SCP_Slash.place], type=7)\n\n @on_click.timeout\n async def on_timeout():\n await message.edit(components=[])\n\n\n @scp.error\n async def scp_error(self, ctx, error):\n print(error)\n embed_scp_error = discord.Embed(\n title=':octagonal_sign:Oops!',\n description='You might have missed an argument or put an invalid number in! 
Try `\\'scp {001 - 5999}`',\n colour=discord.Colour(0x992d22))\n await ctx.send(embed=embed_scp_error)", "repo_name": "Tqter/SCP-Info", "sub_path": "Slash/scp_slash.py", "file_name": "scp_slash.py", "file_ext": "py", "file_size_in_byte": 3941, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 17, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 17, "usage_type": "name"}, {"api_name": "builtins.bot", "line_number": 20, "usage_type": "name"}, {"api_name": "discord.DMChannel", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Cogs.languages.get_language", "line_number": 27, "usage_type": "call"}, {"api_name": "Cogs.languages", "line_number": 27, "usage_type": "name"}, {"api_name": "Cogs.get_scp.get_scp", "line_number": 38, "usage_type": "call"}, {"api_name": "Cogs.get_scp", "line_number": 38, "usage_type": "name"}, {"api_name": "discord.Embed", "line_number": 55, "usage_type": "call"}, {"api_name": "Utils.utils.langauge_to_website", "line_number": 56, "usage_type": "attribute"}, {"api_name": "Utils.utils", "line_number": 56, "usage_type": "name"}, {"api_name": "discord.Colour", "line_number": 58, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 60, "usage_type": "call"}, {"api_name": "Utils.utils.langauge_to_website", "line_number": 61, "usage_type": "attribute"}, {"api_name": "Utils.utils", "line_number": 61, "usage_type": "name"}, {"api_name": "discord.Colour", "line_number": 62, "usage_type": "call"}, {"api_name": "dislash.ActionRow", "line_number": 66, "usage_type": "call"}, {"api_name": "dislash.Button", "line_number": 67, "usage_type": "call"}, {"api_name": "dislash.ButtonStyle.green", "line_number": 68, "usage_type": "attribute"}, {"api_name": "dislash.ButtonStyle", "line_number": 68, "usage_type": "name"}, {"api_name": "dislash.Button", "line_number": 72, "usage_type": "call"}, {"api_name": "dislash.ButtonStyle.red", "line_number": 73, "usage_type": "attribute"}, {"api_name": "dislash.ButtonStyle", "line_number": 73, "usage_type": "name"}, {"api_name": "dislash.Button", "line_number": 77, "usage_type": "call"}, {"api_name": "dislash.ButtonStyle.green", "line_number": 78, "usage_type": "attribute"}, {"api_name": "dislash.ButtonStyle", "line_number": 78, "usage_type": "name"}, {"api_name": "builtins.slash.command", "line_number": 22, "usage_type": "call"}, {"api_name": "builtins.slash", "line_number": 22, "usage_type": "name"}, {"api_name": "dislash.Option", "line_number": 22, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 121, "usage_type": "call"}, {"api_name": "discord.Colour", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "30978170420", "text": "import math\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndef cut_batch(tensor, batch_size, model):\n '''\n tensor - len x long x *\n cut long into batch_sizes and feed in model\n '''\n lon = tensor.shape[1]\n num = int(math.ceil(float(lon) / batch_size))\n outs = []\n for j in range(num):\n start = j*batch_size\n end = min((j+1)*batch_size, lon)\n ten = tensor[:,start:end] # len x batch x *\n out = model(ten) # len x batch x *\n outs.append(out)\n outs = torch.cat(outs, dim=1) # len x long x *\n return outs \n\ndef run_pretrain_transformer(conj, deps, step, neg_step, pt, percent, device, loss_fn, return_outputs=False):\n '''\n pretrain using random mask char\n conj - batch x clen\n 
deps - [num_dep x dlen], len=batch\n step - [num_step x slen], len=batch\n pt - pretrain_transformer\n percent - masking percent\n '''\n # turn into tensors\n conj = torch.LongTensor(conj).to(device).transpose(0,1) # clen x batch\n b=conj.shape[1]\n deps = torch.cat([torch.LongTensor(d[random.randint(0, d.shape[0]-1)]).unsqueeze(1) for d in deps], dim=1).to(device) # 256 x b, randomly choose 1 for each sample\n step = torch.cat([torch.LongTensor(s[random.randint(0, s.shape[0]-1)]).unsqueeze(1) for s in step], dim=1).to(device) # 256 x b, randomly choose 1 for each sample\n neg_step = torch.cat([torch.LongTensor(s[random.randint(0, s.shape[0]-1)]).unsqueeze(1) for s in neg_step], dim=1).to(device) # 256 x b, randomly choose 1 for each sample\n\n sample = torch.cat([conj,deps,step,neg_step],dim=1) # 256 x 4b\n\n encoder = nn.DataParallel(pt['encoder'],[0,1,3],dim=1)\n decoder = nn.DataParallel(pt['decoder'],[0,1,3],dim=1)\n\n while 1:\n mask = torch.ones(sample.shape[0]*sample.shape[1]).long().to(device)\n mask[:int(math.ceil(len(mask)*percent))]=0\n mask = mask[torch.randperm(len(mask))]\n mask = mask.reshape(sample.shape)\n masked_sample = sample*mask\n if torch.sum(masked_sample != sample)>0 and masked_sample.sum(0).min() > 0:\n break\n hidden = encoder(masked_sample) # len x batch x channel\n output = decoder(hidden) # len x batch x vocab_size\n if return_outputs:\n return sample, mask, output\n res = loss_fn(sample, output, mask)\n if res == 0.0:\n return None\n loss, corrects, total = res\n pt['encoder'] = encoder.module\n pt['decoder'] = decoder.module\n return loss, corrects, total\n\ndef run_step_cls_transformer(conj, deps, step, labels, sct, use_deps, device, loss_fn):\n '''\n int-encoded inputs, 0 padded, numpy\n conj - batch x clen\n deps - [num_dep x dlen], len=batch\n step - batch x slen\n sct - step_cls_transformer\n use_deps - bool, used deps or not\n '''\n # turn into tensors\n conj = torch.LongTensor(conj).to(device).transpose(0,1) # clen x batch\n step = torch.LongTensor(step).to(device).transpose(0,1) # (1+slen) x batch\n b=conj.shape[1]\n if use_deps:\n deps = torch.cat([torch.LongTensor(d[random.randint(0, d.shape[0]-1)]).unsqueeze(1) for d in deps], dim=1).to(device) # 256 x b, randomly choose 1 for each sample\n\n conj_encoder = nn.DataParallel(sct['conj_encoder'],[0,1,3],dim=1)\n deps_encoder = nn.DataParallel(sct['deps_encoder'],[0,1,3],dim=1)\n step_decoder = nn.DataParallel(sct['step_decoder'],[0,1,3],dim=1)\n\n encoded_conj = conj_encoder(conj) # clen x batch x channel\n\n if use_deps:\n encoded_deps = deps_encoder(deps) # dlen x batch x channel\n memory = torch.cat([encoded_conj,encoded_deps], dim=0) # (256*2) x batch x channel\n else:\n memory = encoded_conj\n\n outputs = step_decoder(step ,memory, mask_tgt=True)[0] # batch x cls_num\n labels = torch.LongTensor(labels).to(device)\n loss, corrects, total = loss_fn(outputs, labels)\n return loss, corrects, total\n\ndef run_step_gen_transformer(conj, deps, step, sgt, d_model, device, loss_fn, return_outputs=False):\n '''\n conj - batch x clen\n deps - [num_dep x dlen], len=batch\n sgt - step_gen_transformer\n '''\n # turn into tensors\n conj = torch.LongTensor(conj).to(device).transpose(0,1) # clen x batch\n deps = torch.cat([torch.LongTensor(d[random.randint(0, d.shape[0]-1)]).unsqueeze(1) for d in deps], dim=1).to(device) # 256 x b, randomly choose 1 for each sample\n\n conj_encoder = nn.DataParallel(sgt['conj_encoder'],[0,1,3],dim=1)\n deps_encoder = nn.DataParallel(sgt['deps_encoder'],[0,1,3],dim=1)\n 
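# wrap the step decoder in DataParallel as well, splitting the batch dimension (dim=1) across GPUs 0, 1 and 3\n    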
step_decoder = nn.DataParallel(sgt['step_decoder'],[0,1,3],dim=1)\n\n b=conj.shape[1]\n\n encoded_conj = conj_encoder(conj) # clen x batch x channel\n encoded_deps = deps_encoder(deps) # dlen x batch x channel\n\n memory = torch.cat([encoded_conj,encoded_deps],dim=0) # (256*2) x batch x channel\n\n tgt = torch.randn(conj.shape[0], b, d_model).to(device)\n outputs = step_decoder(tgt ,memory, mask_tgt=False) # tlen x batch x cls_num\n if return_outputs:\n return conj, deps, outputs\n else:\n step = [torch.LongTensor(s).to(device).transpose(0,1) for s in step] # [tlen' x num_dep]\n loss, corrects, total = loss_fn(outputs, step)\n return loss, corrects, total \n\nif __name__ == '__main__':\n from data_utils import DataParser\n from model import build_pretrain_transformer, build_step_cls_transformer, build_step_gen_transformer\n from loss import *\n\n dataparser = DataParser('../holstep', max_len=256, use_tokens=False, verbose=True, saved_vocab='vocab.pkl', saved_train_conj='train_conj.pkl', saved_val_conj='val_conj.pkl', saved_test_conj='test_conj.pkl', saved_max_len=57846)\n\n pre_train_gen = dataparser.conj_generator(split='train', batch_size=1, shuffle=True, load_neg_steps=True)\n pre_val_gen = dataparser.conj_generator(split='val', batch_size=1, shuffle=False, load_neg_steps=True)\n\n cls_train_gen= dataparser.steps_generator(split='train', batch_size=1, shuffle=True)\n cls_val_gen= dataparser.steps_generator(split='val', batch_size=1, shuffle=False)\n\n gen_train_gen= dataparser.conj_generator(split='train', batch_size=1, shuffle=True, load_neg_steps=False)\n gen_val_gen= dataparser.conj_generator(split='val', batch_size=1, shuffle=False, load_neg_steps=False)\n\n d_model = 8\n n_head=8\n n_hid=16\n n_layers=6\n pt = build_pretrain_transformer(dataparser.vocab_size+3, dataparser.max_len,d_model, n_head, n_hid, n_layers) # ,,\n sct = build_step_cls_transformer(dataparser.vocab_size+3, dataparser.max_len,d_model, n_head, n_hid, n_layers)\n sgt = build_step_gen_transformer(dataparser.vocab_size+3, dataparser.max_len, d_model, n_head, n_hid, n_layers)\n\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n '''\n for i, data in enumerate(cls_train_gen):\n print(i)\n loss = run_step_cls_transformer(*data, sct, device, cls_loss)\n break\n for i, data in enumerate(gen_train_gen):\n print(i)\n loss = run_step_gen_transformer(*data, sgt, d_model, device, gen_loss)\n break\n '''\n for i, data in enumerate(pre_train_gen):\n print(i)\n loss = run_pretrain_transformer(*data, pt, 0.2, device, pretrain_loss)\n break\n", "repo_name": "arthurhero/HOL-transformer", "sub_path": "run_models.py", "file_name": "run_models.py", "file_ext": "py", "file_size_in_byte": 7217, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "math.ceil", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 39, "usage_type": "call"}, {"api_name": 
"torch.LongTensor", "line_number": 39, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 47, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 80, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 107, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.DataParallel", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 125, "usage_type": "call"}, {"api_name": "data_utils.DataParser", "line_number": 134, "usage_type": "call"}, {"api_name": "model.build_pretrain_transformer", "line_number": 149, "usage_type": "call"}, {"api_name": "model.build_step_cls_transformer", "line_number": 150, "usage_type": "call"}, {"api_name": "model.build_step_gen_transformer", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 153, "usage_type": "attribute"}]} +{"seq_id": "28937245899", "text": "import configparser\r\nimport os\r\nimport re\r\nimport subprocess\r\nimport sys\r\nimport threading\r\nimport time\r\n\r\nimport requests\r\nfrom pykeyboard import *\r\nimport datetime\r\n\r\nepisodes = []\r\nPID = ''\r\n\r\ndef d11():\r\n time.sleep(5)\r\n k = PyKeyboard()\r\n k.press_key(k.alt_key)\r\n 
k.press_key(k.control_key)\r\n    k.tap_key(k.function_keys[6])\r\n    k.release_key(k.alt_key)\r\n    k.release_key(k.control_key)\r\n\r\ndef appends(url, token, domain, userId, persist):\r\n    time.sleep(3)\r\n    itemId = url.split('/')[7]\r\n    response = requests.get(domain + '/emby/Items?Ids=' + itemId + '&api_key=' + token)\r\n    global episodes\r\n    episodes.append(url.replace('emby://', '').replace('%20', ' '))\r\n    try:\r\n        seriesId = response.json()['Items'][0]['SeriesId']\r\n        seasonId = response.json()['Items'][0]['SeasonId']\r\n        response = requests.get(domain + '/emby/Shows/' + seriesId + '/Episodes?SeasonId=' + seasonId + '&StartItemId=' + itemId + '&api_key=' + token)\r\n        for item in response.json()['Items']:\r\n            if item['Id'] == itemId:\r\n                continue\r\n            response = requests.get(domain + '/emby/Items/' + item['Id'] + '/PlaybackInfo?api_key=' + token)\r\n            Container = response.json()['MediaSources'][0]['Container']\r\n            episode = domain + '/emby/videos/' + item['Id'] + '/stream.' + Container + '?Static=true&api_key=' + token\r\n            response = requests.get(domain + '/emby/Users/' + userId + '/Items/' + item['Id'] + '?&api_key=' + token)\r\n            ifSub = False\r\n            for media in response.json()[\"MediaSources\"][0]['MediaStreams']:\r\n                if media[\"Type\"] == \"Subtitle\":\r\n                    try:\r\n                        path = media[\"Path\"]\r\n                        ifSub = True\r\n                        index = media[\"Index\"]\r\n                    except:\r\n                        pass\r\n            if ifSub:\r\n                subtitle = domain + '/emby/videos/' + item['Id'] + '/' + response.json()['MediaSources'][0]['Id'] + '/Subtitles/' + str(index) + '/Stream.' + path.split('.')[-1] + '?api_key=' + token\r\n            else:\r\n                subtitle = ''\r\n            for line in os.popen('tasklist /V /FI \"IMAGENAME eq PotPlayer64.exe\"').readlines():\r\n                if r'PotPlayer64.exe' in line:\r\n                    if 'stream' in line:\r\n                        print(line)\r\n                        os.system(persist + '\\\\apps\\\\potplayer\\\\current\\\\PotPlayer64.exe \"' + episode + '\" /sub=\"' + subtitle + '\" /add')\r\n                        episodes.append(episode)\r\n                        print(episodes)\r\n                        time.sleep(1)\r\n    except:\r\n        pass\r\n\r\nt = threading.Thread(target=d11)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    config = configparser.ConfigParser()\r\n    config.read(os.path.split(os.path.realpath(__file__))[0] + '\\\\config.ini')\r\n    token = config.get('baseconf', 'token')\r\n    userName = config.get('baseconf', 'userName')\r\n    persist = config.get('baseconf', 'persist')\r\n    url = sys.argv[1]\r\n    # with open(os.path.split(os.path.realpath(__file__))[0] + '\\\\tmp.log', 'w') as file:\r\n    #     file.write(url)\r\n    try:\r\n        domain = re.search(r'(http://.+?)/emby', url).group(1)\r\n    except:\r\n        domain = re.search(r'(https://.+?)/emby', url).group(1)\r\n    session = requests.session()\r\n    response = session.get(domain + '/emby/users?api_key=' + token, proxies={'http': None, 'https': None})\r\n    for item in response.json():\r\n        if item['Name'] == userName:\r\n            userId = item['Id']\r\n            break\r\n    t2 = threading.Thread(target=appends, args=(url, token, domain, userId, persist,))\r\n    url = url.replace('emby://', '').replace('%20', ' ')\r\n    if 'hdr' in url:\r\n        config1= 
configparser.RawConfigParser()\r\n config1.optionxform = lambda option: option\r\n config1.read(persist + '\\\\apps\\\\potplayer\\\\current\\\\PotPlayer64.ini', encoding=\"utf-16\")\r\n config1.remove_option('Settings', 'VideoRen2')\r\n config1.write(open(persist + '\\\\apps\\\\potplayer\\\\current\\\\PotPlayer64.ini', \"w\", encoding=\"utf-16\"), space_around_delimiters=False)\r\n t.setDaemon(True)\r\n t.start()\r\n url = url.replace(' hdr', '')\r\n time.sleep(1.5)\r\n t2.setDaemon(True)\r\n t2.start()\r\n os.system(persist + '\\\\apps\\\\potplayer\\\\current\\\\PotPlayer64.exe \"' + url + '\"')\r\n os.system('taskkill /F /IM HDRSwitch.exe')\r\n os.system('taskkill /F /IM PotPlayer64.exe')\r\n with open(persist + '\\\\persist\\\\potplayer\\\\Playlist\\\\PotPlayer64.dpl', 'r', encoding='utf-8-sig') as file:\r\n content = file.read()\r\n playList = []\r\n for i in range(len(episodes)):\r\n playList.append(re.search(str(i + 1) + r'\\*file\\*(.+?)\\n', content))\r\n for i in range(len(episodes)):\r\n if playList[i] != None:\r\n state = 'Unknown'\r\n try:\r\n endtime = int(re.search(str(i + 1) + r'\\*duration2\\*(\\d+)', content).group(1))\r\n try:\r\n playtime = int(re.search(str(i + 1) + r'\\*start\\*(\\d+)', content).group(1))\r\n if endtime - playtime > 180000:\r\n state = 'Progress'\r\n duration = playtime\r\n else:\r\n state = 'Finished'\r\n except:\r\n playtime = re.search(str(i + 1) + r'\\*played\\*(\\d)', content)\r\n if playtime != None:\r\n state = 'Finished'\r\n elif len(episodes) == 1:\r\n state = 'Finished'\r\n except:\r\n state = 'UnPlayed'\r\n item = episodes[i].split('/')[5]\r\n if state == 'Progress':\r\n response = session.get(domain + '/emby/Users/' + userId + '/Items?Ids=' + item + '&api_key=' + token)\r\n response = session.post(domain + '/emby/users/' + userId + '/Items/' + item + '/UserData?api_key=' + token, data = {'PlaybackPositionTicks': duration * 10000, 'Played': response.json()['Items'][0]['UserData']['Played'], \"LastPlayedDate\": datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.0000000+00:00\")}, proxies={'http': None, 'https': None})\r\n elif state == 'Finished':\r\n response = session.post(domain + '/emby/Users/' + userId + '/PlayedItems/' + item + '?api_key=' + token, proxies={'http': None, 'https': None})\r\n print(response.text)\r\n", "repo_name": "chenguang217/bucketFiles", "sub_path": "emby2potplayer.py", "file_name": "emby2potplayer.py", "file_ext": "py", "file_size_in_byte": 7012, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 55, "usage_type": "call"}, {"api_name": "os.system", "line_number": 59, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 66, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.realpath", 
"line_number": 71, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 79, "usage_type": "call"}, {"api_name": "re.search", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 82, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 88, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 95, "usage_type": "call"}, {"api_name": "configparser.RawConfigParser", "line_number": 98, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 106, "usage_type": "call"}, {"api_name": "os.system", "line_number": 109, "usage_type": "call"}, {"api_name": "os.system", "line_number": 110, "usage_type": "call"}, {"api_name": "os.system", "line_number": 111, "usage_type": "call"}, {"api_name": "re.search", "line_number": 116, "usage_type": "call"}, {"api_name": "re.search", "line_number": 121, "usage_type": "call"}, {"api_name": "re.search", "line_number": 123, "usage_type": "call"}, {"api_name": "re.search", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 140, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 140, "usage_type": "attribute"}]} +{"seq_id": "10888900704", "text": "import os\nimport requests\nimport json\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nurl = 'https://id.indeed.com/jobs'\nsite = 'https://id.indeed.com'\nparams = {\n 'q': 'python developer',\n 'l': 'jakarta',\n 'vjk': 'b8bc9c3e08bc8ae3'\n}\nheaders = {'User Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:99.0) Gecko/20100101 Firefox/99.0'}\nres = requests.get(url, params=params, headers=headers)\n\"\"\"\nprint(res.headers)\nScraping step\nsoup = BeautifulSoup(res.text,'html.parser')\nprint(soup.prettify())\n\"\"\"\n\n\ndef get_total_pages(query, location):\n params = {\n 'q': query,\n 'l': location,\n 'vjk': 'b8bc9c3e08bc8ae3'\n }\n res = requests.get(url, params=params, headers=headers)\n #create folder dan file\n try:\n os.mkdir('temp')\n except FileExistsError:\n pass\n with open('temp/res.html', \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(res.text)\n outfile.close()\n total_pages = []\n #Scarping step\n soup = BeautifulSoup(res.text, 'html.parser')\n pagination = soup.find('ul', 'pagination-list')\n pages = pagination.find_all('li')\n for page in pages:\n total_pages.append(page.text)\n print(total_pages)\n total = int(max(total_pages))\n return total\n\n\ndef get_items(query, location, start, page):\n params = {\n 'q': query,\n 'l': location,\n 'start': start,\n 'vjk': 'b8bc9c3e08bc8ae3'\n }\n res = requests.get(url, params=params, headers=headers)\n with open('temp/res.html', \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(res.text)\n outfile.close()\n soup = BeautifulSoup(res.text, 'html.parser')\n#Scraping proses\n contents = soup.find_all('table', 'jobCard_mainContent')\n job_list = []\n for item in contents:\n title = item.find('h2', 'jobTitle').text\n company = item.find('span','companyName')\n company_name = company.text\n try:\n company_link = site + company.find('a')['href']\n except:\n company_link = 'Link Tidak Ditemukan'\n #sorting data by dictionary\n data_dict = {\n 'Title': title,\n 'Company Name': company_name,\n 'Link': company_link\n }\n job_list.append(data_dict)\n #export to json\n try:\n os.mkdir('json_result')\n except FileExistsError:\n pass\n with 
open('json_result/job_list.json', 'w+') as json_data:\n json.dump(job_list, json_data)\n return job_list\n #print('File Json sudah dibuat')\n\n\"\"\"\n #export to csv\n df = pd.DataFrame(job_list)\n df.to_csv('indeed_data.csv', index=False)\n df.to_excel('indeed_data.xlsx', index=False)\n print('Export csv dan excel berhasil')\n\"\"\"\n#Create document for dynamic scraping\ndef create_document(dataframe, filename):\n try:\n os.mkdir('data_result')\n except FileExistsError:\n pass\n\n df = pd.DataFrame(dataframe)\n df.to_csv(f'data_result/{filename}.csv', index=False)\n df.to_excel(f'data_result/{filename}.xlsx', index=False)\n\n print(f'File {filename}.csv dan {filename}.xlsx Berhasil dibuat ')\n\n\n#create Function Run\ndef run():\n query = input('Masukan Key pencarian : ')\n location = input('Masukan lokasi :')\n\n total = get_total_pages(query, location)\n counter = 0\n final_result = []\n for page in range(total):\n page += 1\n counter += 10\n final_result += get_items(query, location, counter, page)\n\n ##formating data\n try:\n os.mkdir('reports')\n except FileExistsError:\n pass\n\n with open('reports/{}.json'. format(query), 'w+') as final_data:\n json.dump(final_result, final_data)\n print('Data Json sudah dibuat')\n\n #Create Document\n create_document(final_result, query)\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "hardiyan46/scraping", "sub_path": "indeed.py", "file_name": "indeed.py", "file_ext": "py", "file_size_in_byte": 3802, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 58, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 83, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 87, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 101, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 127, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 132, "usage_type": "call"}]} +{"seq_id": "40176921216", "text": "from rfvision.components.roi_heads import StandardRoIHead\nfrom rfvision.models.builder import HEADS\nimport torch\nfrom rfvision.core import bbox2roi\n\n\n@HEADS.register_module()\nclass DCTRoIHead(StandardRoIHead):\n \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n img_metas):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(x, pos_rois, decode=False)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n\n mask_results = self._mask_forward(\n x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n mask_targets = 
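create_document above writes the same DataFrame to both CSV and Excel; note that DataFrame.to_excel needs a writer engine such as openpyxl installed. A minimal sketch with placeholder filenames and one hand-written row:

import pandas as pd

job_list = [{"Title": "Python Developer",
             "Company Name": "Acme",
             "Link": "https://example.com"}]

df = pd.DataFrame(job_list)
df.to_csv("jobs.csv", index=False)
df.to_excel("jobs.xlsx", index=False)  # requires openpyxl or another engine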
self.mask_head.get_targets(sampling_results, gt_masks,\n self.train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n return mask_results\n\n def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None, decode=True):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n assert ((rois is not None) ^\n (pos_inds is not None and bbox_feats is not None))\n if rois is not None:\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n assert bbox_feats is not None\n mask_feats = bbox_feats[pos_inds]\n\n mask_pred = self.mask_head(mask_feats, decode=decode)\n mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n return mask_results", "repo_name": "mvig-robotflow/rfvision", "sub_path": "rfvision/components/roi_heads/dct_roi_head.py", "file_name": "dct_roi_head.py", "file_ext": "py", "file_size_in_byte": 2584, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rfvision.components.roi_heads.StandardRoIHead", "line_number": 8, "usage_type": "name"}, {"api_name": "rfvision.core.bbox2roi", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 38, "usage_type": "call"}, {"api_name": "rfvision.models.builder.HEADS.register_module", "line_number": 7, "usage_type": "call"}, {"api_name": "rfvision.models.builder.HEADS", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "24277283464", "text": "import asyncio,aiohttp,ssl,random,datetime,time,re,json,collections,functools\r\nfrom faker import Faker\r\nimport urllib.parse as parse\r\n\r\nfaker = Faker('zh-cn')\r\n\r\nssl._create_default_https_context = ssl._create_unverified_context\r\nssl_context = ssl.create_default_context()\r\nssl_context.check_hostname = False\r\nssl_context.verify_mode = ssl.CERT_NONE\r\n\r\ndef get_ts():\r\n return datetime.datetime.now().isoformat(' ')\r\n\r\ndef get_ua():\r\n return faker.user_agent()\r\n\r\ndef get_id():\r\n return str(random.randint(1,8))\r\n\r\ndef get_phone():\r\n return str(faker.phone_number())\r\n\r\n@functools.lru_cache()\r\ndef load_citys(fn='citys.txt'):\r\n with open(fn,'r',encoding='utf-8') as f:\r\n return f.read().split()\r\n\r\ndef get_area():\r\n return random.choice(load_citys())\r\n\r\ndef get_qq():\r\n if random.random() < 0.8:\r\n a = random.randrange(10000,1000000000)\r\n else:\r\n a = random.randrange(1000000000,5000000000)\r\n return str(a)\r\n\r\ndef get_mobile_code():\r\n return ''.join(map(str,(random.randrange(10) for _ in range(6))))\r\n\r\ndef rand_bool():\r\n return random.random() < 0.5\r\n\r\ndef get_password():\r\n while True:\r\n k = dict(\r\n length = random.randint(8,16),\r\n special_chars = rand_bool(),\r\n digits = rand_bool(),\r\n upper_case = rand_bool(),\r\n lower_case = rand_bool(),\r\n )\r\n try:\r\n return faker.password(**k)\r\n except Exception:\r\n pass\r\n\r\ndef 
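In the share_roi_extractor branch above, positive samples reuse the detector's RoI features: a 0/1 index is concatenated per image and used to select rows of bbox_feats instead of re-pooling. A toy illustration of that selection (shapes are invented; newer PyTorch expects a bool index, hence the .bool() cast):

import torch

# Features for 5 RoIs (3 positives followed by 2 negatives), 4 channels each.
bbox_feats = torch.arange(20.0).reshape(5, 4)

pos_inds = torch.cat([
    torch.ones(3, dtype=torch.uint8),   # keep positive boxes
    torch.zeros(2, dtype=torch.uint8),  # drop negatives
])

mask_feats = bbox_feats[pos_inds.bool()]
print(mask_feats.shape)  # torch.Size([3, 4])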
get_headers():\r\n return {\r\n 'User-Agent': faker.user_agent(),\r\n }\r\n\r\ndef auto_json(res):\r\n try:\r\n return json.loads(res)\r\n except Exception:\r\n return res\r\n\r\ndef detect_csrf(html):\r\n pt = r'\\'\r\n p = re.search(pt,html)\r\n if p:\r\n return p.group(1)\r\n\r\nclass worker:\r\n time_limit = random.uniform(5,6) * 3600\r\n def __init__(self,rand_mode=True):\r\n self.logd = {}\r\n self.tasks = []\r\n self.rand_mode = rand_mode\r\n \r\n def set_alive(self):\r\n self.alive = time.time()\r\n \r\n def is_alive(self):\r\n return time.time() - self.alive < 60\r\n \r\n def add_task(self,co):\r\n self.tasks.append(asyncio.Task(co))\r\n \r\n async def task_exit(self):\r\n await asyncio.sleep(self.time_limit)\r\n print('time limit! exiting...')\r\n exit()\r\n\r\n async def post(self,*a,csrf=None,headers=None,**k):\r\n session = self.session\r\n if csrf:\r\n headers = headers or {}\r\n headers = headers.copy()\r\n headers['X-CSRF-Token'] = csrf\r\n async with session.post(*a,headers=headers,**k,ssl=ssl_context) as response:\r\n res = await response.text()\r\n return auto_json(res)\r\n\r\n async def get(self,*a,**k):\r\n session = self.session\r\n async with session.get(*a,**k,ssl=ssl_context) as response:\r\n res = await response.text()\r\n return auto_json(res)\r\n \r\n async def query_qq_api(self,t):\r\n entry = 'https://apis.map.qq.com/'\r\n key = 'PTMBZ-GCQLW-SC2RG-R2FNI-HWPNQ-4PBQM'\r\n city = parse.quote(faker.city())\r\n kw = parse.quote(faker.word())\r\n x = random.uniform(0,90)\r\n y = random.uniform(0,180)\r\n x2 = random.uniform(0,90)\r\n y2 = random.uniform(0,180)\r\n if t == 0: # https://apis.map.qq.com/ws/location/v1/ip?key=PTMBZ-GCQLW-SC2RG-R2FNI-HWPNQ-4PBQM\r\n url = f'{entry}ws/location/v1/ip?key={key}'\r\n elif t == 1:\r\n url = f'{entry}ws/geocoder/v1/?address={city}&key={key}'\r\n elif t == 2:\r\n url = f'{entry}ws/place/v1/search?boundary=nearby({x},{y},1000)&keyword={kw}&page_size=10&page_index=1&key={key}'\r\n elif t == 3:\r\n url = f'{entry}ws/place/v1/suggestion/?region={city}&keyword={kw}&key={key}'\r\n elif t == 4:\r\n url = f'{entry}ws/geocoder/v1/?location={x},{y}&key={key}&get_poi=1'\r\n elif t == 5:\r\n url = f'{entry}ws/direction/v1/bicycling/?from={x},{y}&to={x2},{y2}&key={key}'\r\n elif t == 6:\r\n url = f'{entry}ws/direction/v1/ebicycling/?from={x},{y}&to={x2},{y2}&key={key}'\r\n elif t == 7:\r\n url = f'{entry}ws/direction/v1/transit/?from={x},{y}&to={x2},{y2}&key={key}'\r\n elif t == 8:\r\n url = f'{entry}ws/direction/v1/driving/?from={x},{y}&to={x2},{y2}&key={key}'\r\n elif t == 9:\r\n url = f'{entry}ws/direction/v1/walking/?from={x},{y}&to={x2},{y2}&key={key}'\r\n headers = get_headers()\r\n r = await self.get(url,headers=headers)\r\n try:\r\n m = r['message']\r\n except Exception:\r\n m = str(r)\r\n if '每日调用量已达到上限' in m:\r\n return 'limit_day'\r\n if '已达到上限' in m:\r\n return 'limit'\r\n return 'ok'\r\n \r\n async def task_qq_api(self,t):\r\n sd = self.qq_states[t]\r\n while True:\r\n await asyncio.sleep(0)\r\n try:\r\n m = await self.query_qq_api(t)\r\n except Exception as e:\r\n print('qq_api',t,repr(e))\r\n m = 'err'\r\n sd[m] += 1\r\n if sd['limit_day'] > 10:return\r\n if m == 'ok':\r\n self.set_alive()\r\n \r\n async def start_qq_api(self,tn=10,n_con=5):\r\n self.qq_states = [collections.defaultdict(int) for _ in range(tn)]\r\n for _ in range(n_con):\r\n for t in range(tn):\r\n self.add_task(self.task_qq_api(t))\r\n await asyncio.sleep(0.1)\r\n \r\n async def query_toupiao(self,host):\r\n headers = get_headers()\r\n html = await 
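auto_json above lets callers treat every response uniformly: valid JSON comes back parsed, anything else comes back as raw text. The same fallback, narrowed to JSONDecodeError instead of a bare Exception:

import json

def auto_json(res: str):
    """Parse res as JSON when possible, otherwise return it unchanged."""
    try:
        return json.loads(res)
    except json.JSONDecodeError:
        return res

print(auto_json('{"code": 200}'))      # {'code': 200}
print(auto_json("<html>oops</html>"))  # <html>oops</html>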
self.get(host,headers=headers)\r\n t = re.escape(host)\r\n # \r\n pt = rf'a href\\=\\\"({t}[a-z/]+/login\\?id=)\\d+\\\"\\s+onclick=\\\"window\\.alert\\([^)\">]+?\\)\\\"'\r\n nexts = re.findall(pt,html)\r\n if not nexts:\r\n print(host)\r\n print(html)\r\n return 'err'\r\n self.set_alive()\r\n csrf = detect_csrf(html)\r\n\r\n cid = get_id()\r\n headers['Referer'] = host\r\n url = random.choice(nexts) + str(cid)\r\n html = await self.get(url,headers=headers)\r\n pt = rf\"url\\:\\s*\\'({t}[a-z/]+/login)\\'\"\r\n nexts = re.findall(pt,html)\r\n if not nexts:\r\n print(host)\r\n print(html)\r\n return 'err'\r\n self.set_alive()\r\n csrf = detect_csrf(html) or csrf\r\n\r\n headers['Referer'] = url\r\n if self.rand_mode:\r\n url = random.choice([\r\n f\"{host}mobile/login\",\r\n f\"{host}qq/login\",\r\n ])\r\n else:\r\n url = nexts[0]\r\n if csrf:\r\n print('toupiao csrf',csrf)\r\n if 'mobile' in url:\r\n name = get_phone()\r\n data = {\r\n 'id':cid,\r\n 'username':name,\r\n 'area':get_area(),\r\n }\r\n r = await self.post(url,json=data,headers=headers,csrf=csrf)\r\n if r['code'] != 200:\r\n print('toupiao','mobile',r)\r\n return 'err'\r\n print('toupiao','mobile',data)\r\n self.set_alive()\r\n\r\n url = f\"{host}mobile/code?name={name}\"\r\n html = await self.get(url,headers=headers)\r\n # 错误的手机号跳转到首页\r\n # 未提交的手机号等待提交\r\n # 已提交的手机号跳转到 codeverify\r\n self.set_alive()\r\n csrf = detect_csrf(html) or csrf\r\n\r\n headers['Referer'] = url\r\n url = f'{host}mobile/submitcode'\r\n data = {\r\n 'name':name,\r\n 'code':get_mobile_code(),\r\n }\r\n r = await self.post(url,json=data,headers=headers,csrf=csrf)\r\n if r['code'] != 200:\r\n print('toupiao','mobile',r)\r\n return 'err'\r\n print('toupiao','mobile submitcode',data)\r\n self.set_alive()\r\n\r\n # 提交后跳转\r\n url = f'{host}mobile/codeverify?name={name}'\r\n html = await self.get(url,headers=headers)\r\n self.set_alive()\r\n csrf = detect_csrf(html) or csrf\r\n\r\n headers['Referer'] = url\r\n url = f'{host}mobile/codeverify'\r\n data = {\r\n 'name':name,\r\n }\r\n r = await self.post(url,json=data,headers=headers,csrf=csrf)\r\n print('toupiao','mobile codeverify',r)\r\n elif 'qq' in url:\r\n name = get_qq()\r\n data = {\r\n 'id':cid,\r\n 'username':name,\r\n 'password':get_password(),\r\n 'area':get_area(),\r\n }\r\n r = await self.post(url,json=data,headers=headers,csrf=csrf)\r\n if r['code'] != 200:\r\n print('toupiao','qq',r)\r\n return 'err'\r\n print('toupiao','qq',data)\r\n self.set_alive()\r\n\r\n # 提交后跳转\r\n url = f\"{host}qq/verify?name={name}\"\r\n html = await self.get(url,headers=headers)\r\n self.set_alive()\r\n csrf = detect_csrf(html) or csrf\r\n\r\n headers['Referer'] = url\r\n url = f'{host}qq/verify'\r\n data = {\r\n 'name':name,\r\n }\r\n r = await self.post(url,json=data,headers=headers,csrf=csrf)\r\n print('toupiao','qq codeverify',r)\r\n else:\r\n print('toupiao','???',url)\r\n return 'ok'\r\n \r\n async def task_toupiao(self,suf,dt):\r\n await asyncio.sleep(dt)\r\n sd = self.tp_state[suf]\r\n host = f'https://guanfangtoupiaol.{suf}/' # 103.234.54.102\r\n while True:\r\n t = random.gauss(0,0.1)\r\n t = max(t, 0)\r\n await asyncio.sleep(t)\r\n try:\r\n m = await self.query_toupiao(host)\r\n except Exception as e:\r\n print('toupiao',repr(e))\r\n m = 'err'\r\n sd[m] += 1\r\n if m == 'ok':\r\n self.set_alive()\r\n\r\n async def start_toupiao(self,n_con=15,t_window=50,t_base=0.01):\r\n sufs = ['top','cloud','monster','site','cyou','buzz']\r\n k_all = t_window / t_base\r\n n_sample = n_con * len(sufs)\r\n k0 = k_all ** (1.0 / 
n_sample)\r\n random.shuffle(sufs)\r\n self.tp_state = {suf:collections.defaultdict(int) for suf in sufs}\r\n t0 = t_base\r\n for _ in range(n_con):\r\n for suf in sufs:\r\n t0 *= k0\r\n dt = t0 * random.uniform(0,2)\r\n self.add_task(self.task_toupiao(suf,dt))\r\n\r\n def checkpoint(self):\r\n self.tasks = [t for t in self.tasks if not t.done()]\r\n n = len(self.tasks)\r\n dt = time.time()\r\n t_run = dt - self.t_begin\r\n t_idle = dt - self.alive\r\n print(f'任务数量 {n}, 运行时间 {t_run / 60} min, 空闲时间 {t_idle} s')\r\n for i,c in enumerate(self.qq_states):\r\n print('qq api',i,dict(c))\r\n for suf,c in self.tp_state.items():\r\n print('toupiao',suf,dict(c))\r\n \r\n async def run(self):\r\n self.set_alive()\r\n self.add_task(self.task_exit())\r\n async with aiohttp.ClientSession() as session:\r\n self.session = session\r\n self.t_begin = time.time()\r\n self.add_task(self.start_toupiao())\r\n self.add_task(self.start_qq_api())\r\n while True:\r\n await asyncio.sleep(5)\r\n self.checkpoint()\r\n if not self.tasks:break\r\n if not self.is_alive():break\r\n print('exiting...')\r\n exit()\r\n\r\nasync def main():\r\n w = worker()\r\n await w.run()\r\n\r\nif __name__ == '__main__':\r\n asyncio.run(main())\r\n", "repo_name": "dannis999/zyws", "sub_path": "g2.py", "file_name": "g2.py", "file_ext": "py", "file_size_in_byte": 11988, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "faker.Faker", "line_number": 5, "usage_type": "call"}, {"api_name": "ssl._create_default_https_context", "line_number": 7, "usage_type": "attribute"}, {"api_name": "ssl._create_unverified_context", "line_number": 7, "usage_type": "attribute"}, {"api_name": "ssl.create_default_context", "line_number": 8, "usage_type": "call"}, {"api_name": "ssl.CERT_NONE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 13, "usage_type": "attribute"}, {"api_name": "faker.user_agent", "line_number": 16, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "faker.phone_number", "line_number": 22, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 24, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 30, "usage_type": "call"}, {"api_name": "random.random", "line_number": 33, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 34, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 36, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 40, "usage_type": "call"}, {"api_name": "random.random", "line_number": 43, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 48, "usage_type": "call"}, {"api_name": "faker.password", "line_number": 55, "usage_type": "call"}, {"api_name": "faker.user_agent", "line_number": 61, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 66, "usage_type": "call"}, {"api_name": "re.search", "line_number": 72, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "asyncio.Task", "line_number": 90, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 93, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 116, 
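start_toupiao above staggers task start times geometrically: delays grow from roughly t_base to t_window by a constant factor k0, so early tasks start almost immediately while later ones spread out. A sketch of that schedule using the same default parameters:

t_base, t_window = 0.01, 50
n_sample = 15 * 6            # n_con tasks for each of the 6 suffixes
k_all = t_window / t_base    # total spread between first and last delay
k0 = k_all ** (1.0 / n_sample)

t0, delays = t_base, []
for _ in range(n_sample):
    t0 *= k0
    delays.append(t0)

print(round(delays[0], 4), round(delays[-1], 2))  # ~0.011 ... ~50.0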
"usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 116, "usage_type": "name"}, {"api_name": "faker.city", "line_number": 116, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 117, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 117, "usage_type": "name"}, {"api_name": "faker.word", "line_number": 117, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 118, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 119, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 120, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 121, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 157, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 169, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 173, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 178, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 182, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 192, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 195, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 294, "usage_type": "call"}, {"api_name": "random.gauss", "line_number": 298, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 300, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 315, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 316, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 321, "usage_type": "call"}, {"api_name": "time.time", "line_number": 327, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 339, "usage_type": "call"}, {"api_name": "time.time", "line_number": 341, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 345, "usage_type": "call"}, {"api_name": "asyncio.run", "line_number": 357, "usage_type": "call"}]} +{"seq_id": "7526419447", "text": "'''\nGiven a sorted (increasing order) array with unique integer elements,\nwrite an algorithm to create a binary search tree with min height.\n\nbrute force:\n\nmin height of such tree is log(N)+1\nas the array is sorted, then root element is the middle one\nits left child is the middle of (0, root), its right child is (root, N)\nand so on.\n\nO(N)=log(N) time and memory\n'''\n\nimport math\nfrom collections import deque\n\nclass Node:\n def __init__(self, value: int):\n self.value = value\n self.left = None\n self.right = None\n\n\ndef print_tree(root: Node, n: int) -> None:\n queue = deque()\n queue.append((0, root))\n prev_level = 0\n tree_width = int(math.pow(2, math.ceil(math.log2(n))-1)/2)\n while len(queue):\n level, node = queue.popleft()\n if not node:\n continue\n if level != prev_level:\n tree_width = int(tree_width/2)\n print()\n print(' '*tree_width, end='')\n print(str(node.value), end='')\n queue.append((level+1, node.left))\n queue.append((level+1, node.right))\n prev_level = level\n\n\n'''\ncreate_bst([0, 1, 2, 3], root)\nroot.value = 2\n create_bst([0, 1], root.left)\n root.left.value = 1\n create_bst([0], root.left.left)\n root.l.l.value = 0\n create_bst([3], root.right)\n root.right.value = 3\n'''\ndef create_bst(array: list, parent: Node) -> None:\n mid = len(array) // 2\n parent.value = array[mid]\n if array[:mid]:\n parent.left = Node(0)\n create_bst(array[:mid], parent.left)\n if mid+1 < 
len(array):\n parent.right = Node(0)\n create_bst(array[mid+1:], parent.right)\n\n\ndef create_bs_tree(array: list) -> Node:\n root = Node(0)\n create_bst(array, root)\n return root\n\n\nif __name__ == '__main__':\n array = list(range(3))\n tree = create_bs_tree(array)\n print_tree(tree, len(array))\n", "repo_name": "TheTweak/coding-int-prep", "sub_path": "trees_and_graphs/minimal_tree.py", "file_name": "minimal_tree.py", "file_ext": "py", "file_size_in_byte": 1876, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 26, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 29, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 29, "usage_type": "call"}, {"api_name": "math.log2", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "25694044790", "text": "# -*- coding: utf-8 -*-\n\nimport time\n\nfrom django.core import mail\nfrom django.core.urlresolvers import reverse\n\nfrom auf.django.loginguard.models import LoginEvent\nfrom auf.django.loginguard import conf\n\nfrom .common import CommonTest\n\n\nclass SettingTest(CommonTest):\n\n def _try_patched_login_ko(self, url_name):\n data = {'username': self.username,\n 'password': 'xxx', }\n url = reverse(url_name)\n return self.client.post(url, data)\n\n def test_flag_policy(self):\n \"\"\"\n no timecheck if flag is off, no log\n \"\"\"\n self.assertEqual(conf.LOGIN_GUARD_RETRY_POLICY_ON, False)\n response = self._try_patched_login_ko('retry_policy_off_login')\n self.assertEqual(response.status_code, 200)\n events = LoginEvent.objects.all()\n self.assertEqual(len(events), 0)\n response = self._try_patched_login_ko('retry_policy_off_login')\n self.assertEqual(response.status_code, 200)\n events = LoginEvent.objects.all()\n self.assertEqual(len(events), 0)\n\n def test_alert(self):\n \"\"\"\n no timecheck if flag is off, no log\n \"\"\"\n self.assertEqual(conf.LOGIN_GUARD_FREQUENCY_ALERT_ON, False)\n self._try_patched_login_ko('alert_off_login')\n time.sleep(3)\n self._try_patched_login_ko('alert_off_login')\n mails = [m.body for m in mail.outbox]\n self.assertEqual(len(mails), 0)\n", "repo_name": "olarcheveque/auf.django.loginguard", "sub_path": "auf/django/loginguard/tests/configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "common.CommonTest", "line_number": 14, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 19, "usage_type": "call"}, {"api_name": "auf.django.loginguard.conf.LOGIN_GUARD_RETRY_POLICY_ON", "line_number": 26, "usage_type": "attribute"}, {"api_name": "auf.django.loginguard.conf", "line_number": 26, "usage_type": "name"}, {"api_name": "auf.django.loginguard.models.LoginEvent.objects.all", "line_number": 29, "usage_type": "call"}, {"api_name": "auf.django.loginguard.models.LoginEvent.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "auf.django.loginguard.models.LoginEvent", "line_number": 29, "usage_type": "name"}, {"api_name": "auf.django.loginguard.models.LoginEvent.objects.all", "line_number": 33, "usage_type": "call"}, {"api_name": "auf.django.loginguard.models.LoginEvent.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "auf.django.loginguard.models.LoginEvent", "line_number": 33, "usage_type": "name"}, {"api_name": 
"auf.django.loginguard.conf.LOGIN_GUARD_FREQUENCY_ALERT_ON", "line_number": 40, "usage_type": "attribute"}, {"api_name": "auf.django.loginguard.conf", "line_number": 40, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "django.core.mail.outbox", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.core.mail", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "20000869427", "text": "import dps_client\nfrom datetime import datetime\n\nURL = 'http://bergerab.com/dps/db/api/v1/'\nclient = dps_client.connect(URL, 'Adam Test')\n\nbatch = client.make_batch(datetime.now())\nbatch.add('Signal A', 1.2)\nbatch.add('Signal B', 1.3)\n\nprint(client.send())\n", "repo_name": "bergerab/dps", "sub_path": "dps_client/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 257, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dps_client.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "72526115686", "text": "import itertools as it\n\n\ndef LookSay():\n s = '1'\n while True:\n for item in s:\n yield int(item)\n s = \"\".join([str(len(list(l))) + str(k) for k, l in it.groupby(s)])\n # print(s)\n\n\n# for i, l in enumerate(LookSay()):\n# print(f\"{i}: {l}\")\n# if i > 10:\n# break\n", "repo_name": "llilyshkall/MSU", "sub_path": "python/day07/day07_task03.py", "file_name": "day07_task03.py", "file_ext": "py", "file_size_in_byte": 313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.groupby", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "6286832477", "text": "from skimage import io\nfrom AgentModel import DQNAgent, DroneEnv\nimport numpy as np\n\n\nif __name__ == '__main__':\n save_path = './img_save/'\n npy_path = './npy_save/'\n agent = DQNAgent(test_mode=True)\n # DroneEnv.get_dist()\n # need an api for distance info\n # api:\n # return: float\n env = DroneEnv()\n trial_len = 100\n current_state = env.reset()\n distance_list = []\n for step in range(trial_len):\n action = agent.act(current_state)\n io.imsave(save_path + '{}act{}.jpg'.format(step, action), current_state)\n distance_list.append(env.get_dist())\n new_state, reward, done = env.step(action)\n current_state = new_state\n if done:\n print('Finished in {} steps.'.format(step))\n break\n distance_list = np.array(distance_list)\n np.save(npy_path + '.npy', distance_list)\n", "repo_name": "n0lean/AirSim_DroneLanding", "sub_path": "ModelTest.py", "file_name": "ModelTest.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "AgentModel.DQNAgent", "line_number": 9, "usage_type": "call"}, {"api_name": "AgentModel.DroneEnv", "line_number": 14, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 20, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "34069001844", "text": "from openpyxl import load_workbook\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport pandas as 
pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import linear_model\nfrom sklearn import metrics\nimport os\n\n# Inputs\nfolder_name = 'inputs_october_v2'\nweek_names = [1, 2, 3]\n\nO_S_name_mapping = {\n 1: [f'B{i}' for i in range(2, 236)],\n 2: [f'C{i}' for i in range(2, 236)],\n 3: [f'D{i}' for i in range(2, 236)]\n}\nS_O_name_mapping = {\n 1: [f'C{i}' for i in range(2, 236)],\n 2: [f'D{i}' for i in range(2, 236)],\n 3: [f'E{i}' for i in range(2, 236)]\n}\nmodel_cell_names = [f'A{i}' for i in range(2, 236)]\ns_s_cell_names = [f'B{i}' for i in range(2, 236)]\nmodel_names = ['mlp_dp_sgd_64', 'lstm_dp_sgd', 'mlp_dp_loss_64', 'lstm_dp_loss']\n# model_names = ['mlp_dp_sgd', 'lstm_dp_sgd', 'mlp_dp_loss', 'lstm_dp_loss']\nsheet_names = ['GLM_synthetic']\n\noriginal_sheet_name = 'GLM_original'\noriginal_result_cell = 'J2'\noriginal_filename = 'Sept_train_on_original_test_on_synthetic.xlsx'\n\neps_names = ['2_3_4_1']\n\nfor week_name in week_names:\n model_result_dict = {model_name: {} for model_name in model_names}\n O_S_cell_names = O_S_name_mapping[week_name]\n S_O_cell_names = S_O_name_mapping[week_name]\n\n for eps_name in eps_names:\n \"\"\"Get O->S Results\"\"\"\n filename = 'Sept_train_on_original_test_on_synthetic.xlsx'\n wb = load_workbook(folder_name + '/' + filename)\n working_name = filename.strip('.xlsx')\n\n mse_dict = {}\n for sheet_name in sheet_names:\n ws = wb[sheet_name]\n for model_cell, result_cell in zip(model_cell_names, O_S_cell_names):\n model_name = ws[model_cell].value\n # Only process epsilon breakdowns we are considering\n if model_name.endswith(eps_name):\n result = ws[result_cell].value\n mse_dict[model_name + '_' + sheet_name] = result\n\n model_result_dict['lstm_dp_loss']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_loss')]\n model_result_dict['lstm_dp_sgd']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_sgd')]\n # model_result_dict['mlp_dp_loss']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss__')]\n model_result_dict['mlp_dp_loss_64']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss_64')]\n # model_result_dict['mlp_dp_sgd']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd__')]\n model_result_dict['mlp_dp_sgd_64']['O->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd_64')]\n\n \"\"\"Get S->O Results\"\"\"\n filename = 'Sept_train_on_synthetic_test_on_original.xlsx'\n wb = load_workbook(folder_name + '/' + filename)\n working_name = filename.strip('.xlsx')\n\n mse_dict = {}\n for sheet_name in sheet_names:\n ws = wb[sheet_name]\n for model_cell, result_cell in zip(model_cell_names, S_O_cell_names):\n model_name = ws[model_cell].value\n # Only process epsilon breakdowns we are considering\n if model_name.endswith(eps_name):\n result = ws[result_cell].value\n mse_dict[model_name + '_' + sheet_name] = result\n\n model_result_dict['lstm_dp_loss']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_loss')]\n model_result_dict['lstm_dp_sgd']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_sgd')]\n # model_result_dict['mlp_dp_loss']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss__')]\n model_result_dict['mlp_dp_loss_64']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss_64')]\n # 
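The result-collection loops above read spreadsheet values by pairing a model-name cell with a result cell and dereferencing ws[coordinate].value. A minimal openpyxl sketch of that access pattern; the workbook, sheet, and cell contents here are fabricated:

from openpyxl import Workbook, load_workbook

# Build a tiny workbook so the read-back below is self-contained.
wb = Workbook()
ws = wb.active
ws["A2"] = "lstm_dp_loss_2_3_4_1"
ws["B2"] = 0.123
wb.save("results.xlsx")

ws = load_workbook("results.xlsx").active
for model_cell, result_cell in zip(["A2"], ["B2"]):
    print(ws[model_cell].value, ws[result_cell].value)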
model_result_dict['mlp_dp_sgd']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd__')]\n model_result_dict['mlp_dp_sgd_64']['S->O'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd_64')]\n\n \"\"\"Get S->S Results\"\"\"\n filename = 'Sept_train_on_synthetic_test_on_original.xlsx'\n wb = load_workbook(folder_name + '/' + filename)\n working_name = filename.strip('.xlsx')\n\n mse_dict = {}\n for sheet_name in sheet_names:\n ws = wb[sheet_name]\n for model_cell, result_cell in zip(model_cell_names, s_s_cell_names):\n model_name = ws[model_cell].value\n # Only process epsilon breakdowns we are considering\n if model_name.endswith(eps_name):\n result = ws[result_cell].value\n mse_dict[model_name + '_' + sheet_name] = result\n\n model_result_dict['lstm_dp_loss']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_loss')]\n model_result_dict['lstm_dp_sgd']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('lstm_dp_sgd')]\n # model_result_dict['mlp_dp_loss']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss__')]\n model_result_dict['mlp_dp_loss_64']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_loss_64')]\n # model_result_dict['mlp_dp_sgd']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd__')]\n model_result_dict['mlp_dp_sgd_64']['S->S'] = [float(val) for key, val in mse_dict.items() if key.startswith('mlp_dp_sgd_64')]\n\n \"\"\"Manage original data\"\"\"\n wb = load_workbook(folder_name + '/' + original_filename)\n ws = wb[original_sheet_name]\n MSE_test_original = ws[original_result_cell].value\n\n \"\"\"Combine results\"\"\"\n chart_dict = {}\n for network_type, mse_dict in model_result_dict.items():\n results_csv = pd.DataFrame(mse_dict)\n results_csv.to_csv(network_type + '_results.csv', index=False)\n chart_dict[network_type] = results_csv\n\n \"\"\"Plot\"\"\"\n width = 1 / (len(model_names)) # the width of the bars\n utility_comparison_4cases_figure1 = plt.figure(figsize=(16, 12))\n # plt.rc('font', size=16)\n plt.rc('legend', fontsize=18)\n plt.rc('axes', titlesize=18)\n plt.rc('axes', labelsize=18)\n plt.rc('xtick', labelsize=18)\n plt.rc('ytick', labelsize=18)\n ax = utility_comparison_4cases_figure1.add_subplot(111)\n error_bar_capsize = 6\n alpha_value = 0.7\n\n # Original Results\n ax.bar(width,\n MSE_test_original, width * 3,\n color='r', alpha=alpha_value)\n\n for setting_idx, syn_data_prefix in enumerate(model_names):\n sub_combined_utility_results = chart_dict[syn_data_prefix]\n print(syn_data_prefix)\n print(sub_combined_utility_results)\n\n ax.bar(setting_idx + 1 + width * 0,\n np.mean(sub_combined_utility_results['O->S']), width,\n yerr=np.std(sub_combined_utility_results['O->S']),\n color='g', alpha=alpha_value, capsize=error_bar_capsize)\n ax.bar(setting_idx + 1 + width * 1,\n np.mean(sub_combined_utility_results['S->S']), width,\n yerr=np.std(sub_combined_utility_results['S->S']),\n color='b', alpha=alpha_value, capsize=error_bar_capsize)\n ax.bar(setting_idx + 1 + width * 2,\n np.mean(sub_combined_utility_results['S->O']), width,\n yerr=np.std(sub_combined_utility_results['S->O']),\n color='y', alpha=alpha_value, capsize=error_bar_capsize)\n\n\n\n ax.set_ylabel('MSE')\n ax.set_xlabel('DP-GAN Scheme', labelpad=14)\n ax.set_xticks(np.arange(len(model_names) + 1) + width * 1)\n ax.set_xticklabels(['Original'] + [x.upper().strip('_64').replace('_', '-') for x 
in model_names])\n ax.set_ylim(0, 0.3)\n\n ax.legend(('Original model to original data (test MSE)',\n 'Original model to synthetic data (MSE: pred. labels vs syn. labels)',\n 'Synthetic model to synthetic data (test MSE)',\n 'Synthetic model to original data (MSE: pred. labels vs orig. labels)'),\n loc='upper left')\n # plt.title('MSE Error of Synthetic Data Generation for varying model structure', fontdict={'size': 20})\n plt.savefig(f'mse_test_chen_custom_{eps_name}_week{week_name}.png')\n plt.close()\n", "repo_name": "paper-code-anon/dp-gan", "sub_path": "SAS/create_box_plots_custom.py", "file_name": "create_box_plots_custom.py", "file_ext": "py", "file_size_in_byte": 8684, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 45, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 67, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 89, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 110, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}]} +{"seq_id": "8438084657", "text": "from mimetypes import init\nfrom re import T\nimport rclpy\nfrom rclpy.node import Node\nimport tf_transformations\nfrom cv_bridge import CvBridge, CvBridgeError\n\nfrom geometry_msgs.msg import Twist, Pose\nfrom sensor_msgs.msg import Imu\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import Range, Image\n\nfrom math import sqrt, sin, cos, atan2, pi\nfrom enum import Enum\nfrom rclpy.task import Future\nimport sys, os, cv2\n\n\nclass ThymioState(Enum):\n INIT = 0\n FOLLOWING_LINE = 1\n\nHOMEPATH = os.path.expanduser(\"~\")\nDATASET_PATH = 
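The plotting block above draws one bar per train/test setting at successive width offsets inside each model group, with np.std as the error bar. A reduced sketch of that grouped-bar-with-error-bars pattern on synthetic numbers:

import numpy as np
import matplotlib.pyplot as plt

models = ["MLP", "LSTM"]
settings = ["O->S", "S->S", "S->O"]
rng = np.random.default_rng(0)
results = rng.random((len(models), len(settings), 5))  # 5 repeats per cell

width = 1 / (len(settings) + 1)
fig, ax = plt.subplots()
for s_idx, name in enumerate(settings):
    ax.bar(np.arange(len(models)) + s_idx * width,
           results[:, s_idx].mean(axis=1), width,
           yerr=results[:, s_idx].std(axis=1), capsize=6, label=name)
ax.set_xticks(np.arange(len(models)) + width)
ax.set_xticklabels(models)
ax.legend()
plt.savefig("grouped_bars.png")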
HOMEPATH+'/dataset'\n\nclass ControllerNode(Node):\n def __init__(self):\n super().__init__('main_node')\n \n self.vel_publisher = self.create_publisher(Twist, 'cmd_vel', 10)\n self.odom_subscriber = self.create_subscription(Odometry, 'odom', self.odom_callback, 10)\n self.camera = self.create_subscription(Image, 'camera', self.img_callback, 10)\n\n self.current_state = ThymioState.INIT\n\n self.gl_sens = None\n self.gr_sens = None\n\n self.ground_l = self.create_subscription(Range, 'ground/left', self.ground_l_cb, 10)\n self.ground_r = self.create_subscription(Range, 'ground/right', self.ground_r_cb, 10)\n\n \n def start(self):\n # Create and immediately start a timer that will regularly publish commands\n self.timer = self.create_timer(1/60, self.update_callback)\n self.done_future = Future()\n \n return self.done_future\n \n def stop(self):\n # Set all velocities to zero\n cmd_vel = Twist()\n self.vel_publisher.publish(cmd_vel)\n \n def odom_callback(self, msg):\n self.odom_pose = msg.pose.pose\n self.odom_velocity = msg.twist.twist\n \n pose2d = self.pose3d_to_2d(self.odom_pose)\n\n def img_callback(self, msg):\n pass\n\n\n def ground_l_cb(self, msg):\n self.gl_sens = msg.range\n \n def ground_r_cb(self, msg):\n self.gr_sens = msg.range\n\n def pose3d_to_2d(self, pose3):\n quaternion = (\n pose3.orientation.x,\n pose3.orientation.y,\n pose3.orientation.z,\n pose3.orientation.w\n )\n\n roll, pitch, yaw = tf_transformations.euler_from_quaternion(quaternion)\n \n pose2 = (\n pose3.position.x, # x position\n pose3.position.y, # y position\n yaw # theta orientation\n )\n \n return pose2\n\n def init_state(self):\n cmd_vel = Twist()\n cmd_vel.linear.x = 2.0\n cmd_vel.angular.z = 0.0\n return cmd_vel\n\n\n def update_init_state(self):\n if self.current_state == ThymioState.INIT:\n if self.gr_sens == 1.0 or self.gr_sens == 1.0:\n self.get_logger().info(f\"Line detected!\")\n self.current_state = ThymioState.FOLLOWING_LINE\n self.get_logger().info(f\"Entered state {self.current_state}\")\n\n\n def follow_line(self):\n cmd_vel = Twist()\n if not self.gl_sens:\n cmd_vel.linear.x = 0.0\n cmd_vel.angular.z = -1.5\n elif not self.gr_sens:\n cmd_vel.linear.x = 0.0\n cmd_vel.angular.z = 1.5\n else:\n cmd_vel.linear.x = 2.0\n cmd_vel.angular.z = 0.0\n\n return cmd_vel\n\n\n def update_callback(self):\n if self.current_state == ThymioState.INIT:\n self.update_init_state()\n cmd_vel = self.init_state()\n\n if self.current_state == ThymioState.FOLLOWING_LINE:\n cmd_vel = self.follow_line()\n\n \n #self.get_logger().info(f\"Left: {self.gl_sens}, Right: {self.gr_sens}\")\n\n # Publish the command\n self.vel_publisher.publish(cmd_vel)\n\n \n\n\ndef main():\n # Initialize the ROS client library\n rclpy.init(args=sys.argv)\n \n # Create an instance of your node class\n node = ControllerNode()\n done = node.start()\n\n rclpy.spin_until_future_complete(node, done)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "marcolatella/Mighty-Thymio-Simulation", "sub_path": "project_ml/project_ml/main_node.py", "file_name": "main_node.py", "file_ext": "py", "file_size_in_byte": 4022, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rclpy.node.Node", "line_number": 26, "usage_type": "name"}, {"api_name": 
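pose3d_to_2d above keeps only x, y, and yaw from the full 3-D pose. The yaw component can also be recovered from the quaternion directly, without the tf_transformations dependency; a sketch of that formula, with the quaternion ordered (x, y, z, w) as in ROS messages:

from math import atan2, sin, cos, pi

def yaw_from_quaternion(x, y, z, w):
    """Rotation about the z axis implied by a unit quaternion."""
    return atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

# 90 degrees about z: q = (0, 0, sin(pi/4), cos(pi/4))
q = (0.0, 0.0, sin(pi / 4), cos(pi / 4))
assert abs(yaw_from_quaternion(*q) - pi / 2) < 1e-9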
"geometry_msgs.msg.Twist", "line_number": 30, "usage_type": "argument"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 31, "usage_type": "argument"}, {"api_name": "sensor_msgs.msg.Image", "line_number": 32, "usage_type": "argument"}, {"api_name": "sensor_msgs.msg.Range", "line_number": 39, "usage_type": "argument"}, {"api_name": "sensor_msgs.msg.Range", "line_number": 40, "usage_type": "argument"}, {"api_name": "rclpy.task.Future", "line_number": 46, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 52, "usage_type": "call"}, {"api_name": "tf_transformations.euler_from_quaternion", "line_number": 79, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 90, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 105, "usage_type": "call"}, {"api_name": "rclpy.init", "line_number": 138, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 138, "usage_type": "attribute"}, {"api_name": "rclpy.spin_until_future_complete", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "9170127807", "text": "#!/usr/bin/python\nusage =\"\"\"\n\nMakes a BED file of the cut sites of a specified restriction enzyme.\n\nExample usage:\n \n # Get BED file of DpnI sites in dm3.fa\n python restriction-finder.py --fasta dm3.fa --enzyme DpnI --bed DpnI-sites.bed\n\n # can pipe to BedTools to get, e.g, sites in genes::\n python restriction-finder.py --fasta myfasta.fa --enzyme DpnI | intersectBed -a stdin -b genes.bed > DpnI-in-genes.bed\n\n\nCreated 13 Aug 2010 by Ryan Dale\"\"\"\ntry:\n from Bio import SeqIO\n from Bio import Restriction\nexcept ImportError:\n sys.stderr.write(\"\\nPlease install BioPython to use this script \\n\")\nimport optparse\nimport sys\nimport os\n\nop = optparse.OptionParser(usage=usage)\nop.add_option('--fasta', help='Required FASTA file containing sequences to search')\nop.add_option('--enzyme', help='Required enzyme name, case sensitive (e.g., DpnI or EcoRI)')\nop.add_option('--bed',help='BED file to create. If not specified, output will print to stdout.')\noptions,args = op.parse_args()\n\n# Input error checking...\ndef err(s):\n op.print_help()\n sys.stderr.write('\\n***ERROR: %s***\\n\\n'%s)\n sys.exit(1)\n# Hack to import just the enzyme you want from the Restriction module\nif options.enzyme is None:\n err('Please specify an enzyme with --enzyme')\nif options.fasta is None:\n err('Please specify a FASTA file with --fasta')\ntry:\n exec('from Bio.Restriction import %s as restr' % options.enzyme)\nexcept ImportError:\n err('No restriction enzyme \"%s\" found!' 
% options.enzyme)\n\nif not os.path.exists(options.fasta):\n err('FASTA file %s not found'%options.fasta)\n\nif options.bed is None:\n fout = sys.stdout\nelse:\n fout = open(options.bed,'w')\n\n\n# Let BioPython do the work...\nparser = SeqIO.parse(options.fasta,'fasta')\nfor chrom in parser:\n sys.stderr.write(chrom.name+'\\n')\n hits = restr.search(chrom.seq)\n for hit in hits:\n values = [chrom.name,\n str(hit),\n str(hit+1)]\n fout.write('\\t'.join(values)+'\\n')\n fout.flush()\nif options.bed is not None:\n fout.close()\n", "repo_name": "daler/rdbio-scripts", "sub_path": "sequenceFiles/restriction-finder.py", "file_name": "restriction-finder.py", "file_ext": "py", "file_size_in_byte": 2108, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "52", "api": [{"api_name": "optparse.OptionParser", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.stderr.write", "line_number": 34, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 34, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 50, "usage_type": "attribute"}, {"api_name": "Bio.SeqIO.parse", "line_number": 56, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 56, "usage_type": "name"}, {"api_name": "sys.stderr.write", "line_number": 58, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "13451695989", "text": "import gzip\nimport json\nimport os\nimport pickle\nimport shutil\nfrom collections import defaultdict, Counter\nfrom pprint import pprint\nimport math\nimport numpy as np\nimport scipy\nfrom scipy import stats\nimport argparse\nfrom pathlib import Path\n\nfrom matplotlib import cm\nfrom transforms3d.axangles import axangle2mat\nfrom util.transforms import hmg, dot\nimport torch\nimport cv2\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom dataset.preprocessing.sens_reader.SensorData import SensorData\nfrom util.distinct_colors import DistinctColors\nfrom util.metrics import ConfusionMatrix\nfrom util.misc import visualize_mask, create_box, get_boundary_mask\nfrom util.panoptic_quality import panoptic_quality, panoptic_quality_match, _panoptic_quality_compute\n\n\ndef get_keyframe_indices(filenames, window_size):\n \"\"\"\n select non-blurry images within a moving window\n \"\"\"\n scores = []\n for filename in tqdm(filenames, 'processing keyframes'):\n img = cv2.imread(str(filename))\n blur_score = compute_blur_score_opencv(img)\n scores.append(blur_score)\n\n keyframes = [i + np.argmin(scores[i:i + window_size]) for i in range(0, len(scores), window_size)]\n return keyframes, scores\n\n\ndef compute_blur_score_opencv(image):\n \"\"\"\n Estimate the amount of blur an image has with the variance of the Laplacian.\n Normalize by pixel number to offset the effect of image size on pixel gradients & variance\n https://github.com/deepfakes/faceswap/blob/ac40b0f52f5a745aa058f92339302065177dd28b/tools/sort/sort.py#L626\n \"\"\"\n if image.ndim == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur_map = cv2.Laplacian(image, cv2.CV_32F)\n score = np.var(blur_map) / np.sqrt(image.shape[0] * image.shape[1])\n return 1.0 - score\n\n\ndef subsample_scannet(src_folder, rate):\n \"\"\"\n sample every nth frame from scannet\n \"\"\"\n all_frames = 
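The script above pulls the enzyme class in with exec. If Bio.Restriction exposes enzymes as module attributes (which I believe it does, e.g. Restriction.EcoRI), the same lookup works with getattr and no exec; treat this as an unverified sketch of that alternative:

from Bio import Restriction
from Bio.Seq import Seq

name = "EcoRI"  # enzyme name, case sensitive, as in the script above
enzyme = getattr(Restriction, name, None)
if enzyme is None:
    raise SystemExit(f'No restriction enzyme "{name}" found!')

# search() returns the cut-site positions within the sequence.
print(enzyme.search(Seq("AAGAATTCAA")))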
sorted(list(x.stem for x in (src_folder / 'pose').iterdir()), key=lambda y: int(y) if y.isnumeric() else y)\n total_sampled = int(len(all_frames) * rate)\n sampled_frames = [all_frames[i * (len(all_frames) // total_sampled)] for i in range(total_sampled)]\n unsampled_frames = [x for x in all_frames if x not in sampled_frames]\n for frame in sampled_frames:\n if 'inf' in Path(src_folder / \"pose\" / f\"{frame}.txt\").read_text():\n unsampled_frames.append(frame)\n folders = [\"color\", \"depth\", \"instance\", \"pose\", \"semantics\"]\n exts = ['jpg', 'png', 'png', 'txt', 'png']\n for folder, ext in tqdm(zip(folders, exts), desc='sampling'):\n assert (src_folder / folder).exists(), src_folder\n for frame in unsampled_frames:\n if (src_folder / folder / f'{frame}.{ext}').exists():\n os.remove(str(src_folder / folder / f'{frame}.{ext}'))\n else:\n print(str(src_folder / folder / f'{frame}.{ext}'), \"already exists!\")\n\n\ndef subsample_scannet_blur_window(src_folder, min_frames):\n \"\"\"\n sample non blurry frames from scannet\n \"\"\"\n all_frames = sorted(list(x.stem for x in (src_folder / 'pose').iterdir()), key=lambda y: int(y) if y.isnumeric() else y)\n all_frame_paths = sorted(list(x for x in (src_folder / 'color').iterdir()), key=lambda y: int(y.stem) if y.stem.isnumeric() else y.stem)\n if len(all_frame_paths) <= min_frames:\n sampled_frames = all_frames\n else:\n window_size = max(2, int(math.ceil(len(all_frames) / min_frames)))\n frame_indices, _ = get_keyframe_indices(all_frame_paths, window_size)\n print(\"Using a window size of\", window_size, \"got\", len(frame_indices), \"frames\")\n sampled_frames = [all_frames[i] for i in frame_indices]\n unsampled_frames = [x for x in all_frames if x not in sampled_frames]\n for frame in sampled_frames:\n if 'inf' in Path(src_folder / \"pose\" / f\"{frame}.txt\").read_text():\n unsampled_frames.append(frame)\n folders = [\"color\", \"depth\", \"instance\", \"pose\", \"semantics\"]\n exts = ['jpg', 'png', 'png', 'txt', 'png']\n for folder, ext in tqdm(zip(folders, exts), desc='sampling'):\n assert (src_folder / folder).exists(), src_folder\n for frame in unsampled_frames:\n if (src_folder / folder / f'{frame}.{ext}').exists():\n os.remove(str(src_folder / folder / f'{frame}.{ext}'))\n else:\n print(str(src_folder / folder / f'{frame}.{ext}'), \"already exists!\")\n\n\n# manual fix for objects labeled incorrectly / ambigiously in scannet scenes\nscene_specific_fixes_objectid = {\n \"scene0050_02\": {\n 24: 37,\n 26: 37,\n 12: 6,\n 1: 6,\n 16: 9\n },\n \"scene0144_01\": {\n 4: 3,\n 13: 3,\n 5: 3\n },\n \"scene0221_01\": {\n 15: 7,\n 36: 15,\n 37: 1,\n 38: 1\n },\n \"scene0300_01\": {\n 13: 25,\n 14: 25,\n 20: 37\n },\n \"scene0389_00\": {\n 19: 37,\n 20: 3,\n 21: 3,\n 28: 37\n },\n \"scene0423_02\": {\n 6: 7\n },\n \"scene0616_00\": {\n 21: 1,\n 22: 1,\n 24: 1,\n 25: 1,\n 30: 1,\n 31: 1,\n },\n \"scene0645_02\": {\n 5: 7,\n 6: 3,\n 25: 5,\n 41: 5,\n 27: 37,\n 32: 37,\n 34: 37,\n 60: 0,\n 61: 37\n },\n \"scene0693_00\": {\n 1: 1,\n 4: 1,\n 6: 3,\n 11: 8,\n 20: 40,\n }\n}\nmmdet_export_fixes = {\n \"office_020737\": {\n \"rotation\": [math.radians(0.997707), -0.017, -0.065, -0.001],\n \"translation\": [0, 0, 1],\n \"scale\": 0.33\n },\n \"office_0213meeting\": {\n \"rotation\": None,\n \"translation\": [0, 0, 0.75],\n \"scale\": 0.33\n },\n \"koenig_0200\": {\n \"rotation\": [math.radians(0.9999), -0.034897, -0.013082, -0.000457],\n \"translation\": [0, 0, 1.1],\n \"scale\": 0.25\n }\n}\n\n\ndef extract_scan(path_sens_root, path_dest):\n sd = 
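compute_blur_score_opencv above scores sharpness by the variance of the Laplacian, normalized by image size, and returns 1.0 minus it so that lower means sharper. A compact sketch of the measure on two synthetic images:

import cv2
import numpy as np

def blur_score(image: np.ndarray) -> float:
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur_map = cv2.Laplacian(image, cv2.CV_32F)
    return 1.0 - np.var(blur_map) / np.sqrt(image.shape[0] * image.shape[1])

stripes = np.tile(np.array([[0, 255]], dtype=np.uint8), (64, 32))  # high contrast
flat = np.full((64, 64), 128, dtype=np.uint8)                      # featureless
assert blur_score(stripes) < blur_score(flat)  # sharper image scores lower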
SensorData(str(path_sens_root / f'{path_sens_root.stem}.sens'))\n sd.export_depth_images(str(path_dest / 'depth'))\n sd.export_color_images(str(path_dest / 'color'))\n sd.export_poses(str(path_dest / 'pose'))\n sd.export_intrinsics(str(path_dest / 'intrinsic'))\n\n\ndef extract_labels(path_sens_root, path_dest):\n os.system(f'unzip {str(path_sens_root / f\"{path_sens_root.stem}_2d-label-filt.zip\")} -d {str(path_dest)}')\n os.system(f'unzip {str(path_sens_root / f\"{path_sens_root.stem}_2d-instance-filt.zip\")} -d {str(path_dest)}')\n if (path_dest / \"instance\").exists():\n shutil.rmtree(str(path_dest / \"instance\"))\n if (path_dest / \"semantics\").exists():\n shutil.rmtree(str(path_dest / \"semantics\"))\n os.rename(str(path_dest / \"instance-filt\"), str(path_dest / \"instance\"))\n os.rename(str(path_dest / \"label-filt\"), str(path_dest / \"semantics\"))\n\n\ndef visualize_mask_folder(path_to_folder, offset=0):\n (path_to_folder.parent / f\"visualized_{path_to_folder.stem}\").mkdir(exist_ok=True)\n for f in tqdm(list(path_to_folder.iterdir()), desc='visualizing masks'):\n visualize_mask(np.array(Image.open(f)) + offset, path_to_folder.parent / f\"visualized_{path_to_folder.stem}\" / f.name)\n\n\ndef visualize_confidence_notta(path_to_confidence_file):\n data = torch.load(gzip.open(path_to_confidence_file), map_location='cpu')\n semantics = np.array(Image.open(path_to_confidence_file.parents[1] / \"m2f_notta_semantics\" / f\"{path_to_confidence_file.stem}.png\"))\n probability, confidence, confidence_notta = data['probabilities'], data['confidences'], data['confidences_notta']\n confidence_notta[semantics == 0] = 0\n Image.fromarray((cm.get_cmap('gray')(confidence_notta) * 255).astype(np.uint8)).save(f\"{path_to_confidence_file.stem}.png\")\n\n\ndef visualize_confidence(path_to_confidence_file):\n data = torch.load(gzip.open(path_to_confidence_file), map_location='cpu')\n semantics = np.array(Image.open(path_to_confidence_file.parents[1] / \"m2f_semantics\" / f\"{path_to_confidence_file.stem}.png\"))\n probability, confidence, confidence_notta = data['probabilities'], data['confidences'], data['confidences_notta']\n confidence[semantics == 0] = 0\n Image.fromarray((cm.get_cmap('gray')(confidence) * 255).astype(np.uint8)).save(f\"{path_to_confidence_file.stem}_tta.png\")\n\n\ndef visualize_labels(src_folder):\n visualize_mask_folder(src_folder / \"instance\")\n visualize_mask_folder(src_folder / \"semantics\")\n\n\ndef get_scannet_to_nyu_map():\n scannetid_to_nyuid = {int(x.split('\\t')[0]): x.split('\\t')[4] for x in Path(\"resources/scannet-labels.combined.tsv\").read_text().splitlines()[1:]}\n scannetid_to_nyuid[0] = 0\n scannetid_to_nyuid_arr = np.ones(1280, dtype=np.int32) * 40\n\n for scid, nyuid in scannetid_to_nyuid.items():\n if nyuid == '':\n nyuid = 40\n else:\n nyuid = int(nyuid)\n scannetid_to_nyuid_arr[scid] = nyuid\n return scannetid_to_nyuid_arr\n\n\ndef scannet_to_nyu(semantics):\n scannetid_to_nyuid_arr = get_scannet_to_nyu_map()\n nyu_semantics = semantics.reshape(-1)\n nyu_semantics = scannetid_to_nyuid_arr[nyu_semantics.tolist()]\n return nyu_semantics.reshape(semantics.shape)\n\n\ndef get_reduce_and_fold_map():\n all_classes = []\n for cllist in [x.strip().split(',') for x in Path(\"resources/scannet_to_reduced_scannet.csv\").read_text().strip().splitlines()]:\n all_classes.append(cllist[0])\n reduce_map = np.zeros(41).astype(np.int)\n for idx, cllist in enumerate([x.strip().split(',') for x in 
Path(\"resources/scannet_to_reduced_scannet.csv\").read_text().strip().splitlines()]):\n if cllist[1] != '':\n reduce_map[idx + 1] = all_classes.index(cllist[1]) + 1\n else:\n reduce_map[idx + 1] = idx + 1\n fold_map = np.zeros(41).astype(np.int)\n for idx, cllist in enumerate([x.strip().split(',') for x in Path(\"resources/scannet_reduced_to_coco.csv\").read_text().strip().splitlines()]):\n fold_map[all_classes.index(cllist[0]) + 1] = idx + 1\n return reduce_map, fold_map\n\n\ndef fold_scannet_classes(src_folder):\n reduce_map, fold_map = get_reduce_and_fold_map()\n output_folder = src_folder / \"rs_semantics\"\n output_folder.mkdir(exist_ok=True)\n for f in tqdm((src_folder / \"semantics\").iterdir(), desc='folding semantics'):\n arr = scannet_to_nyu(np.array(Image.open(f)))\n ins_arr = np.array(Image.open(src_folder / \"instance\" / f.name))\n if src_folder.stem in scene_specific_fixes_objectid:\n for ob_id in scene_specific_fixes_objectid[src_folder.stem]:\n arr[ins_arr == ob_id] = scene_specific_fixes_objectid[src_folder.stem][ob_id]\n shape = arr.shape\n Image.fromarray(fold_map[reduce_map[arr.flatten()]].reshape(shape).astype(np.int8)).save(output_folder / f.name)\n\n\ndef get_thing_semantics(sc_classes='reduced'):\n thing_semantics = [False]\n for cllist in [x.strip().split(',') for x in Path(f\"resources/scannet_{sc_classes}_things.csv\").read_text().strip().splitlines()]:\n thing_semantics.append(bool(int(cllist[1])))\n return thing_semantics\n\n\ndef get_classnames(sc_classes='reduced'):\n classnames = [\"void\"]\n for cllist in [x.strip().split(',') for x in Path(f\"resources/scannet_{sc_classes}_things.csv\").read_text().strip().splitlines()]:\n classnames.append(cllist[0])\n return classnames\n\n\ndef renumber_instances(src_folder, prefix='rs'):\n all_frame_names = sorted([x.stem for x in (src_folder / f\"color\").iterdir() if x.name.endswith('.jpg')], key=lambda y: int(y))\n thing_semantics = get_thing_semantics()\n print('len thing_semantics', len(thing_semantics))\n semantics, instances = [], []\n for frame_name in tqdm(all_frame_names, desc='read labels'):\n semantics.append(torch.from_numpy(np.array(Image.open(src_folder / f\"{prefix}_semantics\" / f\"{frame_name}.png\"))))\n instances.append(torch.from_numpy(np.array(Image.open(src_folder / f\"instance\" / f\"{frame_name}.png\"))))\n semantics = torch.stack(semantics, 0)\n instances = torch.stack(instances, 0)\n\n instance_semantics_counts = defaultdict(Counter)\n unique_instances = torch.unique(instances)\n for instance in unique_instances:\n usem, uctr = torch.unique(semantics[instances == instance], return_counts=True)\n for usem_idx in range(usem.shape[0]):\n instance_semantics_counts[instance.item()][usem[usem_idx].item()] += uctr[usem_idx].item()\n instance_to_semantic = {}\n for instance in instance_semantics_counts:\n instance_to_semantic[instance] = instance_semantics_counts[instance].most_common(1)[0][0]\n\n instance_to_remapped_instance = {}\n remapped_instance_to_instance = {0: 0}\n new_instance_id = 1\n for instance in sorted(instance_to_semantic.keys()):\n if thing_semantics[instance_to_semantic[instance]]:\n instance_to_remapped_instance[instance] = new_instance_id\n remapped_instance_to_instance[new_instance_id] = instance\n new_instance_id += 1\n else:\n instance_to_remapped_instance[instance] = 0\n\n pprint(instance_to_remapped_instance)\n\n remapped_instances = torch.zeros_like(instances)\n for uinst in unique_instances:\n remapped_instances[instances == uinst.item()] = 
instance_to_remapped_instance[uinst.item()]\n\n if (src_folder / 'segmentation_data.pkl').exists():\n export_dict = pickle.load(open(src_folder / 'segmentation_data.pkl', 'rb'))\n else:\n export_dict = {}\n export_dict['num_semantic_classes'] = len(thing_semantics)\n export_dict['fg_classes'] = [i for i, is_thing in enumerate(thing_semantics) if is_thing]\n export_dict['bg_classes'] = [i for i, is_thing in enumerate(thing_semantics) if not is_thing]\n instance_to_semantic[0] = 0\n remapped_instance_to_semantic = {k: instance_to_semantic[remapped_instance_to_instance[k]] for k in range(new_instance_id)}\n export_dict[f'{prefix}_instance_to_semantic'] = remapped_instance_to_semantic\n\n Path(src_folder / f\"{prefix}_instance\").mkdir(exist_ok=True)\n\n # save instances\n for iidx in range(remapped_instances.shape[0]):\n Image.fromarray(remapped_instances[iidx].numpy()).save(src_folder / f\"{prefix}_instance\" / f\"{all_frame_names[iidx]}.png\")\n # save bboxes\n pickle.dump(export_dict, open(src_folder / 'segmentation_data.pkl', 'wb'))\n\n\ndef create_inconsistent_instance_map_dataset(src_folder, prefix='rs'):\n all_frame_names = sorted([x.stem for x in (src_folder / \"color\").iterdir() if x.name.endswith('.jpg')], key=lambda y: int(y))\n sample_indices = list(range(len(all_frame_names)))\n export_dict = pickle.load(open(src_folder / 'segmentation_data.pkl', 'rb'))\n semantics, instances = [], []\n for frame_name in tqdm(all_frame_names, desc='read labels'):\n semantics.append(torch.from_numpy(np.array(Image.open(src_folder / f\"{prefix}_semantics\" / f\"{frame_name}.png\"))))\n instances.append(torch.from_numpy(np.array(Image.open(src_folder / f\"{prefix}_instance\" / f\"{frame_name}.png\"))))\n semantics = torch.stack(semantics, 0)\n instances = torch.stack(instances, 0)\n\n instance_to_semantics = export_dict[f'{prefix}_instance_to_semantic']\n fg_classes = export_dict['fg_classes']\n\n print(instance_to_semantics)\n remapped_instances_inc = instances.clone().long()\n remapped_instances_sem = instances.clone()\n remapped_instance_to_semantics_inc = {}\n new_instance_ctr = 1\n\n for sidx in tqdm(sorted(list(set(instance_to_semantics.values())))):\n for iidx in range(instances.shape[0]):\n for inst_id in [x for x in sorted(torch.unique(instances[iidx]).tolist()) if x != 0]:\n if instance_to_semantics[inst_id] == sidx:\n remapped_instances_inc[iidx][instances[iidx] == inst_id] = new_instance_ctr\n remapped_instance_to_semantics_inc[new_instance_ctr] = sidx\n new_instance_ctr += 1\n\n print(remapped_instances_inc.max())\n\n for i in range(len(fg_classes)):\n remapped_instances_sem[semantics == fg_classes[i]] = i + 1\n\n Path(src_folder / f\"{prefix}_instance_inc\").mkdir(exist_ok=True)\n Path(src_folder / f\"{prefix}_instance_sem\").mkdir(exist_ok=True)\n\n for iidx in tqdm(range(remapped_instances_inc.shape[0])):\n sample_index = sample_indices[iidx]\n Image.fromarray(remapped_instances_inc[iidx].numpy().astype(np.uint16)).save(src_folder / f\"{prefix}_instance_inc\" / f\"{all_frame_names[sample_index]}.png\")\n Image.fromarray(remapped_instances_sem[iidx].numpy()).save(src_folder / f\"{prefix}_instance_sem\" / f\"{all_frame_names[sample_index]}.png\")\n\n export_dict[f'{prefix}_instance_to_semantic_inc'] = remapped_instance_to_semantics_inc\n pickle.dump(export_dict, open(src_folder / 'segmentation_data.pkl', 'wb'))\n\n\ndef convert_from_mask_to_semantics_and_instances(original_mask, segments, coco_to_scannet, is_thing, instance_ctr, instance_to_semantic):\n id_to_class = 
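`renumber_instances` above gives each raw instance id the semantic class that covers most of its pixels, then hands out contiguous ids to thing instances only. A minimal sketch of that majority vote, with tiny tensors standing in for the real label stacks:

from collections import Counter, defaultdict
import torch

semantics = torch.tensor([[1, 1], [2, 1]])
instances = torch.tensor([[7, 7], [7, 0]])

counts = defaultdict(Counter)
for inst in torch.unique(instances):
    sem, ctr = torch.unique(semantics[instances == inst], return_counts=True)
    for i in range(sem.shape[0]):
        counts[inst.item()][sem[i].item()] += ctr[i].item()

instance_to_semantic = {k: c.most_common(1)[0][0] for k, c in counts.items()}
print(instance_to_semantic)   # {0: 1, 7: 1}, instance 7 is mostly class 1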
torch.zeros(1024).int()\n instance_mask = torch.zeros_like(original_mask)\n invalid_mask = original_mask == 0\n for s in segments:\n if s['category_name'] in coco_to_scannet:\n id_to_class[s['id']] = coco_to_scannet[s['category_name']]\n if is_thing[coco_to_scannet[s['category_name']]]:\n instance_mask[original_mask == s['id']] = instance_ctr\n instance_to_semantic[instance_ctr] = coco_to_scannet[s['category_name']]\n instance_ctr += 1\n return id_to_class[original_mask.flatten().numpy().tolist()].reshape(original_mask.shape), instance_mask, invalid_mask, instance_ctr, instance_to_semantic\n\n\ndef convert_from_mask_to_semantics_and_instances_no_remap(original_mask, segments, _coco_to_scannet, is_thing, instance_ctr, instance_to_semantic):\n id_to_class = torch.zeros(1024).int()\n instance_mask = torch.zeros_like(original_mask)\n invalid_mask = original_mask == 0\n for s in segments:\n id_to_class[s['id']] = s['category_id']\n if is_thing[s['category_id']]:\n instance_mask[original_mask == s['id']] = instance_ctr\n instance_to_semantic[instance_ctr] = s['category_id']\n instance_ctr += 1\n return id_to_class[original_mask.flatten().numpy().tolist()].reshape(original_mask.shape), instance_mask, invalid_mask, instance_ctr, instance_to_semantic\n\n\ndef map_panoptic_coco(src_folder, sc_classes='reduced', undistort=False):\n coco_to_scannet = {}\n thing_semantics = get_thing_semantics(sc_classes)\n for cidx, cllist in enumerate([x.strip().split(',') for x in Path(f\"resources/scannet_{sc_classes}_to_coco.csv\").read_text().strip().splitlines()]):\n for c in cllist[1:]:\n coco_to_scannet[c.split('/')[1]] = cidx + 1\n instance_ctr = 1\n instance_to_semantic = {}\n instance_ctr_notta = 1\n segment_ctr = 1\n instance_to_semantic_notta = {}\n (src_folder / \"m2f_instance\").mkdir(exist_ok=True)\n (src_folder / \"m2f_semantics\").mkdir(exist_ok=True)\n (src_folder / \"m2f_notta_instance\").mkdir(exist_ok=True)\n (src_folder / \"m2f_notta_semantics\").mkdir(exist_ok=True)\n (src_folder / \"m2f_feats\").mkdir(exist_ok=True)\n (src_folder / \"m2f_probabilities\").mkdir(exist_ok=True)\n (src_folder / \"m2f_invalid\").mkdir(exist_ok=True)\n (src_folder / \"m2f_segments\").mkdir(exist_ok=True)\n\n if undistort:\n transforms = json.loads((src_folder / \"transforms.json\").read_text())\n if \"camera_model\" in transforms and transforms[\"camera_model\"] == \"OPENCV_FISHEYE\":\n h, w, cx, cy, k1, k2, k3, k4 = int(transforms[\"h\"]), int(transforms[\"w\"]), transforms[\"cx\"], transforms[\"cy\"], transforms[\"k1\"], transforms[\"k2\"], transforms[\"k3\"], transforms[\"k4\"]\n distortion_params = np.array([k1, k2, k3, k4])\n mtx = np.array([[transforms[\"fl_x\"], 0, cx], [0, transforms[\"fl_y\"], cy], [0, 0, 1]])\n newcameramtx = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(mtx, distortion_params, [w, h], np.eye(3), balance=1)\n mapx, mapy = cv2.fisheye.initUndistortRectifyMap(mtx, distortion_params, np.eye(3), newcameramtx, [w, h], cv2.CV_16SC2)\n mask = np.ones([h, w]).astype(np.uint8) * 255\n distorted_mask = 255 - ((cv2.remap(mask, mapx, mapy, interpolation=cv2.INTER_CUBIC) > 0) * 255).astype(np.uint8)\n roi = [0, 0, w - 1, h - 1]\n else:\n undistort = False\n\n for idx, fpath in enumerate(tqdm(sorted(list((src_folder / \"color\").iterdir()), key=lambda x: x.stem), desc='map labels')):\n data = torch.load(gzip.open(src_folder / \"panoptic\" / f'{fpath.stem}.ptz'), map_location='cpu')\n probability, confidence, confidence_notta = data['probabilities'], data['confidences'], 
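The two `convert_from_mask_to_semantics_and_instances*` helpers above decode a panoptic id mask plus its segment list into separate semantic and instance masks: a lookup tensor maps segment id to class, and only thing segments receive fresh instance ids. A minimal sketch with a toy mask and segment list:

import torch

mask = torch.tensor([[1, 1], [2, 0]])      # panoptic segment ids; 0 marks invalid pixels
segments = [{'id': 1, 'category_id': 3}, {'id': 2, 'category_id': 5}]
is_thing = {3: True, 5: False}             # assumed thing/stuff split

id_to_class = torch.zeros(16, dtype=torch.int64)
instance_mask = torch.zeros_like(mask)
instance_ctr = 1
for s in segments:
    id_to_class[s['id']] = s['category_id']
    if is_thing[s['category_id']]:
        instance_mask[mask == s['id']] = instance_ctr
        instance_ctr += 1

print(id_to_class[mask])    # [[3, 3], [5, 0]], the semantic mask
print(instance_mask)        # [[1, 1], [0, 0]], only the thing segment got an id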
data['confidences_notta']\n\n if undistort:\n probability_numpy = probability.cpu().numpy()\n probability_undistorted = np.zeros_like(probability_numpy)\n for cidx in range(probability_numpy.shape[-1]):\n probability_undistorted[:, :, cidx] = cv2.remap(probability_numpy[:, :, cidx], mapx, mapy, cv2.INTER_CUBIC)\n confidence_undistorted = cv2.remap(confidence.cpu().numpy(), mapx, mapy, cv2.INTER_CUBIC)\n confidence_notta_undistorted = cv2.remap(confidence_notta.cpu().numpy(), mapx, mapy, cv2.INTER_CUBIC)\n probability, confidence, confidence_notta = torch.clip(torch.from_numpy(probability_undistorted), 0, 1), torch.clip(torch.from_numpy(confidence_undistorted), 0, 1), torch.clip(torch.from_numpy(confidence_notta_undistorted), 0, 1)\n\n semantic, instance, invalid_mask, instance_ctr, instance_to_semantic = convert_from_mask_to_semantics_and_instances_no_remap(data['mask'], data['segments'], coco_to_scannet, thing_semantics, instance_ctr, instance_to_semantic)\n semantic_notta, instance_notta, _, instance_ctr_notta, instance_to_semantic_notta = convert_from_mask_to_semantics_and_instances_no_remap(data['mask_notta'], data['segments_notta'], coco_to_scannet, thing_semantics,\n instance_ctr_notta, instance_to_semantic_notta)\n segment_mask = torch.zeros_like(data['mask'])\n for s in data['segments']:\n segment_mask[data['mask'] == s['id']] = segment_ctr\n segment_ctr += 1\n Image.fromarray(segment_mask.numpy().astype(np.uint16)).save(src_folder / \"m2f_segments\" / f\"{fpath.stem}.png\")\n Image.fromarray(semantic.numpy().astype(np.uint16)).save(src_folder / \"m2f_semantics\" / f\"{fpath.stem}.png\")\n Image.fromarray(instance.numpy()).save(src_folder / \"m2f_instance\" / f\"{fpath.stem}.png\")\n Image.fromarray(semantic_notta.numpy().astype(np.uint16)).save(src_folder / \"m2f_notta_semantics\" / f\"{fpath.stem}.png\")\n Image.fromarray(instance_notta.numpy()).save(src_folder / \"m2f_notta_instance\" / f\"{fpath.stem}.png\")\n Image.fromarray(invalid_mask.numpy().astype(np.uint8) * 255).save(src_folder / \"m2f_invalid\" / f\"{fpath.stem}.png\")\n # interpolated_p = torch.nn.functional.interpolate(torch.cat([probability.permute((2, 0, 1)), confidence.unsqueeze(0), confidence_notta.unsqueeze(0)], 0).unsqueeze(0), size=(256, 256), mode='bilinear', align_corners=False).squeeze(0)\n # probability, confidence, confidence_notta = interpolated_p[:-2, :, :].permute((1, 2, 0)).cpu(), interpolated_p[-2, :, :].cpu(), interpolated_p[-1, :, :].cpu()\n np.savez_compressed(src_folder / \"m2f_probabilities\" / f\"{fpath.stem}.npz\", probability=probability.float().numpy(), confidence=confidence.float().numpy(), confidence_notta=confidence_notta.float().numpy())\n \n if undistort:\n to_undistort = [\n src_folder / \"m2f_segments\" / f\"{fpath.stem}.png\",\n src_folder / \"m2f_semantics\" / f\"{fpath.stem}.png\",\n src_folder / \"m2f_instance\" / f\"{fpath.stem}.png\",\n src_folder / \"m2f_notta_semantics\" / f\"{fpath.stem}.png\",\n src_folder / \"m2f_notta_instance\" / f\"{fpath.stem}.png\",\n src_folder / \"m2f_invalid\" / f\"{fpath.stem}.png\"\n ]\n for img_p in to_undistort:\n img = np.array(Image.open(img_p))\n img = cv2.remap(img, mapx, mapy, cv2.INTER_NEAREST)\n img[distorted_mask > 0] = 0\n img = img[roi[1]: roi[3] + 1, roi[0]: roi[2] + 1]\n Image.fromarray(img).save(img_p)\n\n # feats = data['feats']\n # np.savez_compressed(src_folder / \"m2f_feats\" / f\"{fpath.stem}.npz\", feats=feats.float().numpy())\n\n export_dict = pickle.load(open(src_folder / 'segmentation_data.pkl', 'rb'))\n 
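One detail worth noting in the undistortion above: continuous maps (probabilities, confidences) are remapped with `INTER_CUBIC` and clipped, while the saved categorical maps use `INTER_NEAREST`, so interpolation can never invent label ids. A minimal sketch of that distinction, with identity maps standing in for the real fisheye rectify maps:

import cv2
import numpy as np

h, w = 4, 4
mapx, mapy = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))

labels = np.random.randint(0, 5, (h, w)).astype(np.uint8)   # categorical
probs = np.random.rand(h, w).astype(np.float32)             # continuous

labels_u = cv2.remap(labels, mapx, mapy, interpolation=cv2.INTER_NEAREST)
probs_u = np.clip(cv2.remap(probs, mapx, mapy, interpolation=cv2.INTER_CUBIC), 0, 1)
assert set(np.unique(labels_u)) <= set(np.unique(labels))   # no new label ids appear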
export_dict[f'm2f_instance_to_semantic'] = instance_to_semantic\n export_dict[f'm2f_notta_instance_to_semantic'] = instance_to_semantic\n pprint(instance_to_semantic)\n pickle.dump(export_dict, open(src_folder / 'segmentation_data.pkl', 'wb'))\n\n\ndef map_gt_bboxes(path_sens_root, src_folder):\n reduce_map, fold_map = get_reduce_and_fold_map()\n thing_semantics = get_thing_semantics()\n distinct_colors = DistinctColors()\n bboxes = {}\n valid_boxid = 0\n (src_folder / \"visualized_gtboxes\").mkdir(exist_ok=True)\n pkl_segmentation_data = pickle.load(open(src_folder / f'segmentation_data.pkl', 'rb'))\n bbox_annot = np.load(path_sens_root / f\"{path_sens_root.stem}_bbox.npy\")\n for bbox_idx in range(bbox_annot.shape[0]):\n position = bbox_annot[bbox_idx][0:3]\n orientation = np.eye(3)\n extent = bbox_annot[bbox_idx][3:6]\n instance_id = int(bbox_annot[bbox_idx][7]) + 1\n if src_folder.stem in scene_specific_fixes_objectid:\n if instance_id in scene_specific_fixes_objectid[src_folder.stem]:\n bbox_annot[bbox_idx][6] = scene_specific_fixes_objectid[src_folder.stem][instance_id]\n label = fold_map[reduce_map[int(bbox_annot[bbox_idx][6])]]\n if thing_semantics[label]:\n bboxes[valid_boxid] = {\n 'position': position,\n 'orientation': orientation,\n 'extent': extent,\n 'class': label\n }\n create_box(position, extent, orientation, distinct_colors.get_color_fast_numpy(label)).export(src_folder / \"visualized_gtboxes\" / f\"{label}_{valid_boxid}.obj\")\n valid_boxid += 1\n pkl_segmentation_data['gt_bboxes'] = bboxes\n pickle.dump(pkl_segmentation_data, open(src_folder / f'segmentation_data.pkl', 'wb'))\n\n\ndef map_imvoxnet_boxes(path_bboxes, src_folder, class_set=\"reduced\"):\n mmdet_to_scannet_reduced = {}\n for cidx, cllist in enumerate([x.strip().split(',') for x in Path(f\"resources/scannet_mmdet_to_scannet_{class_set}.csv\").read_text().strip().splitlines()]):\n mmdet_to_scannet_reduced[cllist[0]] = cllist[1]\n classes = [\"\"]\n for idx, cllist in enumerate([x.strip().split(',') for x in Path(f\"resources/scannet_{class_set}_to_coco.csv\").read_text().strip().splitlines()]):\n classes.append(cllist[0])\n thing_semantics = get_thing_semantics()\n distinct_colors = DistinctColors()\n bboxes = {}\n valid_boxid = 0\n (src_folder / \"visualized_mmdetboxes\").mkdir(exist_ok=True)\n pkl_segmentation_data = pickle.load(open(src_folder / f'segmentation_data.pkl', 'rb'))\n bbox_annot = json.loads(Path(path_bboxes).read_text())\n for bbox in bbox_annot:\n corners = np.array(bbox['corners'])\n if src_folder.stem in mmdet_export_fixes:\n rotation_fix = np.eye(4)\n axangle = mmdet_export_fixes[src_folder.stem][\"rotation\"]\n if axangle is not None:\n rotation_fix[:3, :3] = axangle2mat(axangle[1:4], axangle[0])\n translation_fix = hmg(np.eye(3))\n translation_fix[:3, 3] = np.array(mmdet_export_fixes[src_folder.stem][\"translation\"])\n scale_fix = hmg(np.eye(3) * mmdet_export_fixes[src_folder.stem][\"scale\"])\n corners = dot(np.linalg.inv(translation_fix @ scale_fix @ rotation_fix), corners)\n cmin = np.min(corners, axis=0)\n cmax = np.max(corners, axis=0)\n position = (cmax + cmin) / 2\n orientation = np.eye(3)\n label = classes.index(mmdet_to_scannet_reduced[bbox['label']].lower())\n extent = cmax - cmin\n if thing_semantics[label]:\n bboxes[valid_boxid] = {\n 'position': position,\n 'orientation': orientation,\n 'extent': extent,\n 'class': label\n }\n create_box(position, extent, orientation, distinct_colors.get_color_fast_numpy(label)).export(src_folder / \"visualized_mmdetboxes\" / 
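`map_imvoxnet_boxes` above recovers an axis-aligned box from its (possibly transform-corrected) corner set as center `(cmin + cmax) / 2` and extent `cmax - cmin`, with identity orientation. A minimal sketch of that parameterization, with made-up corner values:

import numpy as np

corners = np.array([[x, y, z] for x in (0., 2.) for y in (1., 3.) for z in (-1., 1.)])
cmin, cmax = corners.min(axis=0), corners.max(axis=0)
position, extent = (cmax + cmin) / 2, cmax - cmin
print(position, extent)   # [1. 2. 0.] [2. 2. 2.]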
f\"{label}_{valid_boxid}.obj\")\n valid_boxid += 1\n pkl_segmentation_data['mmdet_bboxes'] = bboxes\n pickle.dump(pkl_segmentation_data, open(src_folder / f'segmentation_data.pkl', 'wb'))\n\n\ndef read_and_resize_labels(path, size):\n image = Image.open(path)\n return np.array(image.resize(size, Image.NEAREST))\n\n\ndef calculate_iou_folders_image_wise(path_pred, path_target, image_size, pred_offset=0):\n num_semantic_classes = 1 + len(Path(\"resources/scannet_reduced_to_coco.csv\").read_text().strip().splitlines())\n iou_avg = 0\n val_set = json.loads(Path(path_target.parent / \"splits.json\").read_text())['test']\n val_paths = [y for y in sorted(list(path_pred.iterdir()), key=lambda x: int(x.stem)) if y.stem in val_set]\n faulty_gt_classes = [0]\n for p in tqdm(val_paths):\n img_pred = read_and_resize_labels(p, image_size) + pred_offset\n img_target = read_and_resize_labels(path_target / p.name, image_size)\n valid_mask = ~np.isin(img_target, faulty_gt_classes)\n train_cm = ConfusionMatrix(num_classes=num_semantic_classes, ignore_class=[])\n iou = train_cm.add_batch(img_pred[valid_mask], img_target[valid_mask], return_miou=True)\n iou_avg += iou\n iou_avg /= len(val_paths)\n return iou_avg\n\n\ndef calculate_iou_folders(path_pred, path_target, image_size, pred_offset=0):\n num_semantic_classes = 1 + len(Path(\"resources/scannet_reduced_to_coco.csv\").read_text().strip().splitlines())\n val_set = json.loads(Path(path_target.parent / \"splits.json\").read_text())['test']\n val_paths = [y for y in sorted(list(path_pred.iterdir()), key=lambda x: int(x.stem)) if y.stem in val_set]\n faulty_gt_classes = [0]\n train_cm = ConfusionMatrix(num_classes=num_semantic_classes, ignore_class=[])\n for p in tqdm(val_paths):\n img_pred = read_and_resize_labels(p, image_size) + pred_offset\n img_target = read_and_resize_labels(path_target / p.name, image_size)\n valid_mask = ~np.isin(img_target, faulty_gt_classes)\n train_cm.add_batch(img_pred[valid_mask], img_target[valid_mask], return_miou=False)\n return train_cm.get_miou()\n\n\ndef calculate_panoptic_quality_folders_image_wise(path_pred_sem, path_pred_inst, path_target_sem, path_target_inst, image_size):\n is_thing = get_thing_semantics()\n val_set = json.loads(Path(path_target_sem.parent / \"splits.json\").read_text())['test']\n faulty_gt = [0]\n things = set([i for i in range(len(is_thing)) if is_thing[i]])\n stuff = set([i for i in range(len(is_thing)) if not is_thing[i]])\n pq_avg, sq_avg, rq_avg = 0, 0, 0\n val_paths = [y for y in sorted(list(path_pred_sem.iterdir()), key=lambda x: int(x.stem)) if y.stem in val_set]\n for p in tqdm(val_paths):\n img_target_sem = read_and_resize_labels((path_target_sem / p.name), image_size)\n valid_mask = ~np.isin(img_target_sem, faulty_gt)\n img_pred_sem = torch.from_numpy(read_and_resize_labels(p, image_size)[valid_mask]).unsqueeze(-1)\n img_target_sem = torch.from_numpy(img_target_sem[valid_mask]).unsqueeze(-1)\n img_pred_inst = torch.from_numpy(read_and_resize_labels((path_pred_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n img_target_inst = torch.from_numpy(read_and_resize_labels((path_target_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n pred = torch.cat([img_pred_sem, img_pred_inst], dim=1).cuda()\n target = torch.cat([img_target_sem, img_target_inst], dim=1).cuda()\n pq, sq, rq = panoptic_quality(pred, target, things, stuff, allow_unknown_preds_category=True)\n pq_avg += pq.item()\n sq_avg += sq.item()\n rq_avg += rq.item()\n pq_avg /= len(val_paths)\n sq_avg /= len(val_paths)\n rq_avg 
/= len(val_paths)\n return pq_avg, sq_avg, rq_avg\n\n\ndef calculate_panoptic_quality_folders(path_pred_sem, path_pred_inst, path_target_sem, path_target_inst, image_size):\n is_thing = get_thing_semantics()\n val_set = json.loads(Path(path_target_sem.parent / \"splits.json\").read_text())['test']\n faulty_gt = [0]\n things = set([i for i in range(len(is_thing)) if is_thing[i]])\n stuff = set([i for i in range(len(is_thing)) if not is_thing[i]])\n val_paths = [y for y in sorted(list(path_pred_sem.iterdir()), key=lambda x: int(x.stem)) if y.stem in val_set]\n pred, target = [], []\n for p in tqdm(val_paths):\n img_target_sem = read_and_resize_labels((path_target_sem / p.name), image_size)\n valid_mask = ~np.isin(img_target_sem, faulty_gt)\n img_pred_sem = torch.from_numpy(read_and_resize_labels(p, image_size)[valid_mask]).unsqueeze(-1)\n img_target_sem = torch.from_numpy(img_target_sem[valid_mask]).unsqueeze(-1)\n img_pred_inst = torch.from_numpy(read_and_resize_labels((path_pred_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n img_target_inst = torch.from_numpy(read_and_resize_labels((path_target_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n pred_ = torch.cat([img_pred_sem, img_pred_inst], dim=1).reshape(-1, 2)\n target_ = torch.cat([img_target_sem, img_target_inst], dim=1).reshape(-1, 2)\n pred.append(pred_)\n target.append(target_)\n pq, sq, rq = panoptic_quality(torch.cat(pred, dim=0).cuda(), torch.cat(target, dim=0).cuda(), things, stuff, allow_unknown_preds_category=True)\n return pq.item(), sq.item(), rq.item()\n\n\ndef calculate_panoptic_quality_per_frame_folders(path_pred_sem, path_pred_inst, path_target_sem, path_target_inst, image_size):\n is_thing = get_thing_semantics()\n val_set = json.loads(Path(path_target_sem.parent / \"splits.json\").read_text())['test']\n faulty_gt = [0]\n things = set([i for i in range(len(is_thing)) if is_thing[i]])\n stuff = set([i for i in range(len(is_thing)) if not is_thing[i]])\n val_paths = [y for y in sorted(list(path_pred_sem.iterdir()), key=lambda x: int(x.stem)) if y.stem in val_set]\n things_, stuff_, iou_sum_, true_positives_, false_positives_, false_negatives_ = set(), set(), [], [], [], []\n for p in tqdm(val_paths):\n img_target_sem = read_and_resize_labels((path_target_sem / p.name), image_size)\n valid_mask = ~np.isin(img_target_sem, faulty_gt)\n img_pred_sem = torch.from_numpy(read_and_resize_labels(p, image_size)[valid_mask]).unsqueeze(-1)\n img_target_sem = torch.from_numpy(img_target_sem[valid_mask]).unsqueeze(-1)\n img_pred_inst = torch.from_numpy(read_and_resize_labels((path_pred_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n img_target_inst = torch.from_numpy(read_and_resize_labels((path_target_inst / p.name), image_size)[valid_mask]).unsqueeze(-1)\n pred_ = torch.cat([img_pred_sem, img_pred_inst], dim=1).reshape(-1, 2)\n target_ = torch.cat([img_target_sem, img_target_inst], dim=1).reshape(-1, 2)\n _things, _stuff, _iou_sum, _true_positives, _false_positives, _false_negatives = panoptic_quality_match(pred_, target_, things, stuff, True)\n things_.union(_things)\n stuff_.union(_stuff)\n iou_sum_.append(_iou_sum)\n true_positives_.append(_true_positives)\n false_positives_.append(_false_positives)\n false_negatives_.append(_false_negatives)\n results = _panoptic_quality_compute(things_, stuff_, torch.cat(iou_sum_, 0), torch.cat(true_positives_, 0), torch.cat(false_positives_, 0), torch.cat(false_negatives_, 0))\n return results[\"all\"][\"pq\"].item(), results[\"all\"][\"sq\"].item(), 
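All three panoptic-quality routines above reduce to the same metric: predicted and ground-truth segments are matched at IoU > 0.5, and PQ is the sum of matched IoUs over |TP| + |FP|/2 + |FN|/2, which factors into segmentation quality (SQ) times recognition quality (RQ). A minimal worked example with made-up match statistics:

ious = [0.9, 0.75, 0.6]                   # IoUs of matched (TP) segment pairs
tp, fp, fn = len(ious), 1, 2

sq = sum(ious) / tp                       # 0.75: mean IoU of the matches
rq = tp / (tp + 0.5 * fp + 0.5 * fn)      # 0.667: an F1-style detection score
pq = sum(ious) / (tp + 0.5 * fp + 0.5 * fn)
assert abs(pq - sq * rq) < 1e-9           # PQ = SQ * RQ, here 0.5
print(pq, sq, rq)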
results[\"all\"][\"rq\"].item()\n\n\ndef create_validation_set(src_folder, fraction):\n all_frames = [x.stem for x in sorted(list((src_folder / \"color\").iterdir()), key=lambda x: int(x.stem))]\n selected_val = [all_frames[i] for i in range(0, len(all_frames), int(1 / fraction))]\n selected_train = [x for x in all_frames if x not in selected_val]\n print(len(selected_train), len(selected_val))\n Path(src_folder / \"splits.json\").write_text(json.dumps({\n 'train': selected_train,\n 'test': selected_val\n }))\n\n\ndef create_mask2former_split_data(src_folder):\n all_frame_names = sorted([x.stem for x in (src_folder / f\"color\").iterdir() if x.name.endswith('.jpg')], key=lambda y: int(y) if y.isnumeric() else y)\n thing_semantics = get_thing_semantics()\n print('len thing_semantics', len(thing_semantics))\n semantics = []\n for frame_name in tqdm(all_frame_names, desc='read labels'):\n semantics.append(torch.from_numpy(np.array(Image.open(src_folder / f\"m2f_semantics\" / f\"{frame_name}.png\"))))\n\n semantics = torch.stack(semantics, 0)\n export_dict = pickle.load(open(src_folder / 'segmentation_data.pkl', 'rb'))\n instance_to_semantics = export_dict[f'm2f_instance_to_semantic']\n fg_classes = export_dict['fg_classes']\n\n print(instance_to_semantics)\n remapped_instances_sem = torch.zeros_like(semantics)\n\n sem_instance_to_semantics = {0: 0}\n for i in range(len(fg_classes)):\n remapped_instances_sem[semantics == fg_classes[i]] = i + 1\n sem_instance_to_semantics[i + 1] = fg_classes[i]\n\n Path(src_folder / f\"m2f_instance_sem\").mkdir(exist_ok=True)\n\n for iidx in tqdm(range(semantics.shape[0])):\n Image.fromarray(remapped_instances_sem[iidx].numpy()).save(src_folder / f\"m2f_instance_sem\" / f\"{all_frame_names[iidx]}.png\")\n\n export_dict['m2f_sem_instance_to_semantics'] = sem_instance_to_semantics\n pickle.dump(export_dict, open(src_folder / f'segmentation_data.pkl', 'wb'))\n\n\ndef create_m2f_used_instances(src_folder):\n export_dict = pickle.load(open(src_folder / 'segmentation_data.pkl', 'rb'))\n instance_to_semantics = export_dict[f'm2f_sem_instance_to_semantics']\n print(instance_to_semantics)\n all_frame_names = sorted([x.stem for x in (src_folder / f\"color\").iterdir() if x.name.endswith('.jpg')], key=lambda y: int(y) if y.isnumeric() else y)\n frame_counts = {k: 0 for k in instance_to_semantics.keys()}\n dims = Image.open(src_folder / f\"m2f_instance_sem\" / f\"{all_frame_names[0]}.png\").size\n for frame_name in tqdm(all_frame_names, desc='read labels'):\n uinsts, ucounts = torch.from_numpy(np.array(Image.open(src_folder / f\"m2f_instance_sem\" / f\"{frame_name}.png\"))).unique(return_counts=True)\n for iidx in range(len(uinsts)):\n percinst = ucounts[iidx] / (dims[0] * dims[1])\n if percinst > 0.005:\n frame_counts[uinsts[iidx].item()] += 1\n is_valid_instance = {}\n for k in frame_counts:\n is_valid_instance[k] = True if frame_counts[k] > len(all_frame_names) * 0.01 else False\n print(is_valid_instance)\n export_dict['m2f_sem_valid_instance'] = is_valid_instance\n pickle.dump(export_dict, open(src_folder / f'segmentation_data.pkl', 'wb'))\n\n\ndef create_instances_for_dmnerf(src_folder, correspondences, class_set='reduced'):\n suffix_o = \"_no_correspondences\" if not correspondences else \"\"\n suffix_i = \"_correspondences\" if correspondences else \"\"\n color_folder = src_folder / \"color\"\n semantics_folder = src_folder / \"m2f_notta_semantics\"\n instance_folder = src_folder / f\"m2f_notta_instance{suffix_i}\"\n output_folder = src_folder / 
f\"m2f_notta_dmnerf{suffix_o}\"\n if output_folder.exists():\n shutil.rmtree(output_folder)\n output_folder.mkdir(exist_ok=True)\n is_thing_class = get_thing_semantics(sc_classes=class_set)\n stuff_classes = [i for i in range(len(is_thing_class)) if not is_thing_class[i]]\n instance_to_semantics = {}\n for f in tqdm(list(color_folder.iterdir()), desc='creating new mask'):\n semantics = np.array(Image.open(semantics_folder / f\"{f.stem}.png\"))\n instance = np.array(Image.open(instance_folder / f\"{f.stem}.png\"))\n classes = np.unique(semantics)\n new_instance = np.zeros_like(instance)\n for c in classes:\n if c in stuff_classes:\n assigned_index = stuff_classes.index(c)\n new_instance[semantics == c] = assigned_index\n instance_to_semantics[assigned_index] = int(c)\n else:\n uniques = np.unique(instance[semantics == c])\n for u in uniques:\n if u != 0:\n assigned_index = len(stuff_classes) + u\n new_instance[instance == u] = assigned_index\n instance_to_semantics[assigned_index] = int(c)\n Image.fromarray(new_instance).save(output_folder / f\"{f.stem}.png\")\n pickle.dump(instance_to_semantics, open(src_folder / f\"dmnerf_i2s{suffix_o}.pkl\", \"wb\"))\n\n\ndef from_ours_to_replica_traj_w_c(src_folder):\n poses = sorted(list((src_folder / \"pose\").iterdir()), key=lambda x: int(x.stem) if x.stem.isnumeric() else x.stem)\n traj_w_c_string = \"\"\n for pose_file in poses:\n RT = np.array([[float(y.strip()) for y in x.strip().split()] for x in Path(pose_file).read_text().splitlines() if x != ''])\n traj_w_c_string += f\"\"\"{RT[0, 0]} {RT[0, 1]} {RT[0, 2]} {RT[0, 3]} {RT[1, 0]} {RT[1, 1]} {RT[1, 2]} {RT[1, 3]} {RT[2, 0]} {RT[2, 1]} {RT[2, 2]} {RT[2, 3]} 0.00 0.00 0.00 1.00\\n\"\"\"\n (src_folder / \"traj_w_c.txt\").write_text(traj_w_c_string)\n\n\ndef from_trajectory_to_replica_traj_blend(src_folder):\n traj_w_c_string = \"\"\n with open(src_folder / \"trajectories\" / f\"trajectory_blender.pkl\", \"rb\") as fptr:\n trajectories = pickle.load(fptr)\n for i in range(len(trajectories)):\n RT = trajectories[i]\n traj_w_c_string += f\"\"\"{RT[0, 0]} {RT[0, 1]} {RT[0, 2]} {RT[0, 3]} {RT[1, 0]} {RT[1, 1]} {RT[1, 2]} {RT[1, 3]} {RT[2, 0]} {RT[2, 1]} {RT[2, 2]} {RT[2, 3]} 0.00 0.00 0.00 1.00\\n\"\"\"\n (src_folder / \"traj_blender.txt\").write_text(traj_w_c_string)\n\n\ndef debug_dump_instances_for_scene(path):\n instance = np.array(Image.open(path))\n u, c = np.unique(instance, return_counts=True)\n for uin in u:\n visualize_mask((instance == uin).astype(int), f\"inst_{uin}.png\")\n\n\ndef export_all_for_semantic_nerf(src_folder):\n base_dir = src_folder.parent\n all_scenes = [x for x in base_dir.iterdir() if x.name.startswith(\"scene\")]\n for scene in tqdm(all_scenes):\n out_dir = base_dir / \"raw\" / \"from_semantic_nerf\" / scene.name / \"Sequence_1\"\n if out_dir.exists():\n shutil.rmtree(out_dir)\n out_dir.mkdir(parents=True)\n # copy color -> rgb\n # remake splits\n splits = json.loads((base_dir / scene.name / \"splits.json\").read_text())\n for split in [\"train\", \"val\"]:\n splits[split] = [f\"{int(x):04d}\" for x in splits[split]]\n Path(out_dir / \"splits.json\").write_text(json.dumps(splits))\n # copy intrinsics\n shutil.copyfile(base_dir / scene.name / \"intrinsic\" / \"intrinsic_color.txt\", out_dir / \"intrinsic_color.txt\")\n # make trajectory and copy\n from_ours_to_replica_traj_w_c(base_dir / scene.name)\n shutil.copyfile(base_dir / scene.name / \"traj_w_c.txt\", out_dir / \"traj_w_c.txt\")\n (out_dir / \"rgb\").mkdir()\n for f in (base_dir / scene.name / \"color\").iterdir():\n 
shutil.copyfile(f, out_dir / \"rgb\" / f\"{int(f.stem):04d}.jpg\")\n # copy depth -> depth\n if not (out_dir / \"depth\").exists():\n shutil.copytree(base_dir / scene.name / \"depth\", out_dir / \"depth\")\n\n\ndef export_all_for_dmnerf(src_folder):\n base_dir = src_folder.parent\n all_scenes = [x for x in base_dir.iterdir() if x.name.startswith(\"scene\")]\n for scene in tqdm(all_scenes):\n dm_nerf_path = Path(\"/cluster/gimli/ysiddiqui/dm-nerf-data/scannet\") / scene.name\n out_dir = dm_nerf_path\n if not out_dir.exists():\n shutil.copytree(base_dir / \"raw\" / \"from_semantic_nerf\" / scene.name / \"Sequence_1\", out_dir)\n create_instances_for_dmnerf(base_dir / scene.name, correspondences=False)\n suffix = \"_no_correspondences\"\n output_folder = dm_nerf_path / f\"semantic_instance_m2f{suffix}\"\n output_folder.mkdir(exist_ok=True)\n input_folder = base_dir / scene.name / f\"m2f_notta_dmnerf{suffix}\"\n input_names = sorted(list(input_folder.iterdir()), key=lambda x: int(x.stem))\n output_names = [f\"semantic_instance_{int(x.stem)}\" for x in input_names]\n for idx in range(len(input_names)):\n shutil.copyfile(input_names[idx], output_folder / f\"{output_names[idx]}.png\")\n\n\ndef render_mesh(src_sens_path):\n import trimesh\n import pyrender\n\n def create_groups():\n seg_file = src_sens_path / f\"{sens_root.stem}_vh_clean.segs.json\"\n seg_indices = np.array(json.loads(Path(seg_file).read_text())[\"segIndices\"])\n face_seg_ids = np.concatenate([seg_indices[scannet_mesh.faces[:, 0:1]], seg_indices[scannet_mesh.faces[:, 1:2]], seg_indices[scannet_mesh.faces[:, 2:3]]], axis=-1)\n face_seg_ids = stats.mode(face_seg_ids, axis=1).mode[:, 0]\n vertex_reseg = np.zeros_like(seg_indices)\n vertex_reseg[scannet_mesh.faces[:, 0]] = face_seg_ids\n vertex_reseg[scannet_mesh.faces[:, 1]] = face_seg_ids\n vertex_reseg[scannet_mesh.faces[:, 2]] = face_seg_ids\n # colors = (distinct_colors.get_color_fast_numpy(vertex_reseg) * 255).astype(np.uint8)\n # trimesh.Trimesh(vertices=scannet_mesh.vertices, faces=scannet_mesh.faces, vertex_colors=colors).export(\"test_seg.obj\")\n colors = np.concatenate([x[:, np.newaxis] for x in [(vertex_reseg // 256 ** 2) % 256, (vertex_reseg // 256) % 256, vertex_reseg % 256]], axis=-1)\n return trimesh.Trimesh(vertices=scannet_mesh.vertices, faces=scannet_mesh.faces, vertex_colors=colors)\n\n flip_mat = np.array([\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ])\n src_folder = Path(\"data/scannet/\", sens_root.stem)\n with open(src_folder / \"trajectories\" / f\"trajectory_blender.pkl\", \"rb\") as fptr:\n trajectories = pickle.load(fptr)\n scannet_mesh = trimesh.load(src_sens_path / \"scene0050_02_vh_clean.ply\", process=False)\n mesh = pyrender.Mesh.from_trimesh(create_groups())\n scene = pyrender.Scene()\n scene.add(mesh)\n for i, pose in enumerate(tqdm(trajectories)):\n camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0, aspectRatio=640 / 480)\n camera_pose = pose @ flip_mat\n camera_node = scene.add(camera, pose=camera_pose)\n r = pyrender.OffscreenRenderer(640, 480)\n color, depth = r.render(scene, pyrender.constants.RenderFlags.FLAT)\n Image.fromarray(color).save(f\"runs/scannet_render/segments/{i:04d}.png\")\n scene.remove_node(camera_node)\n\n\ndef map_rendered_mesh(src_sens_path):\n import inflect\n inflect_engine = inflect.engine()\n root = Path(\"runs/scannet_render\")\n out_sem = root / \"semantics\"\n out_ins = root / \"instance\"\n seg_file = src_sens_path / f\"{sens_root.stem}.aggregation.json\"\n seg_groups = 
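`render_mesh` above smuggles segment ids through the renderer by packing each 24-bit id into flat RGB vertex colors; `map_rendered_mesh` then recovers the id from the rendered image as `r * 256**2 + g * 256 + b`. A minimal sketch of the round trip:

import numpy as np

ids = np.array([0, 7, 65793, 2 ** 24 - 1])
rgb = np.stack([(ids // 256 ** 2) % 256, (ids // 256) % 256, ids % 256], axis=-1)
decoded = rgb[..., 0] * 256 ** 2 + rgb[..., 1] * 256 + rgb[..., 2]
assert np.array_equal(ids, decoded)   # lossless for ids below 2**24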
json.loads(Path(seg_file).read_text())[\"segGroups\"]\n segment_to_label = np.zeros(2 ** 24).astype(np.int32)\n segment_to_id = np.zeros(2 ** 24).astype(np.int32)\n distinct_colors = DistinctColors()\n scannetlabel_to_nyuid = {x.split('\\t')[1]: x.split('\\t')[4] for x in Path(\"resources/scannet-labels.combined.tsv\").read_text().splitlines()[1:]}\n scannetlabel_to_nyuid['object'] = 40\n for group in seg_groups:\n label = group['label']\n if inflect_engine.singular_noun(group['label']):\n label = inflect_engine.singular_noun(group['label'])\n segment_to_label[group[\"segments\"]] = scannetlabel_to_nyuid[label]\n segment_to_id[group[\"segments\"]] = group['id'] + 1\n bg_classes = [i for i, x in enumerate(get_thing_semantics(\"reduced\")) if not x]\n for item in tqdm(sorted(list((root / \"segments\").iterdir()), key=lambda x: int(x.stem))):\n segment = np.array(Image.open(item))\n segment = segment[:, :, 0] * 256 ** 2 + segment[:, :, 1] * 256 + segment[:, :, 2]\n ids = segment_to_id[segment]\n segment = segment_to_label[segment]\n ids[segment == 0] = -1\n segment[segment == 0] = -1\n segment[segment == (256 * 256 * 255 + 256 * 255 + 255)] = 0\n\n for i in range(1):\n arr_t, arr_r, arr_b, arr_l = segment[1:, :], segment[:, 1:], segment[:-1, :], segment[:, :-1]\n arr_t_1, arr_r_1, arr_b_1, arr_l_1 = segment[2:, :], segment[:, 2:], segment[:-2, :], segment[:, :-2]\n\n arr_t = np.concatenate([arr_t, segment[-1, :][np.newaxis, :]], axis=0)\n arr_r = np.concatenate([arr_r, segment[:, -1][:, np.newaxis]], axis=1)\n arr_b = np.concatenate([segment[0, :][np.newaxis, :], arr_b], axis=0)\n arr_l = np.concatenate([segment[:, 0][:, np.newaxis], arr_l], axis=1)\n\n arr_t_1 = np.concatenate([arr_t_1, segment[-2, :][np.newaxis, :], segment[-1, :][np.newaxis, :]], axis=0)\n arr_r_1 = np.concatenate([arr_r_1, segment[:, -2][:, np.newaxis], segment[:, -1][:, np.newaxis]], axis=1)\n arr_b_1 = np.concatenate([segment[0, :][np.newaxis, :], segment[1, :][np.newaxis, :], arr_b_1], axis=0)\n arr_l_1 = np.concatenate([segment[:, 0][:, np.newaxis], segment[:, 1][:, np.newaxis], arr_l_1], axis=1)\n\n segment[np.logical_and(segment == -1, arr_t != -1)] = arr_t[np.logical_and(segment == -1, arr_t != -1)]\n segment[np.logical_and(segment == -1, arr_r != -1)] = arr_r[np.logical_and(segment == -1, arr_r != -1)]\n segment[np.logical_and(segment == -1, arr_b != -1)] = arr_b[np.logical_and(segment == -1, arr_b != -1)]\n segment[np.logical_and(segment == -1, arr_l != -1)] = arr_l[np.logical_and(segment == -1, arr_l != -1)]\n\n segment[np.logical_and(segment == -1, arr_t_1 != -1)] = arr_t_1[np.logical_and(segment == -1, arr_t_1 != -1)]\n segment[np.logical_and(segment == -1, arr_r_1 != -1)] = arr_r_1[np.logical_and(segment == -1, arr_r_1 != -1)]\n segment[np.logical_and(segment == -1, arr_b_1 != -1)] = arr_b_1[np.logical_and(segment == -1, arr_b_1 != -1)]\n segment[np.logical_and(segment == -1, arr_l_1 != -1)] = arr_l_1[np.logical_and(segment == -1, arr_l_1 != -1)]\n\n arr_t, arr_r, arr_b, arr_l = ids[1:, :], ids[:, 1:], ids[:-1, :], ids[:, :-1]\n arr_t_1, arr_r_1, arr_b_1, arr_l_1 = ids[2:, :], ids[:, 2:], ids[:-2, :], ids[:, :-2]\n\n arr_t = np.concatenate([arr_t, ids[-1, :][np.newaxis, :]], axis=0)\n arr_r = np.concatenate([arr_r, ids[:, -1][:, np.newaxis]], axis=1)\n arr_b = np.concatenate([ids[0, :][np.newaxis, :], arr_b], axis=0)\n arr_l = np.concatenate([ids[:, 0][:, np.newaxis], arr_l], axis=1)\n\n arr_t_1 = np.concatenate([arr_t_1, ids[-2, :][np.newaxis, :], ids[-1, :][np.newaxis, :]], axis=0)\n arr_r_1 = 
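The shift-and-compare block in `map_rendered_mesh` fills unlabeled (-1) pixels from their nearest valid neighbours, written out once for `segment` and once for `ids` (in the `ids` pass that follows, one shifted copy still reads from `segment`, an easy slip with this much duplication). A minimal sketch of the same idea factored into a helper; `np.roll` wraps at the image border, unlike the edge-padded shifts in the original, so this is an approximation:

import numpy as np

def fill_holes_once(arr, invalid=-1):
    out = arr.copy()
    for axis, shift in [(0, 1), (0, -1), (1, 1), (1, -1)]:
        neighbour = np.roll(arr, shift, axis=axis)   # 4-neighbour in one direction
        mask = (out == invalid) & (neighbour != invalid)
        out[mask] = neighbour[mask]
    return out

a = np.array([[1, -1, 2], [-1, -1, -1], [3, -1, 4]])
print(fill_holes_once(a))   # only the centre pixel stays -1 after one pass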
np.concatenate([arr_r_1, ids[:, -2][:, np.newaxis], ids[:, -1][:, np.newaxis]], axis=1)\n arr_b_1 = np.concatenate([ids[0, :][np.newaxis, :], ids[1, :][np.newaxis, :], arr_b_1], axis=0)\n arr_l_1 = np.concatenate([ids[:, 0][:, np.newaxis], segment[:, 1][:, np.newaxis], arr_l_1], axis=1)\n\n ids[np.logical_and(ids == -1, arr_t != -1)] = arr_t[np.logical_and(ids == -1, arr_t != -1)]\n ids[np.logical_and(ids == -1, arr_r != -1)] = arr_r[np.logical_and(ids == -1, arr_r != -1)]\n ids[np.logical_and(ids == -1, arr_b != -1)] = arr_b[np.logical_and(ids == -1, arr_b != -1)]\n ids[np.logical_and(ids == -1, arr_l != -1)] = arr_l[np.logical_and(ids == -1, arr_l != -1)]\n\n ids[np.logical_and(ids == -1, arr_t_1 != -1)] = arr_t_1[np.logical_and(ids == -1, arr_t_1 != -1)]\n ids[np.logical_and(ids == -1, arr_r_1 != -1)] = arr_r_1[np.logical_and(ids == -1, arr_r_1 != -1)]\n ids[np.logical_and(ids == -1, arr_b_1 != -1)] = arr_b_1[np.logical_and(ids == -1, arr_b_1 != -1)]\n ids[np.logical_and(ids == -1, arr_l_1 != -1)] = arr_l_1[np.logical_and(ids == -1, arr_l_1 != -1)]\n\n segment[segment == -1] = 0\n ids[ids == -1] = 0\n\n if src_sens_path.stem in scene_specific_fixes_objectid:\n for ob_id in scene_specific_fixes_objectid[src_sens_path.stem]:\n segment[ids == ob_id] = scene_specific_fixes_objectid[src_sens_path.stem][ob_id]\n\n reduce_map, fold_map = get_reduce_and_fold_map()\n segment = fold_map[reduce_map[segment.flatten()]].reshape(segment.shape).astype(np.int8)\n semantic_bg = np.isin(segment, bg_classes)\n ids[semantic_bg] = 0\n\n alpha = 0.75\n\n ids = cv2.medianBlur(ids.astype(np.uint8), 5)\n segment = cv2.medianBlur(segment.astype(np.uint8), 5)\n\n boundaries_semantics = get_boundary_mask(segment)\n boundaries_instance = get_boundary_mask(ids)\n\n segment = (distinct_colors.get_color_fast_numpy(segment.reshape(-1)).reshape(list(segment.shape) + [3]) * 255).astype(np.uint8)\n ids = (distinct_colors.get_color_fast_numpy(ids.reshape(-1), override_color_0=True).reshape(list(ids.shape) + [3]) * 255).astype(np.uint8)\n color_image = np.array(Image.open(root / \"rgb\" / f\"{item.stem}.jpg\"))\n\n segment = segment * alpha + color_image * (1 - alpha)\n ids = ids * alpha + color_image * (1 - alpha)\n segment[boundaries_semantics > 0, :] = 0\n ids[boundaries_instance > 0, :] = 0\n\n Image.fromarray(ids.astype(np.uint8)).save(out_ins / item.name)\n Image.fromarray(segment.astype(np.uint8)).save(out_sem / item.name)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='scannet preprocessing')\n parser.add_argument('--sens_root', required=False, default='/cluster/gimli/ysiddiqui/scannet_val_scans/scene0050_02', help='sens file path')\n parser.add_argument('--n', required=False, type=int, default=1, help='num proc')\n parser.add_argument('--p', required=False, type=int, default=0, help='current proc')\n args = parser.parse_args()\n\n sens_root = Path(args.sens_root)\n dest = Path(\"data/scannet/\", sens_root.stem)\n dest.mkdir(exist_ok=True)\n print('#' * 80)\n print(f'extracting sens from {str(sens_root)} to {str(dest)}...')\n extract_scan(sens_root, dest)\n extract_labels(sens_root, dest)\n print('#' * 80)\n print('subsampling...')\n subsample_scannet_blur_window(dest, min_frames=900)\n visualize_labels(dest)\n print('#' * 80)\n print('mapping labels...')\n fold_scannet_classes(dest)\n visualize_mask_folder(dest / \"rs_semantics\")\n print('#' * 80)\n print('renumbering instances...')\n renumber_instances(dest)\n visualize_mask_folder(dest / \"rs_instance\")\n print('#' * 80)\n 
print('please run the following command for mask2former to generate machine generated panoptic segmentation')\n print(\n f'python demo.py --config-file ../configs/coco/panoptic-segmentation/swin/maskformer2_swin_large_IN21k_384_bs16_100ep.yaml --input {str(dest.absolute())}/color --output {str(dest.absolute())}/panoptic --predictions {str(dest.absolute())}/panoptic --opts MODEL.WEIGHTS ../checkpoints/model_final_f07440.pkl')\n print('#' * 80)\n print('mapping coco labels...')\n map_panoptic_coco(dest)\n visualize_mask_folder(dest / \"m2f_semantics\")\n visualize_mask_folder(dest / \"m2f_instance\")\n visualize_mask_folder(dest / \"m2f_segments\")\n print('creating validation set')\n print('#' * 80)\n print('creating validation set')\n create_validation_set(dest, 0.20)\n print('#' * 80)\n", "repo_name": "nihalsid/panoptic-lifting", "sub_path": "dataset/preprocessing/preprocess_scannet.py", "file_name": "preprocess_scannet.py", "file_ext": "py", "file_size_in_byte": 55899, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 266, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tqdm.tqdm", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.Laplacian", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.CV_32F", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.var", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 52, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 65, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 69, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 73, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 87, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 93, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 97, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 101, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 169, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 179, "usage_type": "call"}, {"api_name": "dataset.preprocessing.sens_reader.SensorData.SensorData", "line_number": 187, "usage_type": "call"}, {"api_name": "os.system", "line_number": 195, "usage_type": "call"}, {"api_name": "os.system", "line_number": 196, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 198, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 200, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 201, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 202, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 207, "usage_type": "call"}, {"api_name": "util.misc.visualize_mask", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 208, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 208, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 212, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 213, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 213, 
"usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 213, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 216, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 216, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 220, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 221, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 221, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 224, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 224, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 235, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 263, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 264, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 274, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 274, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 274, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 275, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 275, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 275, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 280, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 280, "usage_type": "name"}, {"api_name": "numpy.int8", "line_number": 280, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 285, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 292, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 302, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 303, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 303, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 304, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 304, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 
306, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 308, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 308, "usage_type": "argument"}, {"api_name": "torch.unique", "line_number": 309, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 311, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 329, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 331, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 336, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 346, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 350, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 350, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 352, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 358, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 360, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 361, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 361, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 361, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 361, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 362, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 362, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 362, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 364, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 377, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 388, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 389, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 391, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 393, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 393, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 393, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 394, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 394, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 397, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 402, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 415, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 416, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 430, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 448, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 452, "usage_type": "call"}, {"api_name": "cv2.fisheye.estimateNewCameraMatrixForUndistortRectify", "line_number": 453, "usage_type": "call"}, {"api_name": "cv2.fisheye", "line_number": 453, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 453, "usage_type": "call"}, {"api_name": "cv2.fisheye.initUndistortRectifyMap", "line_number": 454, "usage_type": "call"}, {"api_name": "cv2.fisheye", "line_number": 454, "usage_type": "attribute"}, {"api_name": "numpy.eye", 
"line_number": 454, "usage_type": "call"}, {"api_name": "cv2.CV_16SC2", "line_number": 454, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 455, "usage_type": "attribute"}, {"api_name": "cv2.remap", "line_number": 456, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 456, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 456, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 461, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 462, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 462, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 467, "usage_type": "call"}, {"api_name": "cv2.remap", "line_number": 469, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 469, "usage_type": "attribute"}, {"api_name": "cv2.remap", "line_number": 470, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 470, "usage_type": "attribute"}, {"api_name": "cv2.remap", "line_number": 471, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 471, "usage_type": "attribute"}, {"api_name": "torch.clip", "line_number": 472, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 472, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 477, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 481, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 481, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 481, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 482, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 482, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 482, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 483, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 483, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 484, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 484, "usage_type": "name"}, {"api_name": "numpy.uint16", "line_number": 484, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 485, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 485, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 486, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 486, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 486, "usage_type": "attribute"}, {"api_name": "numpy.savez_compressed", "line_number": 489, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 501, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 501, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 501, "usage_type": "name"}, {"api_name": "cv2.remap", "line_number": 502, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 502, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 505, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 505, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 510, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 513, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 514, "usage_type": "call"}, {"api_name": "util.distinct_colors.DistinctColors", "line_number": 520, "usage_type": "call"}, 
{"api_name": "pickle.load", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 525, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 528, "usage_type": "call"}, {"api_name": "util.misc.create_box", "line_number": 542, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 545, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 550, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 553, "usage_type": "call"}, {"api_name": "util.distinct_colors.DistinctColors", "line_number": 556, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 560, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 561, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 563, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 565, "usage_type": "call"}, {"api_name": "transforms3d.axangles.axangle2mat", "line_number": 568, "usage_type": "call"}, {"api_name": "util.transforms.hmg", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 569, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 570, "usage_type": "call"}, {"api_name": "util.transforms.hmg", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 571, "usage_type": "call"}, {"api_name": "util.transforms.dot", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 572, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 572, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 574, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 576, "usage_type": "call"}, {"api_name": "util.misc.create_box", "line_number": 586, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 589, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 593, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 593, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 594, "usage_type": "call"}, {"api_name": "PIL.Image.NEAREST", "line_number": 594, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 594, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 598, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 600, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 600, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 606, "usage_type": "call"}, {"api_name": "util.metrics.ConfusionMatrix", "line_number": 607, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 615, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 616, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 616, "usage_type": "call"}, {"api_name": "util.metrics.ConfusionMatrix", "line_number": 619, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 620, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 623, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 630, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 630, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 636, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 638, "usage_type": "call"}, {"api_name": 
"torch.from_numpy", "line_number": 639, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 640, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 641, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 642, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 643, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 644, "usage_type": "call"}, {"api_name": "util.panoptic_quality.panoptic_quality", "line_number": 645, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 657, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 657, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 663, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 665, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 666, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 667, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 668, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 669, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 670, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 671, "usage_type": "call"}, {"api_name": "util.panoptic_quality.panoptic_quality", "line_number": 674, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 674, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 680, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 680, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 686, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 688, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 689, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 690, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 691, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 692, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 693, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 694, "usage_type": "call"}, {"api_name": "util.panoptic_quality.panoptic_quality_match", "line_number": 695, "usage_type": "call"}, {"api_name": "util.panoptic_quality._panoptic_quality_compute", "line_number": 702, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 702, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 711, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 711, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 722, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 723, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 723, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 723, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 723, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 725, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 726, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 731, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 738, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 740, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 741, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 741, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 744, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 748, 
"usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 753, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 753, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 754, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 755, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 755, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 755, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 755, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 765, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 776, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 781, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 782, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 782, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 782, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 783, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 783, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 783, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 784, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 785, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 792, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 798, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 798, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 799, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 806, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 806, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 814, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 822, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 822, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 822, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 823, "usage_type": "call"}, {"api_name": "util.misc.visualize_mask", "line_number": 825, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 831, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 834, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 838, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 841, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 841, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 843, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 846, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 849, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 852, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 858, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 859, "usage_type": "call"}, {"api_name": "shutil.copytree", "line_number": 862, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 871, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 880, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 880, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 880, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 881, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 882, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 882, 
"usage_type": "name"}, {"api_name": "numpy.zeros_like", "line_number": 883, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 889, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 889, "usage_type": "attribute"}, {"api_name": "trimesh.Trimesh", "line_number": 890, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 892, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 898, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 900, "usage_type": "call"}, {"api_name": "trimesh.load", "line_number": 901, "usage_type": "call"}, {"api_name": "pyrender.Mesh.from_trimesh", "line_number": 902, "usage_type": "call"}, {"api_name": "pyrender.Mesh", "line_number": 902, "usage_type": "attribute"}, {"api_name": "pyrender.Scene", "line_number": 903, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 905, "usage_type": "call"}, {"api_name": "pyrender.PerspectiveCamera", "line_number": 906, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 906, "usage_type": "attribute"}, {"api_name": "pyrender.OffscreenRenderer", "line_number": 909, "usage_type": "call"}, {"api_name": "pyrender.constants", "line_number": 910, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 911, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 911, "usage_type": "name"}, {"api_name": "inflect.engine", "line_number": 917, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 918, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 922, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 922, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 923, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 923, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 924, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 924, "usage_type": "attribute"}, {"api_name": "util.distinct_colors.DistinctColors", "line_number": 925, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 926, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 935, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 936, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 936, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 936, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 948, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 948, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 949, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 949, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 950, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 950, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 951, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 951, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 953, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 953, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 954, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 954, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 955, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 955, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", 
"line_number": 956, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 956, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 958, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 959, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 960, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 961, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 963, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 964, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 965, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 966, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 971, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 971, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 972, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 972, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 973, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 973, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 974, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 974, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 976, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 976, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 977, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 977, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 978, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 978, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 979, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 979, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 981, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 982, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 983, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 984, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 986, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 987, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 988, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 989, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 999, "usage_type": "attribute"}, {"api_name": "numpy.isin", "line_number": 1000, "usage_type": "call"}, {"api_name": "cv2.medianBlur", "line_number": 1005, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 1005, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 1006, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 1006, "usage_type": "attribute"}, {"api_name": "util.misc.get_boundary_mask", "line_number": 1008, "usage_type": "call"}, {"api_name": "util.misc.get_boundary_mask", "line_number": 1009, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 1013, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 1013, "usage_type": "call"}, {"api_name": "PIL.Image", 
"line_number": 1013, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 1020, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 1020, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 1020, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 1021, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 1021, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 1021, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 1025, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 1031, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 1032, "usage_type": "call"}]} +{"seq_id": "70231138405", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport sqlite3\n\n\n# 기사 데이터 추출\ndef crawling(link):\n link_req = requests.get(link).text\n soup = BeautifulSoup(link_req, \"html.parser\")\n div_title = soup.find(\"div\", class_= \"article_info\").h3\n div_contents = soup.find(\"div\", class_= \"articleCont\")\n span = soup.find(\"span\", class_= \"article_date\")\n\n id = link.split(\"article_id=\")[1].split(\"&\")[0]\n title = div_title.get_text().strip()\n contents = div_contents.get_text().replace(\"\\t\",\"\").replace(\"\\n\",\"\").strip()\n date = span.get_text()\n print(id, date, title, contents)\n return (id, date, title, contents)\n\n# 현재 페이지의 모든 기사 링크를 리스트로 추출\ndef get_link(url):\n req = requests.get(url).text\n soup = BeautifulSoup(req, 'html.parser')\n links = []\n for tag in soup.select('.articleSubject'):\n links.append(\"https://finance.naver.com/\" + tag.select_one('a')[\"href\"].split('§')[0])\n return links\n\n\n# 실시간 속보\ndef breaking_news():\n print(\"- 실시간 속보 -\")\n\n base_url = \"https://finance.naver.com/news/news_list.naver?mode=LSS2D§ion_id=101§ion_id2=258\"\n\n links = []\n result = []\n page = int(input(\"몇번째 페이지까지 크롤링하시겠습니까? \"))\n for current_page in range(1, page + 1):\n url = base_url + \"&page=\" + str(current_page)\n links += get_link(url)\n\n for link in links:\n result.append(crawling(link))\n return result\n\n# 주요뉴스\ndef main_news():\n print(\"- 주요뉴스 -\")\n\n base_url = \"https://finance.naver.com/news/mainnews.naver\"\n links = []\n result = []\n\n day = int(input((\"며칠 전 뉴스까지 크롤링 하시겠습니까? (0입력 시 오늘의 주요뉴스만): \")))\n today = datetime.datetime.now()\n\n for d in range(day+1):\n target_day = (today - datetime.timedelta(days=d)).strftime('%Y-%m-%d')\n url = base_url + \"?date=\" + str(target_day)\n links += get_link(url)\n\n for link in links:\n result.append(crawling(link))\n return result\n\n# 많이 본 뉴스\ndef most_viewed_news():\n print(\"- 많이 본 뉴스 -\")\n base_url = \"https://finance.naver.com/news/news_list.naver?mode=RANK\"\n links = []\n result = []\n\n day = int(input((\"며칠 전 뉴스까지 크롤링 하시겠습니까? 
(0입력 시 오늘의 많이 본 뉴스만): \")))\n today = datetime.datetime.now()\n\n for d in range(day+1):\n for page in range(1,5): # 4페이지까지\n target_day = (today - datetime.timedelta(days=d)).strftime('%Y%m%d')\n url = base_url + \"&date=\" + str(target_day) + \"&page=\" +str(page)\n req = requests.get(url).text\n soup = BeautifulSoup(req, 'html.parser')\n for tag in soup.select('.simpleNewsList'):\n for li in tag.select('li'):\n links.append(\"https://finance.naver.com/\" + li.select_one('a')[\"href\"].split('§')[0])\n\n for link in links:\n result.append(crawling(link))\n return result\n\n# 데이터 베이스 테이블 생성\ndef creatDB():\n con = sqlite3.connect(\"./news_data.db\")\n cur = con.cursor()\n sql = \"CREATE TABLE newsTable (id INTEGER PRIMARY KEY, date text, title text, contents text)\"\n try:\n cur.execute(sql)\n print(\"The table has been created successfully!\")\n except:\n print(\"table newsTable already exists!\")\n finally:\n con.close()\n\ndef main():\n data = []\n # creatDB()\n try:\n con = sqlite3.connect(\"./news_data.db\")\n cursor = con.cursor()\n print(\"Successfully connected to the database!\")\n except:\n print(\"Connection failed!\")\n\n while True:\n print(\"---------------------------------------\")\n category = int(input(\"1. 실시간 속보\\n2. 주요뉴스\\n3. 많이 본 뉴스\\n4. 종료\\n원하는 카테고리 번호를 입력하세요: \"))\n print(\"---------------------------------------\")\n\n if category==1:\n data += breaking_news()\n elif category == 2:\n data += main_news()\n elif category == 3:\n data += most_viewed_news()\n elif category == 4:\n break\n\n try:\n INSERT_SQL = \"INSERT INTO newsTable(id, date, title, contents) VALUES (?, ?, ?, ?);\"\n cursor.executemany(INSERT_SQL, data)\n print(\"The Data has been inserted Successfully!\")\n except Exception as e:\n print(e)\n\n con.commit()\n con.close()\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "handevmin/moonybot", "sub_path": "crawler/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 58, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 83, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 84, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "4309717454", "text": "import pandas as pd\r\nfrom bokeh.charts import Bar, output_file, show\r\nfrom bokeh.models import FuncTickFormatter\r\n\r\nskills_list = ['cheese making', 'squanching', 'leaving harsh criticisms']\r\npct_counts = [25, 40, 1]\r\ndf = pd.DataFrame({'skill':skills_list, 'pct jobs with skill':pct_counts})\r\np = Bar(df, 'index', values='pct jobs with skill', 
title=\"Top skills for ___ jobs\", legend=False)\r\nlabel_dict = {}\r\nfor i, s in enumerate(skills_list):\r\n label_dict[i] = s\r\n\r\nprint(label_dict)\r\n\r\np.xaxis.formatter = FuncTickFormatter(code=\"\"\"\r\n var labels = %s;\r\n return labels[tick];\r\n\"\"\" % label_dict)\r\n\r\noutput_file(\"bar.html\")\r\nshow(p)", "repo_name": "DanielLongo/eegML", "sub_path": "eegvis/experiments/fixed_ticks.py", "file_name": "fixed_ticks.py", "file_ext": "py", "file_size_in_byte": 641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "bokeh.charts.Bar", "line_number": 8, "usage_type": "call"}, {"api_name": "bokeh.models.FuncTickFormatter", "line_number": 15, "usage_type": "call"}, {"api_name": "bokeh.charts.output_file", "line_number": 20, "usage_type": "call"}, {"api_name": "bokeh.charts.show", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "5818352931", "text": "from urllib.parse import urlparse\n\nimport re\nimport os\nimport wget\nimport gzip\nimport shutil\nimport zipfile\n\nimport subprocess as sp\n\n\nMETHODS = [\n 'Naive',\n 'Burkhardt',\n 'Cohen',\n 'Sandia',\n 'Sandia2',\n 'SandiaDot',\n 'SandiaDot2',\n]\n\nGRAPHS = {\n 'loc-brightkite_edges.txt': 'http://snap.stanford.edu/data/loc-brightkite_edges.txt.gz',\n 'amazon0302.txt': 'https://snap.stanford.edu/data/amazon0302.txt.gz',\n 'roadNet-PA.txt': 'https://snap.stanford.edu/data/roadNet-PA.txt.gz',\n 'amazon0505.txt': 'https://snap.stanford.edu/data/amazon0505.txt.gz',\n 'soc-Epinions1.txt': 'https://snap.stanford.edu/data/soc-Epinions1.txt.gz',\n 'email-EuAll.txt': 'https://snap.stanford.edu/data/email-EuAll.txt.gz',\n 'loc-gowalla_edges.txt': 'https://snap.stanford.edu/data/loc-gowalla_edges.txt.gz',\n 'soc-Slashdot0902.txt': 'https://snap.stanford.edu/data/soc-Slashdot0902.txt.gz',\n 'soc-Slashdot0811.txt': 'https://snap.stanford.edu/data/soc-Slashdot0811.txt.gz',\n}\n\nFULLGRAPH_POWS = [\n i\n for p in range(0, 4)\n for i in range(10 ** p, 10 ** (p + 1), 10 ** p)\n]\n\n\ndef download_graph(url):\n archive_path = './input/' + os.path.split(urlparse(url).path)[1]\n file_path = os.path.splitext(archive_path)[0]\n\n if os.path.exists(file_path) is False:\n wget.download(url, './input')\n\n with gzip.open(archive_path, 'rb') as f_in:\n with open(file_path, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n content = None\n with open(file_path, 'r') as f_in:\n content = f_in.readlines()\n\n with open(file_path, 'w') as f_out:\n for line in content:\n if not line.startswith('#'):\n f_out.write(str.replace(line, '\t', ' '))\n\n os.remove(archive_path)\n\n\ndef create_fullgraph(n):\n file_path = f'./input/FullGraph/fullgraph_{n}.txt'\n if os.path.exists(file_path) is False:\n with open(file_path, 'w') as f_out:\n for i in range(0, n):\n for j in range(i + 1, n):\n f_out.write(f'{i} {j}\\n')\n\n\ndef init():\n sp.run(f'make', shell=True)\n for url in list(GRAPHS.values()):\n download_graph(url)\n for n in FULLGRAPH_POWS:\n create_fullgraph(n)\n\n\ndef get_time(file_path):\n result = {}\n content = None\n with open(file_path, 'r') as f_in:\n content = f_in.readlines()\n for method in METHODS:\n for line in content:\n if re.fullmatch(f'({method} used time \\(in seconds\\): )(.*)(\\n)', line) is not None:\n result[method] = re.sub(\n f'({method} used time \\(in seconds\\): )(.*)(\\n)', '\\g<2>', line)\n print(result)\n return result\n\n\ndef test_graph(file_path):\n results_path 
= './results/' + os.path.split(file_path)[1]\n\n    res = sp.run(f'./main {file_path} > {results_path}', shell=True)\n\n    print(res)\n\n    return results_path\n\n\ndef test_all_fullgraphs(n=FULLGRAPH_POWS[-1]):\n    with open('./fullgraph_results.md', 'w') as f_out:\n        head = '| N |'\n        grid = '|:-:|'\n        for method in METHODS:\n            head += f' {method} time (s) | '\n            grid += f':-:|'\n        f_out.write(f'{head}\\n')\n        f_out.write(f'{grid}\\n')\n\n        for n in list(filter(lambda x: x <= n, FULLGRAPH_POWS)):\n            time = get_time(test_graph(f'./input/FullGraph/fullgraph_{n}.txt'))\n            res = f'| {n} |'\n            for method in METHODS:\n                res += f' {time.get(method)} |'\n            f_out.write(f'{res}\\n')\n\n\ndef test_all_stanford_graphs(n=-1):\n    with open('./stanford_graph_results.md', 'w') as f_out:\n        head = '| Name |'\n        grid = '|:----:|'\n        for method in METHODS:\n            head += f' {method} time (s) | '\n            grid += f':-:|'\n        f_out.write(f'{head}\\n')\n        f_out.write(f'{grid}\\n')\n\n        for g in list(GRAPHS.keys()):\n            print(g, n)\n            if n == 0:\n                break\n            n -= 1\n            time = get_time(test_graph(f'./input/{g}'))\n            print(time)\n            res = f'| {g} |'\n            for method in METHODS:\n                res += f' {time.get(method)} |'\n            f_out.write(f'{res}\\n')\n\n\nif __name__ == '__main__':\n    init()\n    test_all_fullgraphs(1000)\n    test_all_stanford_graphs()\n", "repo_name": "Pogozhelskaya/Triangle_counting", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 4310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.split", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wget.download", "line_number": 47, "usage_type": "call"}, {"api_name": "gzip.open", "line_number": 49, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 51, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 75, "usage_type": "call"}, {"api_name": "re.fullmatch", "line_number": 89, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "4384534597", "text": "import tornado.web\r\nimport tornado.ioloop\r\nimport tornado.httpserver\r\n\r\n\r\nclass IndexHandler(tornado.web.RequestHandler):\r\n    # Method that handles GET requests\r\n    def get(self):\r\n        # Send the response content to the browser\r\n        self.write(\"hello tornado web three!!!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = tornado.web.Application(\r\n        [(r\"/\", IndexHandler)]\r\n    )\r\n    # Instantiate an HTTP server object\r\n    httpServer = tornado.httpserver.HTTPServer(app)\r\n    # Bind the server to the corresponding port\r\n    httpServer.bind(8000)\r\n    # Start multiple processes\r\n    httpServer.start(5)\r\n    # The value may be omitted, None, or negative; all of these start one server process per CPU core\r\n    # Although this multi-process mode is provided, it has problems, and starting multiple processes this way is not recommended\r\n    # Start several processes manually instead, which also makes it possible to bind different ports\r\n    # Problems:\r\n    # (1) Every child process copies an IOLoop instance from the parent process; modifying the IOLoop before the child processes are created affects them.\r\n    # (2) All processes are started by a single command, so the code cannot be modified without stopping the service.\r\n    # (3) All processes share one port, which makes monitoring them individually difficult.\r\n    tornado.ioloop.IOLoop.current().start()\r\n\r\n", "repo_name": "Thousandhack/tornado_stu", "sub_path": "test/server_03.py", "file_name": "server_03.py", "file_ext": "py", "file_size_in_byte": 1221, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tornado.web.web", "line_number": 6, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 6, "usage_type": "name"}, {"api_name": "tornado.web.web.Application", "line_number": 14, "usage_type": "call"}, {"api_name": "tornado.web.web", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 14, "usage_type": "name"}, {"api_name": "tornado.web.httpserver.HTTPServer", "line_number": 18, "usage_type": "call"}, {"api_name": "tornado.web.httpserver", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 18, "usage_type": "name"}, {"api_name": "tornado.web.ioloop.IOLoop.current", "line_number": 30, "usage_type": "call"}, {"api_name": "tornado.web.ioloop", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "26269984468", "text": "import math\r\nimport time\r\nimport random\r\nimport openai\r\nimport json\r\nimport os\r\nfrom dotenv import load_dotenv\r\nfrom pythonosc import udp_client\r\nfrom pythonosc import osc_server\r\nfrom pythonosc.dispatcher import Dispatcher\r\nfrom threading import Thread\r\nimport tkinter as tk\r\nimport subprocess\r\nimport webbrowser\r\nfrom playsound import playsound\r\n\r\n# UDP client for world map visualisation\r\nclient = udp_client.SimpleUDPClient(\"127.0.0.1\", 12000)\r\n\r\n# Imports Catastrophe class\r\nfrom catastrophe import Catastrophe\r\n\r\n# Gets headline constructors\r\nfrom construct_headline import construct_start_headline, construct_end_headline, get_source\r\n\r\n# Loads in .env file which needs to be located in the same folder as this file\r\nload_dotenv()\r\n# Fetches api key from .env file (can be generated at https://platform.openai.com/account/api-keys)\r\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\r\n\r\n# Fetches IP address from .env file\r\nip_address = os.getenv(\"IP_ADDRESS\")\r\n\r\nkey_mapping = [\"a\", \"s\", \"d\", \"f\", \"g\", \"h\", \"j\", \"k\", \"l\", \"ö\", \"ä\", \"#\"]\r\n\r\nprint(f\"Momentane IP Adresse: {ip_address}\")\r\n\r\n\r\nclass Symptoms:\r\n    def __init__(self):\r\n        # Start values\r\n        self.prompt = \"Generiere 25 kurze, fiktive & sarkastische Schlagzeilen über den Klimawandel. Die Schlagzeilen sollen keine Jahreszahlen oder den Begriff Klimawandel beinhalten. 
Geb die Schlagzeilen als Liste mit dem key 'headlines' in einer JSON zurück\"\n self.is_game_running = False\n self.are_headlines_loaded = True\n self.start_year = 2025\n self.year = self.start_year\n self.count = 0\n self.death_count = 0\n self.temperature = 1\n self.free_regions = [\"na1\", \"na2\", \"eu1\", \"sa1\", \"sa2\", \"af1\", \"af2\", \"af3\", \"as1\", \"as2\", \"as3\", \"oc1\"]\n self.occupied_regions = set()\n self.region_data = {\n \"na1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"na2\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"eu1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"sa1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"sa2\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"af1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"af2\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"af3\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"as1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"as2\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"as3\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n \"oc1\": {\n \"is_active\": False,\n \"type\": \"\",\n \"resolution_percentage\": 0,\n },\n }\n self.headline_reserve = []\n self.used_headlines = []\n self.sensor_values: list[int] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n self.has_first_catastrophe_happened = False\n self.annihilation_triggered = False\n\n def reset_attributes(self):\n # Preserves headlines before resetting all attributes\n generated_headlines = self.headline_reserve\n self.__init__()\n self.headline_reserve = generated_headlines\n\n def get_inputs(self):\n # Writes sensor input from Pi Cap into variable\n def get_diff_values(unused_addr, *args):\n self.sensor_values = args\n\n # Maps dispatcher to path of diff values\n dispatcher = Dispatcher()\n dispatcher.map(\"/diff*\", get_diff_values)\n\n # Initiates OSC server\n server = osc_server.BlockingOSCUDPServer((ip_address, 3000), dispatcher)\n server.serve_forever()\n\n def send_data(self):\n tick_count = 0\n while True:\n if tick_count > 5:\n # Sends data to p5project\n client.send_message('/death_count', str(int(self.death_count)))\n client.send_message(\"/are_headlines_loaded\", self.are_headlines_loaded)\n tick_count = 0\n tick_count += 1\n\n region_json = json.dumps(self.region_data, indent=4)\n client.send_message(\"/region_data\", region_json)\n\n client.send_message(\"/is_game_running\", self.is_game_running)\n time.sleep(0.03)\n\n def generate_headlines(self, verbose):\n while True:\n if len(self.headline_reserve) < 100:\n if verbose:\n print(\"Filling up headlines... 
(currently \" + str(len(self.headline_reserve)) + \"/100)\")\n try:\n # Calls GPT API and requests headlines\n gpt_response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"user\",\n \"content\": self.prompt}\n ]\n )\n # Converts response into JSON\n headlines_json = json.loads(gpt_response.choices[0].message.content)\n # Adds new headlines to headlines array\n for headline in headlines_json['headlines']:\n self.headline_reserve.append({\"headline\": headline, \"source\": get_source()})\n # Rests for 5 seconds\n time.sleep(5)\n except Exception as e:\n # Catches bad response from GPT API\n print(\"Error while generating headlines:\", e)\n\n def set_temperature(self):\n # Temperature graph\n self.temperature = 1.5 * math.cos(0.04 * (self.year - self.start_year) + math.pi) + 2.5\n\n def trigger_headline(self):\n if len(self.headline_reserve) > 0:\n # Randomly picks headline from array\n index = random.randrange(0, len(self.headline_reserve))\n headline = self.headline_reserve[index]\n print(headline[\"headline\"] + \" - \" + headline[\"source\"])\n\n self.used_headlines.insert(0, headline)\n # Removes chosen headline from array\n del self.headline_reserve[index]\n else:\n print(\"--- Blank (headline) ---\")\n\n def trigger_catastrophe(self):\n if len(self.free_regions) != 0:\n # Moves region from free to occupied\n selected_region = random.choice(self.free_regions)\n self.occupied_regions.add(selected_region)\n self.free_regions.remove(selected_region)\n\n # Initialises the catastrophe based on selected region and current temperature\n catastrophe = Catastrophe(selected_region, self.temperature)\n\n # Constructs starting headline based on type and region\n start_headline = {\n \"headline\": construct_start_headline(selected_region, catastrophe.type),\n \"source\": get_source()\n }\n self.used_headlines.insert(0, start_headline)\n # playsound(\"audio/alert.wav\", block=False)\n print(\n \"════════════════════════════════════════════════════════════════════════════════════════════════════════════\")\n print(\n f\"!!! CATASTROPHE - {selected_region} - {catastrophe.type} - {float(catastrophe.wind_up):.3}s wind_up - {float(catastrophe.duration):.3}s dur - {float(catastrophe.resolution_time):.3}s res_time - {int(catastrophe.deaths_per_second):,} d_p_s !!!\")\n print(start_headline[\"headline\"] + \" - \" + start_headline[\"source\"])\n print(\"!!! 
On electrode \" + str(catastrophe.electrode_index) + \" - \" + key_mapping[\n catastrophe.electrode_index] + \" !!!\")\n print(\n \"════════════════════════════════════════════════════════════════════════════════════════════════════════════\")\n\n # Changes region data\n self.region_data[selected_region][\"is_active\"] = True\n self.region_data[selected_region][\"type\"] = catastrophe.type\n self.region_data[selected_region][\"resolution_percentage\"] = 1\n\n # Sets starting parameters for catastrophe\n current_windup = 0\n current_duration = 0\n current_resolution_time = 0\n current_death_count = 0\n resolved_by_player = False\n\n # Wind up period of catastrophe\n while current_windup < catastrophe.wind_up and self.is_game_running is True:\n if self.sensor_values[catastrophe.electrode_index] > 15:\n current_resolution_time += 0.01\n self.region_data[selected_region][\n \"resolution_percentage\"] = 1 - current_resolution_time / catastrophe.resolution_time\n if current_resolution_time >= catastrophe.resolution_time:\n resolved_by_player = True\n playsound(\"audio/resolved.wav\", block=False)\n break\n current_windup += 0.01\n time.sleep(0.01)\n\n # Main duration of catastrophe if it hasn't been resolved yet\n if catastrophe.resolution_time >= current_resolution_time:\n while current_duration < catastrophe.duration and self.is_game_running is True:\n if self.sensor_values[catastrophe.electrode_index] > 15:\n current_resolution_time += 0.01\n self.region_data[selected_region][\n \"resolution_percentage\"] = 1 - current_resolution_time / catastrophe.resolution_time\n if current_resolution_time >= catastrophe.resolution_time:\n resolved_by_player = True\n playsound(\"audio/resolved.wav\", block=False)\n break\n self.death_count += catastrophe.deaths_per_second * 0.01\n current_death_count += catastrophe.deaths_per_second * 0.01\n current_duration += 0.01\n time.sleep(0.01)\n\n # Changes region data\n self.region_data[selected_region][\"is_active\"] = False\n\n # Constructs ending headline\n end_headline = {\n \"headline\": construct_end_headline(selected_region, catastrophe.type, current_death_count),\n \"source\": get_source()\n }\n self.used_headlines.insert(0, end_headline)\n print(\n \"════════════════════════════════════════════════════════════════════════════════════════════════════════════\")\n print(\n f\">>> RESOLVED - {selected_region} - {catastrophe.type} - resolved by player? 
{resolved_by_player} <<<\")\n print(end_headline[\"headline\"] + \" - \" + end_headline[\"source\"])\n print(\n \"════════════════════════════════════════════════════════════════════════════════════════════════════════════\")\n\n if self.is_game_running is True:\n # Puts region on 2 second cooldown\n time.sleep(2)\n\n # Moves region back from occupied to free\n self.free_regions.append(selected_region)\n self.occupied_regions.remove(selected_region)\n else:\n print(\"--- Blank (catastrophe) ---\")\n\n def trigger_annihilation(self):\n print(\"☁☢☁ Started annihilation event ☁☢☁\")\n # Occupies regions until it reaches four occupied\n war_regions = []\n while len(war_regions) < 4:\n if len(self.free_regions) > 0:\n selected_region = self.free_regions[0]\n self.free_regions.remove(selected_region)\n self.occupied_regions.add(selected_region)\n print(f\"☁☢☁ Added {selected_region} to annihilation event ☁☢☁\")\n war_regions.append(selected_region)\n\n # Gets sensor value indexes for all four occupied regions\n region_indexes = []\n for region in war_regions:\n catastrophe = Catastrophe(region, self.temperature)\n region_indexes.append(catastrophe.electrode_index)\n self.region_data[region][\"is_active\"] = True\n self.region_data[region][\"type\"] = \"annihilation\"\n self.region_data[region][\"resolution_percentage\"] = 1\n\n # Constructs starting headline for the nuclear war\n start_headline = {\n \"headline\": \"Nach Monaten der Anspannung - DEFCON 1 erreicht: Das Zeitalter der Atomkriege beginnt\",\n \"source\": \"Tiffany\"\n }\n self.used_headlines.insert(0, start_headline)\n playsound(\"audio/annihilation.wav\", block=False)\n print(\n \"☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢\")\n print(f\"☢☢☢ NUCLEAR WAR - {str(war_regions)} ☢☢☢\")\n print(start_headline[\"headline\"] + \" - \" + start_headline[\"source\"])\n print(\"☢☢☢ On electrodes \" + str(region_indexes) + \" ☢☢☢\")\n print(\n \"☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢\")\n\n # Sets starting parameters for annihilation\n resolution_time = 4\n deaths_per_second = 500_000_000\n current_death_count = 0\n current_resolution_time = 0\n\n # Annihilation loop which runs until death_count reaches 10 billion, the game ends or the player resolves the event\n while self.death_count < 10_000_000_000 and self.is_game_running is True:\n if self.sensor_values[region_indexes[0]] > 15 and self.sensor_values[region_indexes[1]] > 15 and \\\n self.sensor_values[region_indexes[2]] > 15 and self.sensor_values[region_indexes[3]] > 15:\n current_resolution_time += 0.01\n for region in war_regions:\n self.region_data[region][\n \"resolution_percentage\"] = 1 - current_resolution_time / resolution_time\n if current_resolution_time >= resolution_time:\n playsound(\"audio/resolved.wav\", block=False)\n break\n self.death_count += deaths_per_second * 0.01\n current_death_count += deaths_per_second * 0.01\n time.sleep(0.01)\n\n # Changes region data\n for region in war_regions:\n self.region_data[region][\"is_active\"] = False\n\n # Sends ending headline\n end_headline = {\n \"headline\": f\"Ein Wunder: Der Atomkrieg ist vorbei! 
Für den Frieden mussten nur {int(current_death_count):,} Personen sterben\",\n \"source\": \"Tiffany\"\n }\n self.used_headlines.insert(0, end_headline)\n print(\n \"☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢\")\n print(end_headline[\"headline\"] + \" - \" + end_headline[\"source\"])\n print(\n \"☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢☁☢\")\n\n if self.is_game_running is True:\n # Puts regions on 2 second cooldown\n time.sleep(2)\n\n # Moves regions back from occupied to free\n for region in war_regions:\n self.free_regions.append(region)\n self.occupied_regions.remove(region)\n\n def trigger_event(self):\n # Chance of nuclear war\n chance_annihilation = 0.001 * (self.death_count / 10_000_000)\n # Limits chance of nuclear war to 4 %\n if chance_annihilation > 0.04:\n chance_annihilation = 0.04\n # Prevents further nuclear wars if one has already been triggered\n if self.annihilation_triggered is True:\n chance_annihilation = 0\n # Chance of headline occurring\n chance_headline = 0.15\n # Base chance of catastrophe occurring\n base_chance_catastrophe = 0.10\n # Temperature increase since game start\n temperature_delta = self.temperature - 1\n # Chance of nothing happening\n chance_remaining = 1 - chance_headline - base_chance_catastrophe - chance_annihilation\n # Chance of catastrophe occurring depending on temperature\n catastrophe_function = 0.7 * (math.cos(math.pi + (temperature_delta / 3) * math.pi) + 1)\n if catastrophe_function > 1:\n catastrophe_function = 1\n chance_catastrophe = base_chance_catastrophe + catastrophe_function * chance_remaining\n # Increase chance of first catastrophe\n if self.has_first_catastrophe_happened is False:\n chance_catastrophe = 0.5\n\n # Picks random number\n random_number = random.randrange(0, 1000000) / 1000000\n\n # Triggers headline\n if random_number < chance_headline:\n self.trigger_headline()\n\n # Triggers catastrophe\n elif random_number < (chance_headline + chance_catastrophe):\n self.has_first_catastrophe_happened = True\n Thread(target=self.trigger_catastrophe, daemon=True).start()\n\n elif random_number < (chance_headline + chance_catastrophe + chance_annihilation):\n if self.annihilation_triggered is False:\n self.annihilation_triggered = True\n Thread(target=self.trigger_annihilation, daemon=True).start()\n\n # Triggers nothing\n else:\n print(\"--- Blank ---\")\n\n def run(self, skip_headlines):\n while True:\n # Game starts when any of the sensors are touched by the player\n print(\"\\nTouch any electrode to start game.\\n\")\n while self.is_game_running is False:\n if any(sensor > 15 for sensor in self.sensor_values):\n # Clears all attributes except headlines\n self.reset_attributes()\n self.is_game_running = True\n break\n\n # Waits for headline generation until at least 20 are available\n if len(self.headline_reserve) < 20 and not skip_headlines:\n self.are_headlines_loaded = False\n print(\"Waiting for GPT to return headlines...\\n\")\n while len(self.headline_reserve) < 20 and not skip_headlines:\n pass\n\n self.are_headlines_loaded = True\n\n print(\"/// SYMPTOMS startet ///\")\n print(\"\\n\")\n time.sleep(1)\n\n # Main game loop\n while self.year < 2100 and self.death_count < 10_000_000_000:\n self.trigger_event()\n self.count += 1\n if self.count == 10:\n self.year += 1\n self.count = 0\n self.set_temperature()\n print(\n \"┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄\")\n print(\n f\"JAHR 
{self.year} - {float(self.temperature):.2}°C - {int(self.death_count):,} TOTE - {len(self.occupied_regions)} AKTIVE REGION(EN) - ATOMKRIEG WAHRSCHEINLICHKEIT {(0.001 * (self.death_count / 10_000_000)):.2%}\")\n print(\n \"┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄\")\n time.sleep(0.3)\n self.is_game_running = False\n time.sleep(1)\n if self.death_count >= 10_000_000_000:\n print(f\"\\n// MENSCHHEIT AUSGESTORBEN, SPIEL ZU ENDE: {int(self.death_count):,} TOTE //\")\n else:\n print(f\"\\n// SPIEL ZU ENDE: {int(self.death_count):,} TOTE //\")\n if self.annihilation_triggered:\n print(\"★ DU HAST DIE ZERSTÖRUNG DER MENSCHHEIT DURCH DEN ATOMKRIEG VERHINDERT. GUT GEMACHT! ★\")\n time.sleep(4)\n\n def gui(self):\n def update_labels():\n \n year_label.config(text=str(self.year))\n temperature_label.config(text=f\"Aktuelle Erderwärmung: {self.temperature:.2f}°C\")\n\n headline_strings = self.used_headlines[:25]\n headlines_text = \"\"\n for headline in headline_strings:\n headlines_text += headline[\"headline\"] + \"\\n\" + \" - \" + headline[\"source\"] + \"\\n\\n\\n\"\n \n # if headline_strings is empty, UI shows starting condition\n if not headline_strings:\n placeholder_text = \"Kannst du die Welt retten? Versuch es sofort und berühre einen der leuchtenden Punkte\"\n headline_list_text.delete(1.0, tk.END)\n headline_list_text.insert(tk.END, placeholder_text)\n elif self.year == 2100 or self.death_count >= 10_000_000_000:\n end_game_text = f\"{int(self.death_count):,} Menschen sind durch die Folgen des Klimawandels umgekommen\"\n headline_list_text.delete(1.0, tk.END)\n headline_list_text.insert(tk.END, end_game_text)\n\n # closes Window after delay of 20 seconds\n # window.after(20000, window.destroy)\n else:\n current_text = headline_list_text.get(1.0, tk.END).strip()\n if current_text == \"Kannst du die Welt retten? 
Versuch es sofort und berühre einen der leuchtenden Punkte\":\r\n                    headline_list_text.delete(1.0, tk.END)\r\n                # Insert the headlines text into the headline_list_text\r\n                headline_list_text.delete(1.0, tk.END)\r\n                headline_list_text.insert(tk.END, headlines_text)\r\n\r\n        window.after(100, update_labels)\r\n\r\n        window = tk.Tk()\r\n        window.configure(bg='#DFE9F6')\r\n\r\n        # Newsfeed according to screensize\r\n        screen_width = window.winfo_screenwidth()\r\n        screen_height = window.winfo_screenheight()\r\n\r\n        window_width = screen_width \r\n        news_wrap = screen_width // 1.5\r\n        titel_size = screen_width // 40\r\n        text_size = screen_width // 80\r\n        news_size = screen_width // 120\r\n\r\n        window.geometry(f\"{window_width}x{screen_height}\")\r\n\r\n        # MainFrame\r\n        frame = tk.Frame(window, bg='#DFE9F6', pady=24, padx=32)\r\n        frame.grid(sticky='nsew') # Using grid instead of pack\r\n\r\n        window.grid_rowconfigure(0, weight=1) # Make the frame expandable\r\n        window.grid_columnconfigure(0, weight=1)\r\n\r\n        # Header\r\n        header = tk.Frame(frame, bg=window.cget(\"bg\"))\r\n        header.grid(sticky='ew') # Header occupies the entire width\r\n\r\n        # Logo\r\n        logo_image = tk.PhotoImage(file=\"./assets/SYMPTOMS.png\")\r\n        logo_label = tk.Label(header, image=logo_image, bg=window.cget(\"bg\"), pady=12)\r\n        logo_label.pack()\r\n\r\n        # Label for year variable\r\n        year_label = tk.Label(header, text=str(self.year), font=(\"Inter\", titel_size), fg=\"#262626\",\r\n                              bg=window.cget(\"bg\"))\r\n        year_label.pack(side=tk.LEFT)\r\n\r\n        # Image for header\r\n        image = tk.PhotoImage(file=\"./assets/Sorting.png\")\r\n        resized_image = image.subsample(2, 2)\r\n        image_label = tk.Label(header, image=resized_image, bg=window.cget(\"bg\"))\r\n        image_label.pack(side=tk.RIGHT)\r\n\r\n        # Frame for temperature_label\r\n        temperature_frame = tk.Frame(frame, bg=window.cget(\"bg\"))\r\n        temperature_frame.grid(sticky='ew') # temperature_frame occupies the entire width\r\n\r\n        # Label for temperature variable\r\n        temperature_label = tk.Label(temperature_frame, text=f\"Aktuelle Erderwärmung: {self.temperature:.2f}°C\",\r\n                                     font=(\"Inter\", text_size), fg=\"#262626\", bg=window.cget(\"bg\"))\r\n        temperature_label.pack(side=tk.TOP, anchor='w')\r\n\r\n        # Newsframe\r\n        newsframe = tk.Frame(frame, bg=window.cget(\"bg\"), pady=12, padx=24)\r\n        newsframe.grid(sticky='nsew') # Newsframe fills the remaining space\r\n\r\n        frame.grid_rowconfigure(2, weight=1) # Make the newsframe expandable\r\n        frame.grid_columnconfigure(0, weight=1)\r\n\r\n        # Postframe\r\n        postframe = tk.Frame(newsframe, bg='#FFFFFF', padx=24)\r\n        postframe.pack(side=tk.LEFT, anchor='n', expand=True)\r\n\r\n        # TODO: At the end, set Windows_width to 100%\r\n        # Label for the list of headlines\r\n        headline_list_label = tk.Label(newsframe, text=\"Dein Newsfeed\", width=screen_width, font=(\"Inter\", news_size), fg=\"#262626\",\r\n                                       wraplength=news_wrap, bg=window.cget(\"bg\"))\r\n        headline_list_label.pack(fill=tk.Y, anchor='w')\r\n\r\n        headline_list_text = tk.Text(newsframe, width=screen_width, pady=24, padx=24, font=(\"Inter\", news_size), fg=\"#262626\", wrap=tk.WORD)\r\n        headline_list_text.pack(fill=tk.BOTH, expand=True) # Changed to fill both sides and expand\r\n\r\n        update_labels()\r\n        window.title(\"Symptoms\")\r\n        window.mainloop()\r\n\r\n    def main(self, skip_headlines=False, test_auto_start=False, start_p5=True, verbose=False):\r\n        if start_p5 is True:\r\n            # Starts osc bridge and p5 sketch\r\n            subprocess.Popen(\"bridge.bat\")\r\n            subprocess.Popen(\"serve.bat\")\r\n\r\n            # Opens sketch in Browser window\r\n            webbrowser.open_new(\"http://127.0.0.1:5000\")\r\n\r\n        # Starts GUI with headlines, year & temperature\r\n        Thread(target=self.gui, daemon=True).start()\r\n\r\n        # 
Headline generation thread\r\n        if not skip_headlines:\r\n            Thread(target=self.generate_headlines, args=(verbose,), daemon=True).start()\r\n\r\n        # Input fetching thread\r\n        Thread(target=self.get_inputs, daemon=True).start()\r\n\r\n        # Sends data to p5\r\n        Thread(target=self.send_data, daemon=True).start()\r\n\r\n        # Automatically starts game if enabled (does not reset game data after game ends!)\r\n        if test_auto_start is True:\r\n            self.is_game_running = True\r\n\r\n        # Runtime\r\n        self.run(skip_headlines)\r\n\r\n\r\nsymptoms = Symptoms()\r\n\r\n# Props:\r\n# skip_headlines: Whether headline generation is skipped (defaults to False)\r\n# test_auto_start: Immediately starts game (defaults to False)\r\n# start_p5: Starts p5 sketch & bridge and opens browser window (defaults to True)\r\n# verbose: Prints progress of headline generation (defaults to False)\r\nsymptoms.main(skip_headlines=False, test_auto_start=False, start_p5=True, verbose=False)\r\n", "repo_name": "kpister/prompt-linter", "sub_path": "data/scraping/repos/m-m-mic~symptoms/installation~runtime~game.py", "file_name": "installation~runtime~game.py", "file_ext": "py", "file_size_in_byte": 28829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pythonosc.udp_client.SimpleUDPClient", "line_number": 18, "usage_type": "call"}, {"api_name": "pythonosc.udp_client", "line_number": 18, "usage_type": "name"}, {"api_name": "dotenv.load_dotenv", "line_number": 27, "usage_type": "call"}, {"api_name": "openai.api_key", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 29, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 32, "usage_type": "call"}, {"api_name": "pythonosc.dispatcher.Dispatcher", "line_number": 132, "usage_type": "call"}, {"api_name": "pythonosc.osc_server.BlockingOSCUDPServer", "line_number": 136, "usage_type": "call"}, {"api_name": "pythonosc.osc_server", "line_number": 136, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 149, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 153, "usage_type": "call"}, {"api_name": "openai.ChatCompletion.create", "line_number": 162, "usage_type": "call"}, {"api_name": "openai.ChatCompletion", "line_number": 162, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 170, "usage_type": "call"}, {"api_name": "construct_headline.get_source", "line_number": 173, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 175, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 182, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 182, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 187, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 200, "usage_type": "call"}, {"api_name": "catastrophe.Catastrophe", "line_number": 205, "usage_type": "call"}, {"api_name": "construct_headline.construct_start_headline", "line_number": 209, "usage_type": "call"}, {"api_name": "catastrophe.type", "line_number": 209, "usage_type": "attribute"}, {"api_name": "construct_headline.get_source", "line_number": 210, "usage_type": "call"}, {"api_name": "catastrophe.type", "line_number": 217, "usage_type": "attribute"}, {"api_name": "catastrophe.wind_up", "line_number": 217, "usage_type": "attribute"}, {"api_name": "catastrophe.duration", "line_number": 217, "usage_type": "attribute"}, {"api_name": "catastrophe.resolution_time", "line_number": 217, "usage_type": "attribute"}, {"api_name": "catastrophe.deaths_per_second", 
"line_number": 217, "usage_type": "attribute"}, {"api_name": "catastrophe.electrode_index", "line_number": 219, "usage_type": "attribute"}, {"api_name": "catastrophe.electrode_index", "line_number": 220, "usage_type": "attribute"}, {"api_name": "catastrophe.type", "line_number": 226, "usage_type": "attribute"}, {"api_name": "catastrophe.wind_up", "line_number": 237, "usage_type": "attribute"}, {"api_name": "catastrophe.electrode_index", "line_number": 238, "usage_type": "attribute"}, {"api_name": "catastrophe.resolution_time", "line_number": 241, "usage_type": "attribute"}, {"api_name": "catastrophe.resolution_time", "line_number": 242, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 244, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 247, "usage_type": "call"}, {"api_name": "catastrophe.resolution_time", "line_number": 250, "usage_type": "attribute"}, {"api_name": "catastrophe.duration", "line_number": 251, "usage_type": "attribute"}, {"api_name": "catastrophe.electrode_index", "line_number": 252, "usage_type": "attribute"}, {"api_name": "catastrophe.resolution_time", "line_number": 255, "usage_type": "attribute"}, {"api_name": "catastrophe.resolution_time", "line_number": 256, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 258, "usage_type": "call"}, {"api_name": "catastrophe.deaths_per_second", "line_number": 260, "usage_type": "attribute"}, {"api_name": "catastrophe.deaths_per_second", "line_number": 261, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 263, "usage_type": "call"}, {"api_name": "construct_headline.construct_end_headline", "line_number": 270, "usage_type": "call"}, {"api_name": "catastrophe.type", "line_number": 270, "usage_type": "attribute"}, {"api_name": "construct_headline.get_source", "line_number": 271, "usage_type": "call"}, {"api_name": "catastrophe.type", "line_number": 277, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 284, "usage_type": "call"}, {"api_name": "catastrophe.Catastrophe", "line_number": 307, "usage_type": "call"}, {"api_name": "catastrophe.electrode_index", "line_number": 308, "usage_type": "attribute"}, {"api_name": "playsound.playsound", "line_number": 319, "usage_type": "call"}, {"api_name": "playsound.playsound", "line_number": 343, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 347, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 367, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 392, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 392, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 401, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 410, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 415, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 443, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 459, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 461, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 468, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 484, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 485, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 488, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 489, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 494, "usage_type": "attribute"}, {"api_name": 
"tkinter.END", "line_number": 496, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 498, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 499, "usage_type": "attribute"}, {"api_name": "tkinter.Tk", "line_number": 503, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 519, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 526, "usage_type": "call"}, {"api_name": "tkinter.PhotoImage", "line_number": 530, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 531, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 535, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 537, "usage_type": "attribute"}, {"api_name": "tkinter.PhotoImage", "line_number": 540, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 542, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 543, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 546, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 550, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 552, "usage_type": "attribute"}, {"api_name": "tkinter.Frame", "line_number": 555, "usage_type": "call"}, {"api_name": "tkinter.Frame", "line_number": 562, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 563, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 567, "usage_type": "call"}, {"api_name": "tkinter.Y", "line_number": 569, "usage_type": "attribute"}, {"api_name": "tkinter.Text", "line_number": 571, "usage_type": "call"}, {"api_name": "tkinter.WORD", "line_number": 571, "usage_type": "attribute"}, {"api_name": "tkinter.BOTH", "line_number": 572, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 581, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 582, "usage_type": "call"}, {"api_name": "webbrowser.open_new", "line_number": 585, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 588, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 592, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 595, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 598, "usage_type": "call"}]} +{"seq_id": "40177349016", "text": "\nfrom sys import argv\nfrom os import makedirs, path\nfrom fontTools.ttLib import TTFont\nfrom PIL import Image, ImageDraw, ImageFont\n\nif len(argv) < 4:\n print('''\n Usage:\\n\n py font_maker.py \n py font_maker.py all\n ''')\n exit()\n\nfont = ImageFont.truetype(argv[1], 68)\n\n\nif not path.exists(argv[2]):\n makedirs(argv[2])\n\nhstr = []\nif argv[3] == \"all\":\n fobj = TTFont(argv[1])\n dict = fobj.getBestCmap()\n for key, _ in dict.items():\n hstr.append(chr(key))\nelse:\n hstr = argv[3]\n\n\nfor c in hstr:\n try:\n size = font.getbbox(c)\n img = Image.new(\"RGBA\", (size[2], size[3]), (0, 0, 0, 0))\n draw = ImageDraw.Draw(img)\n draw.text((0, 0), c, font=font, fill='white')\n\n width, height = img.size\n minx = 99999\n miny = 99999\n maxx = -10\n maxy = -10\n pix = img.load()\n for x in range(0, width):\n for y in range(0, height):\n if pix[x, y][3] != 0:\n if minx > x:\n minx = x\n if miny > y:\n miny = y\n if maxx < x:\n maxx = x\n if maxy < y:\n maxy = y\n\n img.save(\n argv[2] + \"\\\\\" + str(ord(c)) + \".png\"\n )\n except Exception as r:\n print(r)\n\n", "repo_name": "HKLab/HK.TMProGlyphSupplementation", "sub_path": "font-maker.py", "file_name": "font-maker.py", 
"file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "argument"}, {"api_name": "PIL.ImageFont.truetype", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 15, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "name"}, {"api_name": "fontTools.ttLib.TTFont", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 34, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 35, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "71853669926", "text": "from scipy.special import binom\r\n\r\nfrom dataclasses import dataclass\r\nfrom functools import total_ordering\r\nfrom random import randint\r\n\r\n_pid = 0\r\n\r\nclass GameError(Exception):\r\n pass\r\n\r\n\r\n@total_ordering\r\n@dataclass\r\nclass Bet:\r\n amount: int\r\n face: int\r\n\r\n # A bet is less than another bet by checking the amount first, then the face value.\r\n def __lt__(self, other: \"Bet\"):\r\n if other is None:\r\n return other\r\n return (self.amount, self.face) < (other.amount, other.face)\r\n\r\n def __str__(self) -> str:\r\n return f\"{self.amount}x [{self.face}]\"\r\n\r\n\r\nclass Player:\r\n \"\"\"Represents one individual player.\"\"\"\r\n def __init__(self, name: str, dice_count = 5, dice_size = 6):\r\n global _pid\r\n self.id = _pid\r\n _pid += 1\r\n self.name = name\r\n self.dice_size = dice_size\r\n\r\n # Dice don't need to be special, a list of numbers is probably fine\r\n self.dice: list[int] = []\r\n for i in range(dice_count):\r\n self.dice.append(randint(1, self.dice_size))\r\n\r\n def roll(self):\r\n count = len(self.dice)\r\n self.dice.clear()\r\n for i in range(count):\r\n self.dice.append(randint(1, self.dice_size))\r\n\r\n def remove_die(self):\r\n self.dice.pop()\r\n\r\n # A Player loses when they run out of dice.\r\n @property\r\n def lost(self) -> bool:\r\n return len(self.dice) == 0\r\n\r\n @property\r\n def dice_string(self) -> str:\r\n s = \"\"\r\n for i in self.dice:\r\n s += f\" [{i}]\"\r\n return s.strip()\r\n\r\n def __str__(self) -> str:\r\n s = f\"{self.name:>10}: \"\r\n for i in self.dice:\r\n s += f\" [{i}]\"\r\n return s\r\n\r\n\r\nclass Game:\r\n \"\"\"Represents an entire game's backend logic and state-keeping.\"\"\"\r\n def __init__(self, player_names: list[str] = [], dice_count = 5, dice_size = 6):\r\n self.dice_count = dice_count\r\n self.dice_size = dice_size\r\n\r\n # Create players from names\r\n self.players: list[Player] = []\r\n for name in player_names:\r\n self.players.append(\r\n Player(name, self.dice_count, self.dice_size)\r\n )\r\n\r\n # Game state\r\n self.current_bet: 
Bet = None\r\n self.previous_player: Player = None\r\n self.current_player: Player = None\r\n\r\n @property\r\n def dice_in_play(self) -> int:\r\n \"\"\"Amount of dice in play.\"\"\"\r\n return sum((len(p.dice) for p in self.players))\r\n\r\n @property\r\n def all_dice(self) -> list[int]:\r\n \"\"\"Every individual die in play.\"\"\"\r\n d = []\r\n for p in self.players:\r\n d += p.dice\r\n return d\r\n\r\n @property\r\n def game_over(self) -> bool:\r\n \"\"\"Return True if only one player is left.\"\"\"\r\n return len(self.players) == 1\r\n\r\n def check_bet(self, bet: Bet) -> bool:\r\n \"\"\"Return True if a bet is valid against the current dice in play.\"\"\"\r\n return self.all_dice.count(bet.face) >= bet.amount\r\n\r\n def check_spot_on(self, bet: Bet) -> bool:\r\n \"\"\"Return True if a bet is spot-on against the current dice in play.\"\"\"\r\n return self.all_dice.count(bet.face) == bet.amount\r\n\r\n def spot_on_equation(self, die_face: int, dice_amount: int, dice_in_play: int, known_dice: list[int] = []) -> float:\r\n \"\"\"What are the odds of a spot-on bet being true?\"\"\"\r\n dice_amount -= known_dice.count(die_face)\r\n dice_in_play -= len(known_dice)\r\n q = dice_amount\r\n n = dice_in_play\r\n c = binom(n, q)\r\n return c * ((1/self.dice_size)**q) * (((self.dice_size - 1)/self.dice_size)**(n - q))\r\n\r\n def bet_equation(self, die_face: int, dice_amount: int, dice_in_play: int, known_dice: list[int] = []) -> float:\r\n \"\"\"What are the odds of a bet being true?\"\"\"\r\n dice_amount -= known_dice.count(die_face)\r\n dice_in_play -= len(known_dice)\r\n return sum([self.spot_on_equation(die_face, i, dice_in_play) for i in range(dice_amount, dice_in_play + 1)])\r\n\r\n def next_player(self) -> Player:\r\n \"\"\"Take a player off the beginning of the list,\r\n and return it after putting it back on the end.\"\"\"\r\n self.previous_player = self.current_player\r\n p = self.players.pop(0)\r\n self.players.append(p)\r\n self.current_player = p\r\n return p\r\n\r\n def remove_lost_players(self) -> list[Player]:\r\n \"\"\"Remove any players with zero dice from the game.\"\"\"\r\n lost_players = [p for p in self.players if p.lost]\r\n for lp in lost_players:\r\n self.players.remove(lp)\r\n return lost_players\r\n\r\n def setup(self):\r\n \"\"\"Called at the beginning of a new game. Get everything prepared.\"\"\"\r\n for player in self.players:\r\n player.roll()\r\n self.current_player = None\r\n self.previous_player = None\r\n self.current_bet = None\r\n self.next_player()\r\n\r\n def reset(self):\r\n \"\"\"Called at the beginning of a new round. Get everything prepared.\"\"\"\r\n for player in self.players:\r\n player.roll()\r\n self.current_bet = None\r\n\r\n # First bet works slightly differently to other bets so it's its own thing.\r\n def first_bet(self, bet: Bet):\r\n if bet.amount > self.dice_in_play:\r\n raise GameError(f\"Bet is for more dice than are on the table. ({bet.amount}).\")\r\n if bet.face > self.dice_size or 1 > bet.face:\r\n raise GameError(f\"Bet is for an invalid die face ({bet.face}).\")\r\n self.current_bet = bet\r\n\r\n def place_bet(self, bet: Bet):\r\n if bet.amount > self.dice_in_play:\r\n raise GameError(f\"Bet is for more dice than are on the table. 
({bet.amount}).\")\r\n if bet.face > self.dice_size or 1 > bet.face:\r\n raise GameError(f\"Bet is for an invalid die face ({bet.face}).\")\r\n if bet.amount > self.current_bet.amount or (bet.amount == self.current_bet.amount\r\n and bet.face > self.current_bet.face):\r\n self.current_bet = bet\r\n else:\r\n raise GameError(\"Bet ({bet}) isn't better than the current bet ({self.current_bet})\")\r\n\r\n def call_bluff(self) -> bool:\r\n \"\"\"Call the previous player's bluff. Returns True if the current player was right.\"\"\"\r\n if self.check_bet(self.current_bet):\r\n self.current_player.remove_die()\r\n self.reset()\r\n return False\r\n else:\r\n self.previous_player.remove_die()\r\n self.reset()\r\n return True\r\n\r\n def call_spot_on(self) -> bool:\r\n \"\"\"Call the previous player's bet spot-on. Returns True if the current player was right.\"\"\"\r\n if self.check_spot_on(self.current_bet):\r\n for p in self.players:\r\n if p != self.current_player:\r\n p.remove_die()\r\n self.reset()\r\n return True\r\n else:\r\n self.current_player.remove_die()\r\n self.reset()\r\n return False\r\n\r\n def __str__(self) -> str:\r\n players = \"\\n\".join([str(p) for p in self.players])\r\n s = f\"Current Player: {self.current_player.name}\\nCurrent Bet: {self.current_bet}\\n\"\r\n s += players\r\n return s\r\n", "repo_name": "DigiDuncan/LiarsDice", "sub_path": "dice/lib/game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 7293, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "functools.total_ordering", "line_number": 13, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 14, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 41, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.special.binom", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "9809468271", "text": "from django.test import override_settings\nfrom django.urls import include, path\nfrom django.utils import timezone\nfrom faker import Faker\nfrom freezegun import freeze_time\nfrom rest_framework.status import HTTP_200_OK, HTTP_401_UNAUTHORIZED\nfrom rest_framework.test import APITestCase\n\nfrom signals.apps.api.views import NamespaceView\nfrom signals.apps.my_signals.models import Token\nfrom signals.apps.signals.factories import SignalFactory\n\nurlpatterns = [\n path('v1/relations/', NamespaceView.as_view(), name='signal-namespace'),\n path('', include('signals.apps.my_signals.urls')),\n]\n\n\nclass NameSpace:\n urlpatterns: list\n\n\ntest_urlconf = NameSpace()\ntest_urlconf.urlpatterns = urlpatterns\n\n\nfake = Faker()\n\n\n@override_settings(ROOT_URLCONF=test_urlconf)\nclass TestMySignalsLoggedInReporterEndpoint(APITestCase):\n endpoint = '/my/signals/me'\n\n def test_me_endpoint(self):\n for _ in range(5):\n email = fake.free_email()\n SignalFactory.create(reporter__email=email)\n\n token = Token.objects.create(reporter_email=email)\n request_headers = {'HTTP_AUTHORIZATION': f'Token {token.key}'}\n\n response = self.client.get(self.endpoint, **request_headers)\n\n self.assertEqual(response.status_code, HTTP_200_OK)\n self.assertEqual(response.json()['email'], email)\n\n def test_me_endpoint_expired_token(self):\n email = fake.free_email()\n\n now = timezone.now()\n with freeze_time(now - timezone.timedelta(days=7)):\n token = Token.objects.create(reporter_email=email)\n\n request_headers = {'HTTP_AUTHORIZATION': 
f'Token {token.key}'}\n\n response = self.client.get(self.endpoint, **request_headers)\n self.assertEqual(response.status_code, HTTP_401_UNAUTHORIZED)\n self.assertEqual(response.json()['detail'], 'Invalid token.')\n", "repo_name": "Amsterdam/signals", "sub_path": "app/signals/apps/my_signals/tests/rest_framework/views/test_my_signals_logged_in_reporter.py", "file_name": "test_my_signals_logged_in_reporter.py", "file_ext": "py", "file_size_in_byte": 1866, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "signals.apps.api.views.NamespaceView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "signals.apps.api.views.NamespaceView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 15, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.test.APITestCase", "line_number": 31, "usage_type": "name"}, {"api_name": "signals.apps.signals.factories.SignalFactory.create", "line_number": 37, "usage_type": "call"}, {"api_name": "signals.apps.signals.factories.SignalFactory", "line_number": 37, "usage_type": "name"}, {"api_name": "signals.apps.my_signals.models.Token.objects.create", "line_number": 39, "usage_type": "call"}, {"api_name": "signals.apps.my_signals.models.Token.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "signals.apps.my_signals.models.Token", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.utils.timezone.now", "line_number": 50, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 50, "usage_type": "name"}, {"api_name": "freezegun.freeze_time", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone.timedelta", "line_number": 51, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 51, "usage_type": "name"}, {"api_name": "signals.apps.my_signals.models.Token.objects.create", "line_number": 52, "usage_type": "call"}, {"api_name": "signals.apps.my_signals.models.Token.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "signals.apps.my_signals.models.Token", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 57, "usage_type": "argument"}, {"api_name": "django.test.override_settings", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "29523466828", "text": "import random\nimport back.tools\nfrom _md5 import md5\nimport config.settings\n\n\ncon = back.tools.con\n\n# {\n# payment_id: int\n# operation_id: int\n# price: int\n# chat_id: int\n# time: int\n# hash: varchar\n# success: bool\n# }\n\n\ndef generate_link(chat_id, price):\n print('generate_link')\n\n while True:\n payment_id = random.randint(10000, 99999)\n if back.tools.get('payments', id_=payment_id) is None:\n break\n\n hash_ = generate_hash(price, payment_id)\n\n with back.tools.DBConnection(config.settings.database_url) as con:\n curs = con.cursor()\n curs.execute(f'''INSERT INTO payments (id, price, chat_id, hash) VALUES ({payment_id}, {price}, {chat_id}, '{hash_}')''')\n curs.execute(f'UPDATE users SET payment_id = array_append(payment_id, {payment_id}) where id = {chat_id}')\n\n 
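# Illustrative example (hypothetical values): with shop_id=123, price=50,\n    # secret='abc' and payment_id=10001, the generate_hash() helper defined below\n    # would sign the payment link as md5('123:50:abc:10001').\n    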
print('generate_link', 'm.chat.id', chat_id, 'payment_id', payment_id)\n\n    link = f'{config.settings.payment_link}m={config.settings.shop_id}&oa={price}&o={payment_id}' \\\n           f'&s={hash_}&us_chat_id={chat_id}&lang=ru&i=&em='\n    return link\n\n\ndef generate_hash(price, payment_id):\n    # Shop ID:Payment amount:Secret word:Order number\n    hash_ = md5(f\"{config.settings.shop_id}:{price}:{config.settings.secret}:{payment_id}\".encode('utf-8')).hexdigest()\n    return hash_\n", "repo_name": "artemmarkaryan/betmafia", "sub_path": "back/payment.py", "file_name": "payment.py", "file_ext": "py", "file_size_in_byte": 1399, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "back.tools.tools", "line_number": 7, "usage_type": "attribute"}, {"api_name": "back.tools", "line_number": 7, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "back.tools.tools.get", "line_number": 25, "usage_type": "call"}, {"api_name": "back.tools.tools", "line_number": 25, "usage_type": "attribute"}, {"api_name": "back.tools", "line_number": 25, "usage_type": "name"}, {"api_name": "back.tools.tools.DBConnection", "line_number": 30, "usage_type": "call"}, {"api_name": "back.tools.tools", "line_number": 30, "usage_type": "attribute"}, {"api_name": "back.tools", "line_number": 30, "usage_type": "name"}, {"api_name": "config.settings.settings", "line_number": 30, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "config.settings.settings", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 37, "usage_type": "name"}, {"api_name": "_md5.md5", "line_number": 44, "usage_type": "call"}, {"api_name": "config.settings.settings", "line_number": 44, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "18220643659", "text": "from PIL import Image\nimport pytesseract\nimport re\n\nimport requests\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Load the image\nimg = Image.open('image.jpg')\n# Apply OCR\ntext = pytesseract.image_to_string(img, lang='eng')\nprint(text)\n# Regular expression to find simple addresses that mix digits and words\nregex = r'\\d+ [\\w\\s]+'\naddresses = re.findall(regex, text)\nprint(addresses)\n\n\n\nfor address in addresses:\n    # Convert the address to latitude/longitude with the Google Maps Geocoding API\n    geocoding_url = f\"https://maps.googleapis.com/maps/api/geocode/json?address={address}&key=YOUR_GEOCODING_API_KEY\"\n    geocoding_response = requests.get(geocoding_url)\n    geocode = geocoding_response.json()\n\n    lat = geocode['results'][0]['geometry']['location']['lat']\n    lng = geocode['results'][0]['geometry']['location']['lng']\n\n    # Fetch the street view image via the Google Street View API\n    street_view_url = f\"https://maps.googleapis.com/maps/api/streetview?size=600x300&location={lat},{lng}&fov=80&heading=70&pitch=0&key=YOUR_STREET_VIEW_API_KEY\"\n    image_response = requests.get(street_view_url)\n\n    # Save the street view image to a file\n    with open(f'roadview_{address}.jpg', 'wb') as f:\n        f.write(image_response.content)\n\n    # Display the image on screen\n    img = mpimg.imread(f'roadview_{address}.jpg')\n    imgplot = plt.imshow(img)\n    plt.show()", "repo_name": "jongminKims/meta_EightProject", "sub_path": "nic/MACRO/googleMacro.py", "file_name": "googleMacro.py", "file_ext": "py", "file_size_in_byte": 1402, "program_lang": "python", "lang": "ko", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": 
[{"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "pytesseract.image_to_string", "line_number": 12, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.image.imread", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "40714574383", "text": "#%%\nfrom faust_macrogen_graph import parserutils, analyzeutils, graphutils, eades_fas\nfrom pathlib import Path\nimport pandas as pd\nfrom collections import Counter, OrderedDict\nimport networkx as nx\n\n\n \ndef gen_feature_dict(paramlist, special_researchers, tempsyn=False):\n \"\"\"Computes a dictionary with graph and source features as key-value-pairs.\n \n Args:\n paramlist (list): List with the parameters 'approach', 'skipignore' and 'MFAS approach'.\n special_resarchers (dict): Dictionary with sources (string) as keys and their publication year (int) as values.\n tempsyn (bool): If True, the tempsyn-relation-elements will added to the graph.\n Returns:\n Dictionary with graph and source features as key-value-pairs.\n \"\"\"\n \n #####\n # preparation & creation of graph\n #####\n \n #{fas, edges, nodes, cycles, df}\n feature_dict = {} \n fas_algorithm = paramlist[2]\n\n G = graphutils.gen_faustgraph(paramlist, special_researchers, tempsyn)\n G_fas = eades_fas.eades_FAS(G, fas_algorithm)\n \n #####\n # adding graph features to the feature_dict\n #####\n feature_dict[\"fas\"] = len(G_fas)\n feature_dict[\"edges\"] = len(G.edges())\n feature_dict[\"nodes\"] = len(G.nodes())\n feature_dict[\"nodeslist\"] = list(G.nodes())\n feature_dict[\"cycles\"] = len(list(nx.simple_cycles(G)))\n \n #####\n # analysis\n #####\n year_scores = analyzeutils.get_source_year(G, special_researchers)\n year_df = pd.DataFrame(year_scores.items(), columns=[\"source\", \"year\"])\n year_df.set_index(\"source\", inplace=True)\n \n #df with research count scores\n research_scores = analyzeutils.get_research_score(G)\n sorted_research_scores = {k: research_scores[k] \n for k in sorted(research_scores, key=research_scores.get, reverse=True)}\n research_df = pd.DataFrame(sorted_research_scores.items(), columns=[\"source\", \"year_frequency\"])\n research_df.set_index(\"source\", inplace=True)\n \n #df with normed research count scores\n norm_research_scores = analyzeutils.get_norm_research_score(G, special_researchers, 1770, 2017)\n sorted_norm_research_scores = {k: norm_research_scores[k]\n for k in sorted(norm_research_scores, key=norm_research_scores.get, reverse=True)}\n \n norm_research_df = pd.DataFrame(sorted_norm_research_scores.items(), columns=[\"source\", \"norm_year_frequency\"])\n norm_research_df.set_index(\"source\", inplace=True)\n \n #combinig the three dfs\n source_df = research_df.join(norm_research_df)\n \n #adding df with publication year of the source to the source_df\n year_scores = analyzeutils.get_source_year(G, special_researchers)\n year_df = 
pd.DataFrame(year_scores.items(), columns=[\"source\", \"pub_year\"])\n year_df.set_index(\"source\", inplace=True)\n \n source_df = source_df.join(year_df)\n \n fas_source_counter = Counter()\n for edge in G_fas:\n if G.has_edge(edge[0], edge[1]):\n edge_data = G.get_edge_data(edge[0], edge[1])\n key = edge_data[\"source\"]\n if fas_source_counter[key]:\n fas_source_counter[key] += 1\n else:\n fas_source_counter[key] = 1\n \n \n fasfrequency_df = pd.DataFrame.from_dict(OrderedDict(fas_source_counter.most_common()), \n orient=\"index\").reset_index()\n fasfrequency_df = fasfrequency_df.rename(columns={\"index\":\"source\", 0:\"fas_frequency\"})\n fasfrequency_df.set_index(\"source\", inplace=True)\n \n df = source_df.join(fasfrequency_df)\n df = df.dropna()\n \n percent_fas = (df[\"fas_frequency\"] / df[\"year_frequency\"]) * 100\n norm_percent_fas = (df[\"fas_frequency\"] / df[\"norm_year_frequency\"]) * 100\n percentfas_df = pd.concat([percent_fas, norm_percent_fas], axis=1, sort=True)\n percentfas_df = percentfas_df.rename(columns={0:\"percent_fas\", 1:\"norm_percent_fas\"})\n percentfas_df.sort_values(by=\"percent_fas\", ascending=False)\n df = df.join(percentfas_df, on=\"source\")\n \n \n feature_dict[\"source_df\"] = source_df\n feature_dict[\"fasfrequency_df\"] = fasfrequency_df\n feature_dict[\"percentfas_df\"] = percentfas_df\n \n \n return feature_dict\n\ndef compare_approaches(approaches, special_researchers, temppre=False):\n \"\"\"Computes a DataFrame where the number of nodes, edges, cycles and feedback edges of each approach from the\n approaches list will be listed.\n Args:\n approaches (list): List with the approaches names as strings.\n special_resarchers (dict): Dictionary with sources (string) as keys and their publication year (int) as values. 
\n temppre (bool): If True, the graph for the approaches will be computed by the combination of \n the temppre- and the dates-graph, if False, only the dates-graph.\n Return:\n DataFrame with the approaches as index and the features \"n nodes\", \"n edges\", \"n cycles\" and \"n feedback edges\" as columns.\n \"\"\"\n\n approaches_graphs = {}\n approaches_fas = {}\n \n filespath = Path('resources')\n date_items = parserutils.xmlparser(filespath, True, skipignore=False)\n \n for approach in approaches:\n \n if temppre:\n temppre_items = parserutils.xmlparser(filespath)\n temppreG = nx.DiGraph()\n for t in temppre_items:\n graphutils.add_egdes_from_node_list(temppreG, t)\n \n datesG = graphutils.graph_from_dates(date_items, approach, special_researchers)\n G = nx.compose(temppreG, datesG)\n \n else:\n G = graphutils.graph_from_dates(date_items, approach, special_researchers)\n \n approaches_graphs[approach] = G\n G_fas = eades_fas.eades_FAS(G, True)\n aG = G.copy()\n aG.remove_edges_from(G_fas)\n \n approaches_fas[approach] = G_fas\n \n graphs_approaches = {}\n columns = [\"n nodes\", \"n edges\", \"n cycles\", \"n feedback edges\"]\n \n for k, v in approaches_graphs.items():\n graphs_approaches[k] = [len(v.nodes()), len(v.edges()), len(list(nx.simple_cycles(v))), len(approaches_fas[k])]\n \n approach_df = pd.DataFrame(graphs_approaches)\n approach_df = approach_df.T\n approach_df.columns = columns\n \n return approach_df\n", "repo_name": "realjanpaulus/faust_macrogen_graph", "sub_path": "src/faust_macrogen_graph/comparisonutils.py", "file_name": "comparisonutils.py", "file_ext": "py", "file_size_in_byte": 6366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "faust_macrogen_graph.graphutils.gen_faustgraph", "line_number": 29, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils", "line_number": 29, "usage_type": "name"}, {"api_name": "faust_macrogen_graph.eades_fas.eades_FAS", "line_number": 30, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.eades_fas", "line_number": 30, "usage_type": "name"}, {"api_name": "networkx.simple_cycles", "line_number": 39, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils.get_source_year", "line_number": 44, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils", "line_number": 44, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils.get_research_score", "line_number": 49, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils", "line_number": 49, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 52, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils.get_norm_research_score", "line_number": 56, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils", "line_number": 56, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 60, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils.get_source_year", "line_number": 67, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.analyzeutils", "line_number": 67, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 68, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, 
"usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 94, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 122, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.parserutils.xmlparser", "line_number": 123, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.parserutils", "line_number": 123, "usage_type": "name"}, {"api_name": "faust_macrogen_graph.parserutils.xmlparser", "line_number": 128, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.parserutils", "line_number": 128, "usage_type": "name"}, {"api_name": "networkx.DiGraph", "line_number": 129, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils.add_egdes_from_node_list", "line_number": 131, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils", "line_number": 131, "usage_type": "name"}, {"api_name": "faust_macrogen_graph.graphutils.graph_from_dates", "line_number": 133, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils", "line_number": 133, "usage_type": "name"}, {"api_name": "networkx.compose", "line_number": 134, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils.graph_from_dates", "line_number": 137, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.graphutils", "line_number": 137, "usage_type": "name"}, {"api_name": "faust_macrogen_graph.eades_fas.eades_FAS", "line_number": 140, "usage_type": "call"}, {"api_name": "faust_macrogen_graph.eades_fas", "line_number": 140, "usage_type": "name"}, {"api_name": "networkx.simple_cycles", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "16858753151", "text": "\"\"\"\n Implementation of the Directional Gaussian Smoothing algorithm\n introduced in https://arxiv.org/abs/2002.03001\n The algorithm is implemented as a scipy.optimize method\n\n This particular algorithm will be modified to change sigma\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import OptimizeResult\nfrom scipy.optimize._numdiff import approx_derivative\nfrom collections import deque\n\n\ndef ADGS(fun, x0, args=(), learning_rate=.01, sigma=10., gamma=.995, quad_points=7,\n maxiter=1000, xtol=1e-6, ftol=1e-4, gtol=1e-4, callback=None, **options):\n \"\"\"\n Minimize a scalar function using the ADGS optimizer.\n It is DGS but with exponential decay on sigma\n \"\"\"\n # initialize DGS variables\n dim = len(x0)\n xk = x0.copy()\n fk = fun(xk)\n t = 0\n ##fun_vals = deque([0]*10, maxlen=10)\n\n # establish search directions and quadrature points\n basis = np.eye(dim)\n gh_roots, gh_weights = np.polynomial.hermite.hermgauss(quad_points)\n\n def step(x):\n '''Perform a step of DGS optimizer'''\n nonlocal t\n t += 1\n # estimate smoothed directional derivative along each basis direction\n df_sigma_basis = np.zeros(dim)\n for d in range(dim):\n # estimate directional derivative via Gauss--Hermite quadrature\n f_d = lambda t: fun(x + t*basis[d])\n f_d_vals = np.array([f_d(sigma * p) for p in gh_roots])\n df_sigma_basis[d] = np.sum(gh_weights * gh_roots * f_d_vals)\\\n / (sigma * np.sqrt(np.pi)/2)\n # assemble smoothed gradient and update minimizer\n grad_sigma = np.matmul(basis, df_sigma_basis)\n x -= learning_rate * grad_sigma\n return x, grad_sigma\n\n # iteratively optimize target function\n success = False\n for _ in range(maxiter):\n x = xk.copy()\n fval = fk\n xk, gfk = step(x.copy())\n fk = fun(xk)\n if callback is not None:\n 
callback(xk)\n\n        # check if sigma should be reduced\n        sigma *= gamma\n        ##fun_vals.append(fk)\n        ##if all(abs(fk - fun_val) < 1e-3 for fun_val in fun_vals):\n            ##print(f'iteration {t}, sigma is reduced: {sigma:.4f} --> {sigma/2:.4f}')\n            ##print(fk, fun_vals, '\\n')\n            ##sigma = max(sigma/2, 1e-6)\n            ##fun_vals = deque([0]*10, maxlen=10)\n\n        # check termination conditions\n        if np.linalg.norm(gfk, np.inf) < gtol:\n            msg = 'Optimization terminated successfully.'\n            success = True\n            break\n        if np.linalg.norm(x - xk, np.inf) < xtol:\n            msg = 'Optimization terminated due to x-tolerance.'\n            break\n        if np.abs((fval - fk) / (fval + 1e-8)) < ftol:\n            msg = 'Optimization terminated due to f-tolerance.'\n            break\n        if t >= maxiter:\n            msg = 'The maximum number of iterations is reached.'\n            break\n\n    return OptimizeResult(x=xk, fun=fk, jac=gfk, nit=t, nfev=quad_points*xk.size*t,\n                          success=success, msg=msg)\n\n\nif __name__ == '__main__':\n\n    fun = lambda x: np.sum(x**2)\n    x0 = np.random.randn(100)\n    vals = [fun(x0)]\n    res = ADGS(fun, x0, callback=lambda x: vals.append(fun(x)))\n    print(res)\n", "repo_name": "sukiboo/smoothing_based_optimization", "sub_path": "extra_scripts/adgs.py", "file_name": "adgs.py", "file_ext": "py", "file_size_in_byte": 3253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.eye", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.polynomial.hermite.hermgauss", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.polynomial", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 76, "usage_type": "call"}, {"api_name": "scipy.optimize.OptimizeResult", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "27546273876", "text": "from typing import Optional\n\nimport tensorlayerx as tlx\n\nfrom gammagl.layers.conv import MessagePassing\nfrom gammagl.utils.get_laplacian import get_laplacian\nfrom gammagl.utils.loop import add_self_loops, remove_self_loops\n\n\nclass ChebConv(MessagePassing):\n    r\"\"\"The Chebyshev spectral graph convolutional operator from the\n    `\"Convolutional Neural Networks on Graphs with Fast Localized Spectral\n    Filtering\" <https://arxiv.org/abs/1606.09375>`_ paper\n\n    .. math::\n        \\mathbf{X}^{\\prime} = \\sum_{k=1}^{K} \\mathbf{Z}^{(k)} \\cdot\n        \\mathbf{\\Theta}^{(k)}\n\n    where :math:`\\mathbf{Z}^{(k)}` is computed recursively by\n\n    .. 
math::\n \\mathbf{Z}^{(1)} &= \\mathbf{X}\n\n \\mathbf{Z}^{(2)} &= \\mathbf{\\hat{L}} \\cdot \\mathbf{X}\n\n \\mathbf{Z}^{(k)} &= 2 \\cdot \\mathbf{\\hat{L}} \\cdot\n \\mathbf{Z}^{(k-1)} - \\mathbf{Z}^{(k-2)}\n\n and :math:`\\mathbf{\\hat{L}}` denotes the scaled and normalized Laplacian\n :math:`\\frac{2\\mathbf{L}}{\\lambda_{\\max}} - \\mathbf{I}`.\n\n Args:\n in_channels (int): Size of each input sample\n out_channels (int): Size of each output sample.\n K (int): Chebyshev filter size :math:`K`.\n normalization (str, optional): The normalization scheme for the graph\n Laplacian (default: :obj:`\"sym\"`):\n\n 1. :obj:`None`: No normalization\n :math:`\\mathbf{L} = \\mathbf{D} - \\mathbf{A}`\n\n 2. :obj:`\"sym\"`: Symmetric normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1/2} \\mathbf{A} \\mathbf{D}^{-1/2}`\n\n 3. :obj:`\"rw\"`: Random-walk normalization\n :math:`\\mathbf{L} = \\mathbf{I} - \\mathbf{D}^{-1} \\mathbf{A}`\n\n You need to pass :obj:`lambda_max` to the :meth:`forward` method of\n this operator in case the normalization is non-symmetric.\n **kwargs (optional): Additional arguments of\n :class:`gammagl.layers.conv.MessagePassing`.\n\n Shapes:\n - **input:**\n node features :math:`(|\\mathcal{V}|, F_{in})`,\n edge indices :math:`(2, |\\mathcal{E}|)`,\n edge weights :math:`(|\\mathcal{E}|)` *(optional)*,\n maximum :obj:`lambda` value :math:`(|\\mathcal{G}|)` *(optional)*\n - **output:** node features :math:`(|\\mathcal{V}|, F_{out})`\n\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int, K: int, normalization: Optional = 'sym', **kwargs):\n kwargs.setdefault('aggr', 'add')\n super(ChebConv, self).__init__()\n\n assert K > 0\n assert normalization in [None, 'sym', 'rw'], 'Invalid normalization'\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.normalization = normalization\n self.lins = tlx.nn.ModuleList([\n tlx.layers.Linear(in_features=in_channels, out_features=out_channels) for _ in\n range(K)\n ])\n\n def __normal__(self, edge_index, num_nodes: Optional[int],\n edge_weight, normalization: Optional[str],\n lambda_max, batch=None):\n edge_index, edge_weight = remove_self_loops(edge_index,\n edge_weight)\n edge_index, edge_weight = get_laplacian(edge_index=tlx.convert_to_tensor(edge_index), num_nodes=num_nodes,\n edge_weight=tlx.convert_to_tensor(edge_weight),\n normalization=normalization)\n if batch is not None and lambda_max.numel() > 1:\n lambda_max = lambda_max[batch[edge_index[0]]]\n edge_weight = (2.0 * edge_weight) / lambda_max\n edge_index, edge_weight = add_self_loops(edge_index=tlx.convert_to_tensor(edge_index),\n edge_attr=edge_weight,\n fill_value=-1,\n num_nodes=num_nodes)\n assert edge_weight is not None\n return edge_index, edge_weight\n\n def forward(self, x, edge_index, num_nodes, edge_weight: Optional = None, lambda_max: Optional = None,\n batch: Optional = None):\n if self.normalization != 'sym' and lambda_max is None:\n raise ValueError('You need to pass `lambda_max` to `forward() in`'\n 'case the normalization is non-symmetric.')\n if lambda_max is None:\n lambda_max = tlx.convert_to_tensor(2.0)\n else:\n lambda_max = tlx.convert_to_tensor(lambda_max)\n assert lambda_max is not None\n edge_index, normal = self.__normal__(edge_index, num_nodes,\n edge_weight, self.normalization,\n lambda_max, batch=batch)\n Tx_0 = x\n Tx_1 = x\n out = self.lins[0](Tx_0)\n\n if len(self.lins) > 1:\n Tx_1 = self.propagate(x=x, edge_index=edge_index, edge_weight=normal)\n out = out + self.lins[1](Tx_1)\n\n for lin in 
self.lins[2:]:\n            Tx_2 = self.propagate(x=Tx_1, edge_index=edge_index, edge_weight=normal)\n            Tx_2 = 2 * Tx_2 - Tx_0\n            out = out + lin.forward(Tx_2)\n            Tx_0, Tx_1 = Tx_1, Tx_2\n\n        return out\n", "repo_name": "BUPT-GAMMA/GammaGL", "sub_path": "gammagl/layers/conv/cheb_conv.py", "file_name": "cheb_conv.py", "file_ext": "py", "file_size_in_byte": 5367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 157, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gammagl.layers.conv.MessagePassing", "line_number": 10, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 63, "usage_type": "name"}, {"api_name": "tensorlayerx.nn.ModuleList", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorlayerx.nn", "line_number": 72, "usage_type": "attribute"}, {"api_name": "tensorlayerx.layers.Linear", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorlayerx.layers", "line_number": 73, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 78, "usage_type": "name"}, {"api_name": "gammagl.utils.loop.remove_self_loops", "line_number": 80, "usage_type": "call"}, {"api_name": "gammagl.utils.get_laplacian.get_laplacian", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 82, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 83, "usage_type": "call"}, {"api_name": "gammagl.utils.loop.add_self_loops", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 95, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 96, "usage_type": "name"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 101, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "29608893250", "text": "from email.mime import image\nfrom PIL import Image\n\ndef Images_Pdf(filename, output):\n    images = []\n\n    for file in filename:\n        # grab the image\n        im = Image.open(file)\n        # convert the image\n        im = im.convert('RGB')\n        images.append(im)\n\n    images[0].save(output, save_all= True, append_images=images[1:])\n\nImages_Pdf([\"hotd.jpg\", \"dragon.jpg\"], \"output.pdf\")\n\n\n", "repo_name": "juliagmf/ImgPdf-python", "sub_path": "image_pdf.py", "file_name": "image_pdf.py", "file_ext": "py", "file_size_in_byte": 410, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "13884447141", "text": "import os\nfrom collections import Counter, defaultdict\nimport csv\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\n\n\ndef get_categories(prop, model_name):\n    analysis_type = 'tfidf-raw-10000/each_target_vs_corpus_per_category'\n    path_dir = f'../results/{model_name}/{analysis_type}'\n    path_dir = f'{path_dir}/{prop}'\n    categories = set()\n    for d in os.listdir(path_dir):\n        categories.add(d)\n    return categories\n\ndef get_context_cnts(prop, cat, label, model_name):\n    \n    analysis_type = 'tfidf-raw-10000/each_target_vs_corpus_per_category'\n    path_dir = f'../results/{model_name}/{analysis_type}'\n    path_dir = f'{path_dir}/{prop}'\n    path_label = 
f'{path_dir}/{cat}/{label}'\n \n context_cnt = Counter()\n for f in os.listdir(path_label):\n full_path = f'{path_label}/{f}'\n if full_path.endswith('.csv'):\n with open(full_path) as infile:\n data = list(csv.DictReader(infile))\n for d in data:\n context = d['']\n diff = float(d['diff'])\n if diff > 0:\n context_cnt[context] += 1\n return context_cnt\n \ndef get_n_concepts_total(prop, cat, model_name):\n \n analysis_type = 'tfidf-raw-10000/each_target_vs_corpus_per_category'\n path_dir = f'../results/{model_name}/{analysis_type}'\n path_dir = f'{path_dir}/{prop}'\n label = 'pos'\n path_pos = f'{path_dir}/{cat}/{label}'\n label = 'neg'\n path_neg = f'{path_dir}/{cat}/{label}'\n \n files_pos = [f for f in os.listdir(path_pos) if f.endswith('.csv')]\n files_neg = [f for f in os.listdir(path_neg) if f.endswith('.csv')]\n \n return len(files_pos), len(files_neg)\n\ndef get_f1_distinctiveness(n_pos, n_neg, total_pos, total_neg):\n \n \n total_instances = total_pos + total_neg\n labels = []\n [labels.append('pos') for i in range(total_pos)]\n [labels.append('neg') for i in range(total_neg)]\n pred_labels_pos = []\n for i in range(total_pos):\n if i < n_pos:\n pred_labels_pos.append('pos')\n else:\n pred_labels_pos.append('neg')\n# print(n_pos, total_pos)\n# print(pred_labels_pos.count('pos'), pred_labels_pos.count('neg'))\n \n pred_labels_neg = []\n for i in range(total_neg):\n if i < n_neg:\n pred_labels_neg.append('pos')\n else:\n pred_labels_neg.append('neg')\n# print(n_neg, total_neg)\n# print(pred_labels_neg.count('pos'), pred_labels_neg.count('neg'))\n \n predictions = pred_labels_pos + pred_labels_neg\n \n \n #print(len(labels), len(predictions))\n #print(pos_predictions, neg_predictions)\n \n p, r, f1, supp = precision_recall_fscore_support(labels, predictions, average = 'weighted', \n zero_division=0)\n #average='weighted'\n \n return p, r, f1\n\n\n \ndef aggregate_contexts(prop, cutoff, model_name):\n aggregation_name = 'aggregated-tfidf-raw-10000-categories'\n path_dir_agg = f'../analysis/{model_name}/{aggregation_name}/{prop}'\n os.makedirs(path_dir_agg, exist_ok = True)\n \n context_cnts_all = Counter()\n context_cat_dict = defaultdict(set)\n\n cats = get_categories(prop, model_name)\n\n for cat in cats:\n context_cnts_pos = get_context_cnts(prop, cat, 'pos', model_name)\n context_cnts_neg = get_context_cnts(prop, cat, 'neg', model_name)\n total_pos, total_neg = get_n_concepts_total(prop, cat, model_name)\n \n context_f1_dict = Counter()\n context_score_dict = defaultdict(dict)\n \n # get distinctiveness\n for c, cnt_pos in context_cnts_pos.most_common():\n cnt_neg = context_cnts_neg[c]\n p, r, f1 = get_f1_distinctiveness(cnt_pos, cnt_neg, total_pos, total_neg)\n context_f1_dict[c] = f1\n context_score_dict[c] = {'p': p,'r':r, 'f1': f1}\n \n table = []\n for c, f1 in context_f1_dict.most_common():\n scores = context_score_dict[c]\n d = dict()\n d['context'] = c\n d.update(scores)\n d['n_pos'] = context_cnts_pos[c]\n d['total_pos'] = total_pos\n d['n_neg'] = context_cnts_neg[c]\n d['total_neg'] = total_neg\n table.append(d)\n \n # collect and write to file\n f = f'{path_dir_agg}/{cat}.csv'\n \n header = table[0].keys()\n with open(f, 'w') as outfile:\n writer = csv.DictWriter(outfile, fieldnames = header)\n writer.writeheader()\n for d in table:\n writer.writerow(d)\n \n \ndef prepare_annotation(prop, model_name, cutoff=3, cutoff_concepts = 5):\n \n annotation_name = f'annotation-tfidf-top_{cutoff}_{cutoff_concepts}-raw-10000-categories'\n path_dir_annotation = 
f'../analysis/{model_name}/{annotation_name}/{prop}'\n    os.makedirs(path_dir_annotation, exist_ok = True)\n    f_annotation = f'../analysis/{model_name}/{annotation_name}/{prop}/annotation-updated.csv'\n    \n    # paths aggregated files:\n    aggregation_name = 'aggregated-tfidf-raw-10000-categories'\n    path_dir_agg = f'../analysis/{model_name}/{aggregation_name}/{prop}'\n\n    \n    # get categories\n    cats = get_categories(prop, model_name)\n    \n    # collect all contexts and categories \n    context_cats_dict = defaultdict(set)\n    \n    # load top per category\n    for cat in cats:\n        path = f'{path_dir_agg}/{cat}.csv'\n        with open(path) as infile:\n            data = list(csv.DictReader(infile))\n        # sort by f1\n        f1_dict = defaultdict(list)\n        for d in data:\n            f1 = d['f1']\n            f1_dict[f1].append(d)\n        scores = sorted(list(f1_dict.keys()), reverse=True)\n        top_scores = scores[:cutoff]\n        top_context_dicts = []\n        for ts in top_scores:\n            dicts = f1_dict[ts]\n            for d in dicts:\n                n_pos = int(d['n_pos'])\n                if n_pos > cutoff_concepts:\n                    top_context_dicts.append(d)\n        \n        contexts = [d['context'] for d in top_context_dicts]\n        # record categories\n        for c in contexts:\n            context_cats_dict[c].add(cat)\n    \n    with open(f_annotation, 'w') as outfile:\n        outfile.write('context,evidence_type,categories\\n')\n        for c, cats in context_cats_dict.items():\n            outfile.write(f'{c}, ,{\" \".join(cats)}\\n')\n\n\n\n\n", "repo_name": "PiaSommerauer/CorpusDiagnostics", "sub_path": "scripts/prepare_annotations.py", "file_name": "prepare_annotations.py", "file_ext": "py", "file_size_in_byte": 6468, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 25, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 26, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 30, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_fscore_support", "line_number": 84, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 95, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 97, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 98, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 107, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 108, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 134, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 144, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 156, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 162, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "16169752684", "text": "import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nx = np.linspace(0, 5, 10)\r\nfig, ax = plt.subplots()\r\n\r\nax.plot(x, x+1, color='red', alpha=0.5) # alpha 0.5 gives semi-transparency\r\nax.plot(x, x+2, color='#1155dd')\r\nax.plot(x, x+3, color='#15cc55')\r\nax.legend(loc=2) #upper left corner\r\nax.set_xlabel(r'$\\alpha$')\r\nax.set_ylabel(r'$y$')\r\nax.set_title('some title or so')\r\n\r\nplt.show()", "repo_name": "hura56/python", "sub_path": "Wykres_kolorowyOOOO.py", "file_name": "Wykres_kolorowyOOOO.py", "file_ext": "py", 
"file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "23585474661", "text": "from django.test import TestCase\n\nfrom ..forms import StockForm\n\n\nclass StockFormTests(TestCase):\n def test_create_stock_form_required_fields(self):\n form = StockForm()\n self.assertTrue(form.fields[\"name\"].required)\n self.assertTrue(form.fields[\"price\"].required)\n\n def test_create_stock_form_field_error_messages(self):\n name_error_messages = {\n \"required\": \"Required.\",\n \"unique\": \"This name has already been registered.\",\n \"max_length\": \"Please use less than 100 characters.\",\n }\n price_error_messages = {\n \"required\": \"Required.\",\n \"invalid\": \"Please use digits.\",\n \"max_value\": \"Please enter a number between 0 and 999999999999.\",\n \"min_value\": \"Please enter a number between 0 and 999999999999.\",\n }\n form = StockForm()\n self.assertEqual(\n form.fields[\"name\"].error_messages, name_error_messages\n )\n self.assertEqual(\n form.fields[\"price\"].error_messages, price_error_messages\n )\n\n def test_create_stock_form_when_valid(self):\n form = StockForm(\n {\n \"name\": \"lemon\",\n \"price\": 200,\n }\n )\n\n self.assertTrue(form.is_bound)\n self.assertTrue(form.is_valid())\n self.assertEqual(form.errors, {})\n self.assertEqual(form.errors.as_text(), \"\")\n\n self.assertEqual(form.cleaned_data[\"name\"], \"lemon\")\n self.assertEqual(form.cleaned_data[\"price\"], \"200\")\n\n # boundデータを確認\n form_output = []\n\n for boundfield in form:\n form_output.append([boundfield.label, boundfield.data])\n\n expected_output = [\n [\"Name\", \"lemon\"],\n [\"Price\", 200],\n ]\n\n self.assertEqual(form_output, expected_output)\n\n def test_create_stock_form_when_empty(self):\n form = StockForm()\n self.assertFalse(form.is_bound)\n self.assertFalse(form.is_valid())\n with self.assertRaises(AttributeError):\n form.cleaned_data\n\n def test_create_stock_form_when_partially_empty(self):\n form = StockForm({\"name\": \"orange\"})\n self.assertEqual(form.errors[\"price\"], [\"Required.\"])\n self.assertFalse(form.is_valid())\n", "repo_name": "4ka0/simple_inventory", "sub_path": "stock/tests/test_stock_form.py", "file_name": "test_stock_form.py", "file_ext": "py", "file_size_in_byte": 2310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "forms.StockForm", "line_number": 8, "usage_type": "call"}, {"api_name": "forms.StockForm", "line_number": 24, "usage_type": "call"}, {"api_name": "forms.StockForm", "line_number": 33, "usage_type": "call"}, {"api_name": "forms.StockForm", "line_number": 62, "usage_type": "call"}, {"api_name": "forms.StockForm", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "26087349568", "text": "import math\nimport torch\nimport sys\nsys.path.append(\"..\")\nimport d2lzh_pytorch as d2l\n\nfeatures, labels = d2l.get_data_ch7()\n\n# 从零开始实现\ndef init_adagrad_states():\n s_w = torch.zeros((features.shape[1], 1), 
dtype=torch.float32)\n    s_b = torch.zeros(1, dtype=torch.float32)\n    return (s_w, s_b)\n\ndef adagrad(params, states, hyperparams):\n    eps = 1e-6\n    for p, s in zip(params, states):\n        s.data += (p.grad.data**2)\n        p.data -= hyperparams['lr'] * p.grad.data / torch.sqrt(s + eps)\n\nd2l.train_ch7(adagrad, init_adagrad_states(), {'lr': 0.1}, features, labels)\n\n# PyTorch implementation\nd2l.train_pytorch_ch7(torch.optim.Adagrad, {'lr': 0.1}, features, labels)\n", "repo_name": "HK404/DeepLearning", "sub_path": "05_Optimizer/25_Adagrad.py", "file_name": "25_Adagrad.py", "file_ext": "py", "file_size_in_byte": 680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "d2lzh_pytorch.get_data_ch7", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 12, "usage_type": "attribute"}, {"api_name": "torch.sqrt", "line_number": 19, "usage_type": "call"}, {"api_name": "d2lzh_pytorch.train_ch7", "line_number": 21, "usage_type": "call"}, {"api_name": "d2lzh_pytorch.train_pytorch_ch7", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "14742917598", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/8/2 17:49\r\n# @Author : zwh\r\n# @Site : \r\n# @File : baiduhandler.py\r\n# @Software: PyCharm\r\n\r\nimport pyaudio\r\nimport wave\r\nimport requests\r\nimport json\r\n\r\nclass useBaiduHandler:\r\n\r\n    def __init__(self):\r\n        # get the token\r\n        self.token = self.gettoken()\r\n\r\n    def gettoken(self):\r\n        try:\r\n            apiKey = \"BElGG5nsGL8oevAa3gMzMk4Y\"\r\n            secretKey = \"uVla1FdpQ2HgmojeY9e6pobrS3lRGaeY\"\r\n\r\n            auth_url = \"https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=\" + apiKey + \"&client_secret=\" + secretKey\r\n            response = requests.get(url=auth_url)\r\n            jsondata = response.text\r\n            return json.loads(jsondata)['access_token']\r\n        except Exception as e:\r\n            raise Exception(\"Cannot get the token, the reason is {}\".format(e))\r\n\r\n    def parse(self, wavefile='16k.wav'):\r\n        try:\r\n            fp = wave.open(wavefile, 'rb')\r\n            # contents of the audio clip that has already been recorded\r\n            nframes = fp.getnframes()\r\n            filelength = nframes * 2\r\n            audiodata = fp.readframes(nframes)\r\n\r\n            # product ID for the Baidu speech API\r\n            cuid = '7519663'\r\n            server_url = 'http://vop.baidu.com/server_api' + '?cuid={}&token={}'.format(cuid, self.token)\r\n            headers = {\r\n                'Content-Type': 'audio/pcm; rete=8000',\r\n                'Content-Length': '{}'.format(filelength),\r\n            }\r\n\r\n            response = requests.post(url=server_url, headers=headers, data=audiodata)\r\n            print(response.text)\r\n            data = json.loads(response.text)\r\n            if data['err_msg'] == 'success.':\r\n                return data['result']\r\n            else:\r\n                return 'What did you say? I cannot hear you clearly!'\r\n        except Exception as e:\r\n            raise Exception(\"Parsing wave file failed. 
The reason is {}\".format(e))\n", "repo_name": "zhangwuhui212/MachineLearning", "sub_path": "YuyinCenter/handler/baiduhandler.py", "file_name": "baiduhandler.py", "file_ext": "py", "file_size_in_byte": 1921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 28, "usage_type": "call"}, {"api_name": "wave.open", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 48, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "44187025362", "text": "\"\"\"dsrg_site URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.urls import include, path\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n#Index\nfrom website.views import index\n#News\nfrom website.views import news_list, news_list_filter_group, news_details\n#Profiles\nfrom website.views import profiles_list, profiles_details\n#Project\nfrom website.views import project_details, project_list, project_list_filter_group\n#Research\nfrom website.views import research_list, research_details, research_list_filter_group\n#Publication\nfrom website.views import publication_list\n#Resource\nfrom website.views import resource_list\n\nurlpatterns = [\n path('ckeditor/', include('ckeditor_uploader.urls')),\n]+static(settings.STATIC_URL,document_root=settings.STATICFILES_DIRS) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nnews_patterns = ([\n path('', news_list, name='news'),\n path('', news_list_filter_group, name='news_group'),\n path('detail/',news_details, name='detail'),\n], 'news')\n\nprofiles_patterns = ([\n path('', profiles_list, name='profiles'),\n path('',profiles_details, name='detail'),\n], 'profiles')\n\nproject_patterns = ([\n path('', project_list, name='project'),\n path('', project_list_filter_group, name='project_group'),\n path('detail/',project_details, name='detail'),\n], 'project')\n\n\nresearch_patterns = ([\n path('', research_list, name='research'),\n path('', research_list_filter_group, name='research_group'),\n path('detail/',research_details, name='detail'),\n], 'research')\n\npublication_patterns = ([\n path('', publication_list, name='publication'),\n], 'publication')\n\nresource_patterns = ([\n path('', resource_list, name='resource'),\n], 'resource')\n\nurlpatterns += i18n_patterns(\n path('admin/', admin.site.urls),\n path('',view=index, name='home'),\n path('news/', include(news_patterns, namespace='news')),\n path('people/', include(profiles_patterns, namespace='profiles')),\n path('project/', include(project_patterns, namespace='project')),\n path('research/', include(research_patterns, namespace='research')),\n path('publication/', include(publication_patterns, 
namespace='publication')),\n path('resource/', include(resource_patterns, namespace='resource')),\n)", "repo_name": "KadirKrykl/Ceng4001InternDjango", "sub_path": "dsrg_site/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 3013, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.settings.STATIC_URL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.conf.settings.STATICFILES_DIRS", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.urls.path", "line_number": 41, "usage_type": "call"}, {"api_name": "website.views.news_list", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 42, "usage_type": "call"}, {"api_name": "website.views.news_list_filter_group", "line_number": 42, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 43, "usage_type": "call"}, {"api_name": "website.views.news_details", "line_number": 43, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 47, "usage_type": "call"}, {"api_name": "website.views.profiles_list", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 48, "usage_type": "call"}, {"api_name": "website.views.profiles_details", "line_number": 48, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 52, "usage_type": "call"}, {"api_name": "website.views.project_list", "line_number": 52, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "website.views.project_list_filter_group", "line_number": 53, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "website.views.project_details", "line_number": 54, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 59, "usage_type": "call"}, {"api_name": "website.views.research_list", "line_number": 59, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 60, "usage_type": "call"}, {"api_name": "website.views.research_list_filter_group", "line_number": 60, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 61, "usage_type": "call"}, {"api_name": "website.views.research_details", "line_number": 61, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 65, "usage_type": "call"}, {"api_name": "website.views.publication_list", "line_number": 65, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 69, "usage_type": "call"}, {"api_name": "website.views.resource_list", "line_number": 69, "usage_type": "argument"}, {"api_name": "django.conf.urls.i18n.i18n_patterns", "line_number": 72, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 73, "usage_type": "attribute"}, {"api_name": 
"django.contrib.admin", "line_number": 73, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 74, "usage_type": "call"}, {"api_name": "website.views.index", "line_number": 74, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 75, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 75, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 76, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 77, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 77, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 78, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 78, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 79, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 79, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 80, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "15487471940", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom .models import Order\n# Create your views here.\ndef index(request):\n print('index() 실행')\n # return render(request,'board/index.html')\n # 반환되는 queryset에 대해서 order_by함수 이용하면 특정 필드 기준으로 정렬\n # order_by에 들어가는 필드 앞에 -를 붙이면 내림차순(desc) 아니면 오름차순\n\n Order_list = Order.objects.all().order_by('-id')\n\n context ={\n 'Order_list':Order_list\n }\n return render(request,'order/index.html', context)\n\ndef home(request):\n # 목록으로\n return HttpResponseRedirect(\"/order/\")\n\ndef order(request):\n if request.method =='GET': #요청방식이 get 방식이면 화면 표시\n return render(request,'order/add_order.html')\n else:\n order_text = request.POST['product_name']\n price = request.POST['price']\n\n # price = request.POST['price']\n address = request.POST['address']\n print(\"등록 완료\",order_text,price,address)\n Order.objects.create(\n order_text = order_text,\n price = price, #세션에 있는 값 저장\n address = address\n )\n return HttpResponseRedirect('/order/')\n\ndef list_order(request):\n # result = None # 필터링 된 리스트\n # context = {}\n\n # # print(request.GET)\n # #검색 조건과 검색 키워드가 있어야 필터링 실행\n # if 'searchType' in request.GET and 'searchWord' in request.GET:\n # search_type = request.GET['searchType'] # get안의 문자열은\n # search_word = request.GET['searchWord'] # html의 name속성과 일치해야함\n # print(\"searchType :{}, search_word : {}\".format(search_type,search_word))\n # order = Order.objects.all().order_by('id')\n context = {\n \"order_list\" : order\n }\n return render(request,'order/list_order.html',context)\n\n# def search_order(request):\n# search_name = request.POST['product_name']\n# check = request.POST['check']\n\n# if check == 'order_text':\n# order_list=Order.objects.filter(order_text__contains = search_name)\n# else:\n# order_list=Order.objects.filter(address__contains = search_name)\n# context={\n# 'order_list':order_list\n# }\n# return render(request,'order/list_order.html',context)\n\ndef search_order(request):\n input_name = request.POST['product_name']\n check = request.POST['option']\n\n oList = []\n if check ==\"order\":\n oList = Order.objects.filter(order_text__contains = input_name)\n elif check ==\"front_add\":\n oList = Order.objects.filter(address__startswith = input_name)\n elif check ==\"address\":\n oList = 
Order.objects.filter(address__contains = input_name)\n elif check ==\"price\":\n oList = Order.objects.filter(price__contains = input_name)\n\n return render(request,\"order/list_order.html\",{'order_list' : oList})\n\ndef read(request,id):\n print(\"read실행\")\n order = Order.objects.get(id = id)\n context ={\n 'order':order,\n 'oList' : order.order_text.split(\",\")\n }\n return render(request,'order/read.html', context)\n\ndef delete(request,id):\n Order.objects.get(id=id).delete()\n return HttpResponseRedirect('../../list_order')\n\ndef update(request,id):\n order = Order.objects.get(id = id)\n if request.method == \"GET\":\n #id로 찾은 친구 정보를 템플릿에 표시하기 위해서\n print(\"method>>\",request.method)\n context = {'order' : order}\n return render(request,'../update_order.html', context)\n else:\n # id로 찾은 객체에 대해서 폼의 값으로 원래 객체의 값 덮어쓰기\n print(\"method>>\",request.method)\n order.order_text = request.POST['order_text']\n order.price = request.POST['price']\n order.address = request.POST['address']\n\n order.save()\n #수정 후에 해당 글로 다시 이동\n redirect_url = '/order/' +str(id) +'/'\n\n return HttpResponseRedirect(redirect_url)", "repo_name": "qhtneo/wecan", "sub_path": "projects/myorder/order/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "models.Order.objects.all", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Order.objects.create", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 33, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 38, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Order.objects.filter", "line_number": 75, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 75, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 77, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Order.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.Order", 
"line_number": 87, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 95, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 95, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 96, "usage_type": "call"}, {"api_name": "models.Order.objects.get", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Order.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Order", "line_number": 99, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 104, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "9808335781", "text": "from django.db.models import Q\nfrom django_filters.rest_framework import FilterSet, filters\n\nfrom signals.apps.signals.models import Category\n\n\nclass QuestionFilterSet(FilterSet):\n main_slug = filters.ModelChoiceFilter(\n queryset=Category.objects.filter(is_active=True, parent__isnull=True).all(),\n to_field_name='slug',\n field_name='category__slug',\n label='Hoofd categorie',\n )\n sub_slug = filters.ModelChoiceFilter(\n queryset=Category.objects.filter(is_active=True, parent__isnull=False).all(),\n to_field_name='slug',\n field_name='category__parent__slug',\n label='Sub categorie',\n )\n\n def filter_queryset(self, queryset):\n main_cat = self.form.cleaned_data.get('main_slug', None)\n main_slug = main_cat.slug if main_cat else None\n sub_cat = self.form.cleaned_data.get('sub_slug', None)\n sub_slug = sub_cat.slug if sub_cat else None\n\n # sort on main category first, then question ordering\n qs = queryset.filter(category__is_active=True).order_by(\n '-categoryquestion__category__parent', 'categoryquestion__order'\n )\n\n if main_slug:\n if sub_slug:\n childq = Q(category__parent__slug=main_slug) & Q(category__slug=sub_slug)\n parentq = Q(category__parent__isnull=True) & Q(category__slug=main_slug)\n qs = qs.filter(childq | parentq)\n else:\n qs = qs.filter(\n category__parent__isnull=True,\n category__slug=main_slug\n )\n return qs\n", "repo_name": "Amsterdam/signals", "sub_path": "app/signals/apps/api/filters/question.py", "file_name": "question.py", "file_ext": "py", "file_size_in_byte": 1606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django_filters.rest_framework.FilterSet", "line_number": 7, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.filters.ModelChoiceFilter", "line_number": 8, "usage_type": "call"}, {"api_name": "django_filters.rest_framework.filters", "line_number": 8, "usage_type": "name"}, {"api_name": "signals.apps.signals.models.Category.objects.filter", "line_number": 9, "usage_type": "call"}, {"api_name": "signals.apps.signals.models.Category.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "signals.apps.signals.models.Category", "line_number": 9, "usage_type": "name"}, {"api_name": "django_filters.rest_framework.filters.ModelChoiceFilter", "line_number": 14, "usage_type": "call"}, {"api_name": "django_filters.rest_framework.filters", "line_number": 14, "usage_type": "name"}, {"api_name": "signals.apps.signals.models.Category.objects.filter", "line_number": 15, "usage_type": "call"}, {"api_name": "signals.apps.signals.models.Category.objects", 
"line_number": 15, "usage_type": "attribute"}, {"api_name": "signals.apps.signals.models.Category", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "11660067307", "text": "from db import Db\nfrom sender import Sender\nfrom datetime import datetime\nfrom time import time, sleep\nfrom config import * \nimport json, re\nfrom requests_oauthlib import OAuth2Session\nfrom threading import Thread, active_count\nfrom rq import Queue\nfrom worker import conn\n\nclass User():\n \"\"\"\n Data type for users of this system.\n\n Public methods:\n * create\n * get_by_email\n * email\n * token\n * name\n * created_at\n * filters_made\n * make_filter\n * json\n * delete_filter\n * user_info\n \"\"\"\n\n def __init__(self, data=None):\n if data == None:\n self._email = None\n self._token = None\n self._filters_made = None\n self._name = None\n self._created_at = None\n else:\n data = json.loads(data)\n self._email = data['email']\n self._token = data['token']\n self._filters_made = data['filters_made']\n self._name = data['name']\n self._created_at = datetime.fromtimestamp(data['created_at'])\n\n def __repr__(self):\n return f\"{self.email()} - {self.name()}\"\n\n def create(self, email, name=None, token=None):\n created_at = datetime.now()\n sql = \"INSERT INTO participant (email, name, token, created_at) VALUES (%s, %s, %s, %s);\"\n data = [email, name, token, created_at]\n db = Db()\n try:\n db.query(sql, data)\n self._email = email\n self._name = name\n self._created_at = created_at\n self._token = token\n return True\n except Exception as e:\n print(e)\n return False\n\n\n def email(self):\n return self._email\n\n def token(self):\n return self._token\n\n def name(self):\n return self._name\n\n def created_at(self):\n return self._created_at\n\n def filters_made(self):\n return self._filters_made\n\n def get_by_email(self, email):\n db = Db()\n sql = \"SELECT * FROM participant WHERE email = %s;\"\n data = [email]\n participant = db.query(sql, data)\n if participant is None:\n return None\n self._email = participant['email']\n if participant['token'] is not None:\n self._token = json.loads(participant['token'])\n else:\n self._token = None\n self._filters_made = participant['filters_made']\n self._name = participant['name']\n self._created_at = participant['created_at']\n return self\n\n def json(self):\n _dict = {'email': self.email(), 'name': self.name(), 'token': self.token(), 'filters_made': self.filters_made(), 'created_at': self.created_at().timestamp()}\n return json.dumps(_dict)\n\n def make_filter(self, domain, wait_time=1):\n if self._email is None:\n raise Exception('No user specified: use .get_by_email() or .create() first')\n if self._token is None:\n raise Exception(\"User's Oauth2 token is None\")\n\n google = OAuth2Session(client_id, token=self.token())\n if self.token()['expires_at'] < time()+10:\n google = self.refresh_token()\n if google == 'refresh_error':\n return 'refresh_error'\n headers = {\"Content-Type\": \"application/json\"}\n params = {\n \"criteria\": {\n \"from\": domain\n },\n \"action\": {\n \"removeLabelIds\": [\"SPAM\"],\n \"addLabelIds\": [\"CATEGORY_PERSONAL\"]\n }\n }\n #print(domain)\n r = google.post(\"https://www.googleapis.com/gmail/v1/users/me/settings/filters\", data=json.dumps(params), headers=headers)\n\n if r.status_code == 200:\n filter_id = r.json()['id']\n #print(filter_id)\n db = Db()\n sql = 
\"INSERT INTO filter (filter_id, sender, participant, filter_made, created_at) VALUES (%s, %s, %s, %s, %s);\"\n data = [filter_id, domain, self.email(), True, datetime.now()]\n db.query(sql, data, True)\n return True\n elif r.status_code == 429:\n if wait_time <= 8:\n sleep(wait_time)\n return self.make_filter(wait_time*2)\n else:\n print(r.status_code, r.text)\n return False\n else:\n if wait_time <= 1:\n wait_time = 2\n return self.make_filter(domain, wait_time)\n else:\n sleep(1)\n print(r.text, r.status_code)\n return False\n\n \n def make_filters(self):\n db = Db()\n sql = \"SELECT * FROM sender;\"\n result = db.query(sql)\n for i, row in enumerate(result):\n self.make_filter(row['domain'])\n self._filters_made = True\n return self.set_filters_made(True)\n\n def set_filters_made(self, state):\n db = Db()\n sql = \"UPDATE participant SET filters_made = %s WHERE email = %s\"\n data = [state, self.email()]\n db.query(sql, data, True)\n return True\n\n def refresh_token(self):\n google = OAuth2Session(client_id, token=self.token())\n extra = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n }\n if 'refresh_token' in self.token().keys():\n try:\n self.set_token(google.refresh_token(refresh_url, **extra))\n print('token updated!')\n return google\n except Exception as e:\n print('Error: ', e)\n return 'refresh_error'\n else:\n return 'refresh_error'\n\n def user_info(self, wait_time=1):\n google = OAuth2Session(client_id, token=self.token())\n if self.token()['expires_at'] < time() + 10:\n google = self.refresh_token()\n if google == 'refresh_error':\n return 'refresh_error'\n r = google.get('https://www.googleapis.com/oauth2/v1/userinfo')\n if r.status_code == 200:\n data = r.json()\n if self._name != data['name']:\n db = Db()\n sql = 'UPDATE participant SET name = %s WHERE email = %s;'\n params = [data['name'], self._email]\n db.query(sql, params)\n self._name = data['name']\n if self._email != data['email']:\n db = Db()\n sql = 'UPDATE participant SET name = %s WHERE email = %s;'\n params = [data['email'], self._email]\n db.query(sql, params)\n self._email = data['email']\n return r.json()\n elif r.status_code == 429:\n if wait_time <= 8:\n sleep(wait_time)\n return self.user_info(wait_time*2)\n else:\n print(r.status_code, r.text)\n return False\n else:\n print(r.status_code, r.text)\n return False\n\n def set_token(self, token):\n if self._email is None:\n raise Exception('Anonymous user, set email first')\n db = Db()\n sql = 'UPDATE participant set token = %s where email = %s;'\n data = [json.dumps(token), self.email()]\n db.query(sql, data)\n self._token = token\n return self\n\n def _get_filter(self, filter_id, wait_time=1):\n google = OAuth2Session(client_id, token=self.token())\n if self.token()['expires_at'] < time()+10:\n google = self.refresh_token()\n if google == 'refresh_error':\n return 'refresh_error'\n url = \"https://www.googleapis.com/gmail/v1/users/me/settings/filters/{}\".format(filter_id)\n r = google.get(url)\n if str(r.status_code)[0] == '2':\n return True\n elif r.status_code == 429:\n if wait_time <= 8:\n sleep(wait_time)\n return self._get_filter(wait_time*2)\n else:\n print(r.status_code, r.text)\n return False\n else:\n print('Filter not found in user account')\n self._reset_filter()\n print(r.status_code, r.text)\n return False\n\n def list_filters(self):\n db = Db()\n sql = \"select * from filter where participant = %s;\"\n data = [self.email()]\n result = db.query(sql, data)\n tmp_list = []\n for row in result:\n tmp_row = {}\n for k, v in 
row.items():\n tmp_row[k] = v\n tmp_list.append(tmp_row)\n return tmp_list\n\n\n def delete_filter(self, filter_id, wait_time=1):\n google = OAuth2Session(client_id, token=self.token())\n if self.token()['expires_at'] < time()+10:\n google = self.refresh_token()\n if google == 'refresh_error':\n return 'refresh_error'\n url = \"https://www.googleapis.com/gmail/v1/users/me/settings/filters/{}\".format(filter_id)\n r = google.delete(url)\n if str(r.status_code)[0] == '2':\n db = Db()\n sql = 'DELETE FROM filter WHERE filter_id = %s;'\n data = [filter_id]\n db.query(sql, data, True)\n return True\n elif r.status_code == 429:\n if wait_time <= 8:\n sleep(wait_time)\n return self.delete_filter(filter_id, wait_time*2)\n else:\n print(r.status_code, r.text)\n return False\n else:\n if wait_time <= 1:\n sleep(1)\n wait_time = 2\n return self.delete_filter(filter_id, wait_time)\n else:\n print(r.status_code, r.text)\n return False\n\n def delete_filters(self):\n #if self.filters_made != True:\n # raise Exception(\"Filters have not been made yet\")\n filters = self.list_filters()\n for f in filters:\n self.delete_filter(f['filter_id'])\n self._filters_made = False\n return self.set_filters_made(False)\n \n\nif __name__=='__main__':\n pass\n u = User()\n\n # get by email\n #u.get_by_email('graydenshand@gmail.com')\n #print(u.token())\n\n # create\n #u.create('graydenshand+test@gmail.com', 'Grayden Shand')\n #print(u)\n\n # to json --> from json\n #u.get_by_email('graydenshand@gmail.com')\n #string = u.json()\n #p = User(string)\n #print(p)\n\n # user_info \n #u.get_by_email('graydenshand@gmail.com')\n #print(u.user_info())\n\n # set_token\n #u.get_by_email('graydenshand@gmail.com')\n #u.set_token(token)\n #u.get_by_email('graydenshand@gmail.com')\n #print(u.token())\n\n # make_filter\n #u.get_by_email('graydenshand@gmail.com')\n #u.make_filter()\n #print(u.json())\n\n\n u.get_by_email('graydenshand@gmail.com')\n q = Queue(connection=conn)\n #print(u.json())\n #print(u.list_filters())\n from queue_functions import *\n #q.enqueue(make_filters, u.json())\n q.enqueue(delete_filters, u.json())\n\n # delete all filters\n \"\"\"\n db = Db()\n sql = \"select email from participant;\"\n result = db.query(sql)\n for i, row in enumerate(result):\n u = User().get_by_email(row['email'])\n print(u.json())\n u.delete_filter()\n \"\"\"\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "graydenshand/google_promotion_filter", "sub_path": "user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 11332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "name"}, {"api_name": "db.Db", "line_number": 52, "usage_type": "call"}, {"api_name": "db.query", "line_number": 54, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 81, "usage_type": "call"}, {"api_name": "db.query", "line_number": 84, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 89, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 99, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 108, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 130, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 130, "usage_type": "name"}, {"api_name": "db.query", "line_number": 131, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 135, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 145, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 151, "usage_type": "call"}, {"api_name": "db.query", "line_number": 153, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 160, "usage_type": "call"}, {"api_name": "db.query", "line_number": 163, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 167, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 184, "usage_type": "call"}, {"api_name": "time.time", "line_number": 185, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 193, "usage_type": "call"}, {"api_name": "db.query", "line_number": 196, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 199, "usage_type": "call"}, {"api_name": "db.query", "line_number": 202, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 207, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 219, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 221, "usage_type": "call"}, {"api_name": "db.query", "line_number": 222, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 227, "usage_type": "call"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 238, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 250, "usage_type": "call"}, {"api_name": "db.query", "line_number": 253, "usage_type": "call"}, {"api_name": "requests_oauthlib.OAuth2Session", "line_number": 264, "usage_type": "call"}, {"api_name": "time.time", "line_number": 265, "usage_type": "call"}, {"api_name": "db.Db", "line_number": 272, "usage_type": "call"}, {"api_name": "db.query", "line_number": 275, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 279, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 286, "usage_type": "call"}, {"api_name": "rq.Queue", "line_number": 338, "usage_type": "call"}, {"api_name": "worker.conn", "line_number": 338, "usage_type": "name"}]} +{"seq_id": "74416533605", "text": "#!/usr/bin/python\n### before spark-submit: export PYTHONIOENCODING=utf8\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.functions import max as max_\ndef quiet_logs(sc):\n logger = sc._jvm.org.apache.log4j\n logger.LogManager.getLogger(\"org\"). 
setLevel(logger.Level.ERROR)\n logger.LogManager.getLogger(\"akka\").setLevel(logger.Level.ERROR)\n\nconf = SparkConf().setAppName(\"uni\").setMaster(\"spark://spark-master:7077\")\nsc = SparkContext(conf=conf)\nspark = SparkSession(sc)\n\nquiet_logs(spark)\n\nfrom pyspark.sql.types import *\n\n# //////////// READING FROM AUSTRALIA UV INDEX CSV FILE //////////////\nschemaString = \"timestamp Lat Lon UV_Index\"\nfields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]\nschema = StructType(fields)\ndf = spark.read.csv(\"hdfs://namenode:9000/datasets/australia_uv_index.csv\", header=True, mode=\"DROPMALFORMED\", schema=schema)\n\n# //////////// READING FROM CANCER INCIDENCE AND MORTALITY CSV FILE //////////////\nschemaStringMelanoma = \"Data_type Cancer_group Year Sex Territory Count Age_standardised_rate ICD10_codes\"\nfieldsMelanoma = [StructField(field_name, StringType(), True) for field_name in schemaStringMelanoma.split()]\nschemaMelanoma = StructType(fieldsMelanoma)\ndfMelanoma = spark.read.csv(\"hdfs://namenode:9000/datasets/cancer_incidence_and_mortality_by_state_and_territory.csv\", header=True, mode=\"DROPMALFORMED\", schema=schemaMelanoma)\n\n# extracting year, month, day from timestamp\ndf = df.withColumn(\"Year\", year(col(\"timestamp\")))\\\n .withColumn(\"Month\", month(col(\"timestamp\")))\\\n .withColumn(\"Day\", dayofyear(col(\"timestamp\")))\n\ndf = df.filter(col(\"Year\").isNotNull())\ndf = df.withColumn(\"UV_Index\", col(\"UV_Index\").cast(FloatType()))\n\n# adding territory column depending of Lat and Lon\ndf = df.withColumn(\"Territory\", expr(\"case when Lat = -34.04 and Lon = 151.1 then 'New South Wales' \" +\n \"when Lat = -34.92 and Lon = 138.62 then 'South Australia' \" +\n \"when Lat = -37.73 and Lon = 145.1 then 'Victoria' \" +\n \"when Lat = -27.45 and Lon = 153.03 then 'Queensland' \" +\n \"when Lat = -31.92 and Lon = 115.96 then 'Western Australia' \" +\n \"when Lat = -42.99 and Lon = 147.29 then 'Tasmania' \" +\n \"when Lat = -35.31 and Lon = 149.2 then 'Australian Capital Territory' \" +\n \"else 'Northern Territory' end\"))\n\ndf.show(truncate=False)\n# filtering second csv\ndfMelanoma = dfMelanoma.withColumn(\"Year\", col(\"Year\").cast(IntegerType()))\ndfMelanoma = dfMelanoma.filter(dfMelanoma[\"Cancer_group\"] == \"Melanoma of the skin\")\\\n .filter((dfMelanoma[\"Year\"]>2013) & (dfMelanoma[\"Year\"]<2016))\\\n .filter(dfMelanoma[\"Sex\"] == \"Persons\")\\\n .select(\"Territory\", \"Year\", \"Count\", \"Data_type\")\ndfMelanoma = dfMelanoma.withColumn(\"Count\", col(\"Count\").cast(FloatType()))\n\ndfMelanomaIncidence = dfMelanoma.filter(dfMelanoma[\"Data_type\"] == \"Incidence\").select(\"Territory\", \"Year\", \"Count\", \"Data_type\").orderBy('Count', ascending=False) \ndfMelanomaMortality = dfMelanoma.filter(dfMelanoma[\"Data_type\"] == \"Mortality\").select(\"Territory\", \"Year\", \"Count\", \"Data_type\").orderBy('Count', ascending=False) \n\ndfMelanomaIncidence.show(truncate=False)\ndfMelanomaMortality.show(truncate=False)\n\n#max and avg UV Index for each year in whole Australia\nprint(\"---------------------------------------------\")\nprint(\"Maximum and average UV index in Australia\")\ndfMaxAvgAustralia = df.groupBy(\"Year\")\\\n .agg(\n max(col(\"UV_Index\")).alias(\"max_UV_Index\"),\n avg(col(\"UV_Index\")).alias(\"avg_UV_Index\"),\n )\\\n .orderBy('Year', ascending=True)\ndfMaxAvgAustralia.show(truncate=False)\n\nprint(\"---------------------------------------------\")\nprint(\"Maximum UV Index for 
every territory in each year\")\ndfMaxTerritoryYear = df.groupBy(\"Territory\").pivot(\"Year\").max(\"UV_Index\")\ndfMaxTerritoryYear.show(truncate=False)\n#dfMaxTerritoryYear.repartition(1).write.csv(\"hdfs://namenode:9000/results/MaxTerritoryYear.csv\", sep='|')\n\nprint(\"---------------------------------------------\")\nprint(\"Average UV Index for every territory in each year\")\ndfAvgTerritoryYear = df.groupBy(\"Territory\").pivot(\"Year\").avg(\"UV_Index\")\ndfAvgTerritoryYear.show(truncate=False)\n#dfAvgTerritoryYear.repartition(1).write.csv(\"hdfs://namenode:9000/results/AvgTerritoryYear.csv\", sep='|')\n\nprint(\"---------------------------------------------\")\nprint(\"Maximum UV Index for every year and month\")\ndfMaxYearMonth = df.groupBy(\"Year\").pivot(\"Month\").max(\"UV_Index\").orderBy('Year', ascending=True)\ndfMaxYearMonth.show(truncate=False)\n#dfMaxYearMonth.repartition(1).write.csv(\"hdfs://namenode:9000/results/MaxYearMonth.csv\", sep='|')\n\n# max UV Index for every year and month\nprint(\"---------------------------------------------\")\nprint(\"Month with maximum UV Index in every year\")\ndfYearMax = df.groupBy(\"Year\").max(\"UV_Index\")\ndfMonthMax = dfYearMax.join(df, (dfYearMax[\"Year\"] == df[\"Year\"]) & \\\n (dfYearMax[\"max(UV_Index)\"] == df[\"UV_Index\"]) , \"inner\")\\\n .select(df[\"Year\"], \"Month\", \"max(UV_Index)\")\\\n .orderBy('Year', ascending=True)\ndfMonthMax.show(truncate=False)\n#dfMonthMax.repartition(1).write.csv(\"hdfs://namenode:9000/results/MonthMax.csv\", sep='|')\n\n# group by territory and year\ndf = df.groupBy(\"Territory\", \"Year\").max(\"UV_Index\")\n\n# join with dataset about risk and mortality\ndfJoinIncidence = df.join(dfMelanomaIncidence, (df[\"Territory\"] == dfMelanomaIncidence[\"Territory\"]) & \\\n (df[\"Year\"] == dfMelanomaIncidence[\"Year\"]) , \"inner\")\\\n .select(df[\"Territory\"], df[\"Year\"], \"max(UV_Index)\", \"Count\", \"Data_type\")\\\n .orderBy('Count', ascending=False)\ndfJoinMortality = df.join(dfMelanomaMortality, (df[\"Territory\"] == dfMelanomaMortality[\"Territory\"]) & \\\n (df[\"Year\"] == dfMelanomaMortality[\"Year\"]) , \"inner\")\\\n .select(df[\"Territory\"], df[\"Year\"], \"max(UV_Index)\", \"Count\", \"Data_type\")\\\n .orderBy('Count', ascending=False)\n#dfJoinIncidence.repartition(1).write.csv(\"hdfs://namenode:9000/results/JoinIncidence.csv\", sep='|')\n#dfJoinMortality.repartition(1).write.csv(\"hdfs://namenode:9000/results/JoinMortality.csv\", sep='|')\nprint(\"---------------------------------------------\")\nprint(\"Territory by year, avg UV index and number of incidence\")\ndfJoinIncidence.show(truncate=False)\n\nprint(\"---------------------------------------------\")\nprint(\"Territory by year, avg UV index and number of mortality\")\ndfJoinMortality.show(truncate=False)\n\nprint(\"---------------------------------------------\")\nprint(\"The five cities with the highest number of deaths caused by melanoma of skin\")\ndfJoinMortalityYear = dfJoinMortality.filter(df[\"Year\"] == \"2014\").orderBy('Count', ascending=False)\ndfJoinMortalityYear.show(n=5,truncate=False)", "repo_name": "AnaNikolasevic/BigDataProject", "sub_path": "batch/batch_uv_index.py", "file_name": "batch_uv_index.py", "file_ext": "py", "file_size_in_byte": 7455, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyspark.SparkConf", "line_number": 14, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 15, 
"usage_type": "call"}, {"api_name": "pyspark.sql.SparkSession", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "9220441750", "text": "import copy\nimport logging\nfrom collections import OrderedDict\nfrom typing import List, Tuple, Any\n\nimport faiss\nimport more_itertools\nimport numpy as np\nimport torch\n\n\nlogger = logging.getLogger(__name__)\n\nclass OrderedDictCache:\n ''' Simple ordered dict.\n '''\n\n def __init__(self, capacity: int, buffer=10, lru: bool = True, callback=None):\n self.cache = OrderedDict()\n self.capacity = capacity\n self.buffer = buffer\n self.lru = lru\n self.callback = callback\n\n def get(self, key: Any) -> Any:\n if key not in self.cache:\n return None\n else:\n if self.lru:\n self.cache.move_to_end(key)\n return self.cache[key]\n\n def put(self, key: Any, value: Any) -> None:\n self.cache[key] = value\n\n if self.lru:\n self.cache.move_to_end(key)\n\n self._cleanup()\n\n def remove(self, key: Any) -> None:\n item = self.cache.pop(key)\n\n if self.callback is not None:\n self.callback([item])\n\n def _cleanup(self):\n\n if len(self.cache) >= (self.capacity + self.buffer):\n removed_list = []\n\n while len(self.cache) > self.capacity:\n removed = self.cache.popitem(last=False)\n removed_list.append(removed)\n\n if self.callback is not None:\n self.callback(removed_list)\n\n def __len__(self):\n return len(self.cache)\n\n def __iter__(self):\n for item in self.cache.items():\n yield item\n\nimport os\ndef parse_bool(b):\n return b == \"True\" or b == \"TRUE\" or b == \"true\" or b == \"1\"\n\nclass MemoryIndex:\n def __init__(self, embedding_dim=768, capacity: int = 9900, buffer=100, lru: bool = True, **kwargs):\n\n self.embedding_dim = embedding_dim\n self.id = 0\n\n ''' This is a hack so that memory has different id range from the knowledgebase.\n '''\n self.id_offset = int(1e9 + 1)\n\n self.init_index()\n\n def remove_from_cache(docs: List[Tuple]):\n doc_ids = [d[0] for d in docs]\n doc_ids_arr = np.asarray(doc_ids)\n self.remove_ids(doc_ids_arr)\n\n self.cache = OrderedDictCache(capacity=capacity, buffer=buffer, lru=lru, callback=remove_from_cache)\n\n self._random_retrieval = parse_bool(os.getenv(\"RANDOM_RETRIEVAL\", default=\"False\"))\n\n def init_index(self):\n \"\"\" Initialise the Faiss index.\n \"\"\"\n index = faiss.IndexFlatIP(self.embedding_dim)\n self.index = faiss.IndexIDMap2(index)\n\n def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:\n \"\"\" Get dictionary information from the requested docs.\n\n Args:\n doc_ids (ndarray):\n \"\"\"\n doc_ids = doc_ids.flatten()\n docs = []\n for id in doc_ids:\n\n id = int(id) - self.id_offset\n\n doc_dict = copy.deepcopy(self.cache.get(int(id)))\n if doc_dict is None:\n doc_dict = {\"id\": f\"{id}\", \"text\": \"\", \"title\": \"\",\n \"embeddings\": np.zeros(self.embedding_dim, dtype=np.float32)}\n else:\n try:\n doc_dict['id'] = f\"{int(doc_dict['id']) + self.id_offset}\"\n except TypeError:\n pass\n except ValueError:\n pass\n\n\n docs.append(doc_dict)\n\n logging.debug(f\"Doc Dicts: {doc_dict['id']}, {doc_dict['title']}, {doc_dict['text']}\")\n return docs\n\n def get_doc_dict(self, doc_id: int) -> List[dict]:\n \"\"\" Get dictionary information from the requested docs.\n\n Args:\n doc_ids (int):\n \"\"\"\n\n doc_id = int(doc_id) - self.id_offset\n\n doc_dict = copy.deepcopy(self.cache.get(int(doc_id)))\n if doc_dict is None:\n doc_dict = {\"id\": f\"{doc_id}\", \"text\": \" \", \"title\": \" \",\n \"embeddings\": np.zeros(self.embedding_dim, dtype=np.float32)}\n else:\n try:\n 
doc_dict['id'] = f\"{int(doc_dict['id']) + self.id_offset}\"\n except TypeError:\n pass\n except ValueError:\n pass\n\n logging.debug(f\"Doc Dicts: {doc_dict['id']}, {doc_dict['title']}, {doc_dict['text']}\")\n\n return doc_dict\n\n def get_top_docs(self, question_hidden_states: np.ndarray, n_docs: int = 5) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Args:\n question_hidden_states (ndarray): Question states to match against the Faiss index.\n n_docs (int): Number of docs to retrieve.\n\n Returns:\n :obj:`np.ndarray` of shape :obj:`(batch_size, n_docs)`: A tensor of indices of retrieved documents.\n :obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.\n \"\"\"\n assert len(question_hidden_states.shape) == 2\n\n if self._random_retrieval:\n question_hidden_states= np.random.randn(*question_hidden_states.shape)\n\n distances, indices, = self.index.search(np.float32(question_hidden_states), n_docs)\n\n embeddings_list = []\n for ind in indices:\n nearest = []\n for nearest_ind in ind:\n item = self.cache.get(int(nearest_ind))\n\n if item is not None:\n nearest.append(item['embeddings'])\n else:\n nearest.append(np.zeros(self.embedding_dim, dtype=np.float32))\n\n embeddings_list.append(nearest)\n\n indices_array = np.asarray(indices)\n embeddings_array = np.asarray(embeddings_list)\n\n indices_array += self.id_offset\n\n logging.debug(f\"Top Docs: {indices}, {distances}\")\n return indices_array, embeddings_array, distances\n\n def add(self, context_dicts: List[dict], context_hidden_states: np.ndarray):\n \"\"\" Add vectors and dicts to the index.\n Args:\n context_dicts (List[dict]): A list of dictionaries with the representations. Must contain id, title and text fields.\n context_hidden_states (ndarray): The ndarray is batch size * dim.\n \"\"\"\n\n assert len(context_dicts) > 0\n assert len(context_hidden_states.shape) == 2\n\n context_hidden_states = np.float32(context_hidden_states)\n\n id_list = []\n for item, vec in zip(context_dicts, context_hidden_states):\n item['embeddings'] = vec.tolist()\n self.cache.put(self.id, item)\n id_list.append(self.id)\n self.id += 1\n\n ids = np.asarray(id_list)\n\n logger.debug(f\"Add ids to Faiss: {ids}\")\n self.index.add_with_ids(context_hidden_states, ids)\n\n ids += self.id_offset\n\n return ids\n\n def remove_ids(self, doc_ids: np.ndarray) -> List[dict]:\n \"\"\" Remove from the dictionary and the Faiss index.\n Args:\n doc_ids (ndarray): Ids to remove.\n \"\"\"\n logger.debug(f\"Remove from Faiss: {doc_ids}\")\n self.index.remove_ids(doc_ids)\n\n def clear_memory(self):\n \"\"\" Clear the Faiss index.\n \"\"\"\n for chunk in more_itertools.chunked(self.cache.cache.items(),100):\n ids = [c[0] for c in chunk]\n doc_ids = np.asarray(ids)\n print(f\"Clear doc_ids: {doc_ids}\")\n self.index.remove_ids(doc_ids)\n \n\n def __len__(self):\n return len(self.cache)\n\n def __iter__(self):\n for item in self.cache:\n yield item\n", "repo_name": "dwlmt/story-fragments", "sub_path": "story_fragments/modules/memory_cache_index.py", "file_name": "memory_cache_index.py", "file_ext": "py", "file_size_in_byte": 7523, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 19, "usage_type": "call"}, {"api_name": "typing.Any", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 
33, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 82, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 84, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 89, "usage_type": "call"}, {"api_name": "faiss.IndexFlatIP", "line_number": 94, "usage_type": "call"}, {"api_name": "faiss.IndexIDMap2", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 97, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 112, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 124, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 97, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 148, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 127, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 183, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 187, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 152, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 190, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 218, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 218, "usage_type": "name"}, {"api_name": "more_itertools.chunked", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 231, "usage_type": "call"}]} +{"seq_id": "40213077593", "text": "# Amanda Chen\n# SoftDev1 pd1\n# K25 -- Getting More REST\n# 2019-11-13\n\nfrom flask import Flask, render_template, request, redirect, url_for\nimport json\nfrom urllib.request import urlopen\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef root():\n return render_template('root.html')\n\n#Dark Sky API\n#API_KEY: 1975880ad720859dec79321ed72598c5\n#https://api.darksky.net/forecast/[key]/[latitude],[longitude]\n@app.route(\"/weather\")\ndef weather():\n print(app)\n u = urlopen(\"https://api.darksky.net/forecast/1975880ad720859dec79321ed72598c5/40.713051,-74.007233\")\n response = u.read();\n info = json.loads(response);\n #print(info['daily']['data'][0])\n return render_template('weather.html',\n lat = info['latitude'],\n long = info['longitude'],\n timezone = info['timezone'],\n sum = info['daily']['summary'],\n high = 
info['daily']['data'][0]['temperatureHigh'],\n low = info['daily']['data'][0]['temperatureLow'])\n\n#Rest Countries API\n#No API Key\n#https://restcountries.eu/rest/v2/\n@app.route(\"/country\")\ndef country():\n print(app)\n u = urlopen(\"https://restcountries.eu/rest/v2/name/France\")\n response = u.read();\n info = json.loads(response);\n #print(info['daily']['data'][0])\n return render_template('country.html',\n name = info[0]['name'],\n pop = info[0]['population'],\n cap = info[0]['capital'],\n cur = info[0]['currencies'][0]['name'],\n reg = info[0]['subregion'],\n area = info[0]['area'])\n\n#Unsplash\n#API_KEY: ae7faf91895099838aac4db456686b8c4dfc427579a7f9c88eaed9e6d3f04c59\n#https://api.unsplash.com/photos/random?client_id=ae7faf91895099838aac4db456686b8c4dfc427579a7f9c88eaed9e6d3f04c59\n@app.route(\"/photo\")\ndef photo():\n print(app)\n u = urlopen(\"https://api.unsplash.com/photos/random?client_id=ae7faf91895099838aac4db456686b8c4dfc427579a7f9c88eaed9e6d3f04c59\")\n response = u.read();\n info = json.loads(response);\n #print(info['daily']['data'][0])\n return render_template('photo.html',\n photo = info['urls']['small'],\n date = info['created_at'][:10],\n descrip = info['alt_description'].title(),\n name = info['user']['name'])\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n", "repo_name": "achen27/SoftDev_Work", "sub_path": "fall/25_restrio/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2569, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 58, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "6810814982", "text": "from django.urls import path\nfrom . 
import views\n\n#paths for account app.\nurlpatterns = [\n path('',views.LoginView, name='login_url'),\n path('register/',views.RegisterView, name=\"register_url\"),\n path('dashboard/',views.DashboardView, name='dashboard_url'),\n \n path('profile/',views.ProfileView, name='profile_url'),\n path('allUsers/',views.AllUsersView, name='allUsers_url'),\n]", "repo_name": "MadukaPcm/idd", "sub_path": "uaa/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 396, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "16727965345", "text": "import nltk\nimport multiprocessing as mp\nimport pandas as pd\n\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nclass SentimentAnalyzer:\n def __init__(self):\n nltk.download('vader_lexicon')\n self.sid = SentimentIntensityAnalyzer()\n\n def analysedf(self,df):\n print(\"Sentiment analyzing df's text column.\")\n dfsent=df\n print(\"len(dfsent)\",len(dfsent))\n\n dfsent['sent_pos']=dfsent['text'].map(lambda x: self.sid.polarity_scores(x)['pos'])\n dfsent['sent_neg']=dfsent['text'].map(lambda x: self.sid.polarity_scores(x)['neg'])\n dfsent['sent_neu']=dfsent['text'].map(lambda x: self.sid.polarity_scores(x)['neu'])\n dfsent['sent_compound']=dfsent['text'].map(lambda x: self.sid.polarity_scores(x)['compound'])\n\n print(\"analysedf finished\")\n return dfsent\n\n def analyse_worker(self,args):\n print(\"Sentiment analyzing df's text column.\")\n df,newcolname,feature=args\n dfsent=df\n print(\"len(dfsent)\",len(dfsent))\n\n dfsent[newcolname]=dfsent['text'].map(lambda x: self.sid.polarity_scores(x)[feature])\n return dfsent\n\n\n def paralellanalyse(self,df):\n print(__name__)\n print(\"analyzing coin\")\n data=([df,'sent_pos','pos'],[df,'sent_neg','neg'],[df,'sent_neu','neu'],[df,'sent_compound','compound'])\n if __name__ == 'twitter.sentiment':\n with mp.Pool() as pool:\n dfsent = pool.map(self.analyse_worker, data)\n\n i=0\n for d in data:\n #print(dfsent[i][d[1]])\n df[d[1]]=dfsent[i][d[1]]\n i+=1\n return df\n\n def merge_tweets_with_retweets(self,coin):\n coin_retweet_sent=coin.retweets.merge(coin.tweets,how='left',left_on='orig_tweet_id',right_on='id')\n #print((coin_retweet_sent[coin_retweet_sent[\"sent_neg\"]-0.2>coin_retweet_sent['sent_pos']]['text']))\n setattr( coin, 'retweets', coin_retweet_sent)\n\n\n def sent_mul_retweet_followers(self,coin):\n coin_retweet_sent=coin.retweets\n f = lambda x, y : x*y\n coin_retweet_sent['posmulrfollower'] = coin_retweet_sent[['sent_pos','retweeter_followers']].apply(lambda x: f(*x), axis=1)\n coin_retweet_sent['negmulrfollower'] = coin_retweet_sent[['sent_neg','retweeter_followers']].apply(lambda x: f(*x), axis=1)\n coin_retweet_sent['neumulrfollower'] = coin_retweet_sent[['sent_neu','retweeter_followers']].apply(lambda x: f(*x), axis=1)\n coin_retweet_sent['compmulrfollower'] = coin_retweet_sent[['sent_compound','retweeter_followers']].apply(lambda x: f(*x), axis=1)\n\n def sent_mul_tweet_followers(self,coin):\n f = lambda x, y : x*y\n coin_tweet=coin.tweets\n coin_tweet['follower_count']=coin_tweet['follower_count'].astype(float)\n 
#print(coin_tweet.head())\n print(len(coin_tweet))\n if(len(coin_tweet) >0):\n coin_tweet['posmulfollower'] = coin_tweet[['sent_pos','follower_count']].apply(lambda x: f(*x), axis=1)\n coin_tweet['negmulfollower'] = coin_tweet[['sent_neg','follower_count']].apply(lambda x: f(*x), axis=1)\n coin_tweet['neumulfollower'] = coin_tweet[['sent_neu','follower_count']].apply(lambda x: f(*x), axis=1)\n coin_tweet['compmulfollower'] = coin_tweet[['sent_compound','follower_count']].apply(lambda x: f(*x), axis=1)\n else:\n coin_tweet['posmulfollower']=0\n coin_tweet['negmulfollower']=0\n coin_tweet['neumulfollower']=0\n coin_tweet['compmulfollower']=0\n\n\n\n def group_retweet_by_hour(self,coin):\n coin_retweet_sent=coin.retweets\n coin_retweet_sent['datetime']=pd.to_datetime(coin_retweet_sent['retweet_created_at'],format='%Y-%m-%d %X')\n times = pd.DatetimeIndex(coin_retweet_sent.datetime)\n grt=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).retweeter_followers.sum()\n grtdf=pd.DataFrame(grt)\n grtdf['max_datetime']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).datetime.max()\n grtdf['retweet_count']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).retweeter_followers.count()\n if(len(coin_retweet_sent) >0):\n grtdf['sum_posmulrfollower']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).posmulrfollower.sum()\n grtdf['sum_negmulrfollower']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).negmulrfollower.sum()\n grtdf['sum_neumulrfollower']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).neumulrfollower.sum()\n grtdf['sum_compmulrfollower']=coin_retweet_sent.groupby([times.year, times.month, times.day,times.hour]).compmulrfollower.sum()\n else:\n grtdf['sum_posmulrfollower']=0\n grtdf['sum_negmulrfollower']=0\n grtdf['sum_neumulrfollower']=0\n grtdf['sum_compmulrfollower']=0\n setattr( coin, 'grtdf', grtdf)\n\n def group_tweet_by_hour(self,coin):\n coin_tweet=coin.tweets\n times = pd.DatetimeIndex(coin_tweet.tstamp)\n gt=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).follower_count.sum()\n gtdf=pd.DataFrame(gt)\n gtdf['max_datetime']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).tstamp.max()\n gtdf['tweet_count']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).follower_count.count()\n if(len(coin_tweet) >0):\n gtdf['sum_posmulfollower']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).posmulfollower.sum()\n gtdf['sum_negmulfollower']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).negmulfollower.sum()\n gtdf['sum_neumulfollower']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).neumulfollower.sum()\n gtdf['sum_compmulfollower']=coin_tweet.groupby([times.year, times.month, times.day,times.hour]).compmulfollower.sum()\n else:\n gtdf['sum_posmulfollower']=0\n gtdf['sum_negmulfollower']=0\n gtdf['sum_neumulfollower']=0\n gtdf['sum_compmulfollower']=0\n setattr( coin, 'gtdf', gtdf)\n", "repo_name": "mike576/crypto_predictions", "sub_path": "twitter/sentiment.py", "file_name": "sentiment.py", "file_ext": "py", "file_size_in_byte": 6269, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nltk.download", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.sentiment.vader.SentimentIntensityAnalyzer", "line_number": 10, "usage_type": "call"}, 
{"api_name": "multiprocessing.Pool", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.DatetimeIndex", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "75324509284", "text": "import torch\n\n# acc 출력\ndef acc(yhat, y):\n with torch.no_grad():\n yhat = yhat.max(dim=-1)[1] # [0]: max value, [1]: index of max value\n acc = (yhat == y).float()[y != 1].mean() # padding은 acc에서 제거\n return acc\n\n# 학습시 모델에 넣는 입력과 모델의 예측 출력.\ndef train_test(step, y_pred, dec_output, real_value_index, enc_input, args, TEXT, LABEL):\n\n if 0 <= step < 3:\n _, ix = y_pred[real_value_index].data.topk(1)\n train_Q = enc_input[0]\n print(\"<> :\", end=\" \")\n for i in train_Q:\n if TEXT.vocab.itos[i] == \"\":\n break\n print(TEXT.vocab.itos[i], end=\" \")\n\n print(\"\\n<> :\", end=\" \")\n for jj, jx in enumerate(dec_output[real_value_index]):\n if LABEL.vocab.itos[jx] == \"\":\n break\n print(LABEL.vocab.itos[jx], end=\" \")\n\n print(\"\\n<> :\", end=\" \")\n for jj, ix in enumerate(ix):\n if jj == args.max_len:\n break\n if LABEL.vocab.itos[ix] == '':\n break\n print(LABEL.vocab.itos[ix], end=\" \")\n print(\"\\n\")\n", "repo_name": "BM-K/Styling-Chatbot-with-Transformer", "sub_path": "Chatbot/metric.py", "file_name": "metric.py", "file_ext": "py", "file_size_in_byte": 1178, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 33, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.no_grad", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "21892458687", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 24 16:02:55 2022\r\n\r\n\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport re\r\nimport random\r\n#%%\r\n'''Potential Cell for getting data from the web'''\r\n# Can only call this cell with internet, so I sectioned this off from the rest of the data\r\nurl = 'https://raw.githubusercontent.com/PokeAPI/pokeapi/master/data/v2/csv/type_efficacy.csv'\r\ntypechart = pd.read_csv(url)\r\nurl1 = 'https://raw.githubusercontent.com/PokeAPI/pokeapi/master/data/v2/csv/items.csv'\r\nitems = pd.read_csv(url1)\r\nurl2 = 'https://raw.githubusercontent.com/PokeAPI/pokeapi/master/data/v2/csv/natures.csv'\r\nnatures = pd.read_csv(url2)\r\nurl3 = 'https://raw.githubusercontent.com/PokeAPI/pokeapi/master/data/v2/csv/abilities.csv'\r\nabilities = pd.read_csv(url3)\r\nurl4 = 'https://raw.githubusercontent.com/PokeAPI/pokeapi/master/data/v2/csv/pokemon_abilities.csv'\r\npkabilities = pd.read_csv(url4)\r\n#%%\r\n# Add nature into the stat calculator and add it as a input for the statistics method of pkstorage\r\nstats = pd.read_csv(\"C:\\\\Users\\\\cacru\\\\OneDrive\\\\Documents\\\\pokemon_stats.csv\")\r\nspecies = pd.read_csv(\"C:\\\\Users\\\\cacru\\\\OneDrive\\\\Documents\\\\pokemon_species.csv\")\r\ntypes = pd.read_csv(\"C:\\\\Users\\\\cacru\\\\OneDrive\\\\Documents\\\\types.csv\")\r\nptypes = pd.read_csv(\"C:\\\\Users\\\\cacru\\\\OneDrive\\\\Documents\\\\pokemon_types.csv\")\r\nmovesdf = pd.read_csv(\"C:\\\\Users\\\\cacru\\\\OneDrive\\\\Documents\\\\Data Analysis 
Practice\\\\moves.csv\")\r\nmovesdf.drop(columns='contest_type_id')\r\nmovesdf.drop(columns='contest_effect_id')\r\nmovesdf.drop(columns='super_contest_effect_id')\r\nstatsdf = pd.DataFrame(stats)\r\nspeciesdf = pd.DataFrame(species)\r\ntypesdf = pd.DataFrame(types)\r\n#%%\r\n# This cell, I want to combine species, ptypes, and stats all into one data frame.\r\n# First, lets work with adding the stats to the species df.\r\n# The complication with this is the way the stats df is organized vertically. \r\nf = statsdf.query('pokemon_id == 1')\r\nprint(f)\r\n\r\nptypesdf = pd.DataFrame(ptypes)\r\n\r\n#%%\r\n\r\n\r\n#%%\r\ndef bst(x):\r\n y = statsdf[statsdf['pokemon_id']==x]\r\n lst = list(y['base_stat'])\r\n bstsum = 0\r\n for i in lst:\r\n bstsum = bstsum + i\r\n print('BST =', bstsum)\r\ndef types(x):\r\n y = ptypesdf[ptypesdf['pokemon_id']==x]\r\n t = y['type_id'].values.tolist()\r\n if int(len(t)) == 1:\r\n k = typesdf[typesdf['id']==t[0]]\r\n s = k['identifier'].values.tolist()\r\n print('Type:', s)\r\n else:\r\n k = typesdf[typesdf['id']==t[0]]\r\n s = k['identifier'].values.tolist()\r\n print('Type 1:', s)\r\n k1 = typesdf[typesdf['id']==t[1]]\r\n s1 = k1['identifier'].values.tolist()\r\n print('Type 2:', s1)\r\ndef pokemoninfo(x):\r\n y = speciesdf[speciesdf['id']==x]\r\n print('Name:', y['identifier'].values.tolist())\r\n print('Generation:', str(y['generation_id'].values.tolist()))\r\n if int(y['gender_rate']) < 0:\r\n print('No Gender')\r\n else:\r\n print('Percentage Male:', round((int(y['gender_rate'])/8)*100,3))\r\n print('Capture Rate:', y['capture_rate'].values.tolist())\r\n bst(x) \r\ndef pokemoninfobyname(x):\r\n y = speciesdf[speciesdf['identifier']==x]\r\n print('Pokedex No.:', y['id'].values.tolist())\r\n x = int(y['id'])\r\n types(x)\r\n print('Generation:', str(y['generation_id'].values.tolist()))\r\n if int(y['gender_rate']) < 0:\r\n print('No Gender')\r\n else:\r\n print('Percentage Male:', round((int(y['gender_rate'])/8)*100,3))\r\n print('Capture Rate:', y['capture_rate'].values.tolist())\r\n bst(x)\r\ndef statfinder(x):\r\n y = speciesdf[speciesdf['identifier']==x]\r\n z = int(y['id'])\r\n #Level 50 min stats\r\n df1 = statsdf[statsdf['pokemon_id']==z]\r\n return df1\r\ndef fiftymin(x):\r\n df1 = statfinder(x)\r\n ### This section locates all the stat values from the csv files\r\n hp = df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n atk = df1['base_stat'][df1['stat_id']==2].values.tolist()\r\n defc = df1['base_stat'][df1['stat_id']==3].values.tolist()\r\n spatk = df1['base_stat'][df1['stat_id']==4].values.tolist()\r\n spdef = df1['base_stat'][df1['stat_id']==5].values.tolist()\r\n spd = df1['base_stat'][df1['stat_id']==6].values.tolist()\r\n nm = (2*hp[0]+0+(int(0/4)))*50\r\n hp1 = int(nm/100 + 50 + 10)\r\n ### This is solving for all of the numerators in the stat calculation equations\r\n nm1 = (2*atk[0]+0+(int(0/4)))*50\r\n nm2 = (2*defc[0]+0+(int(0/4)))*50\r\n nm3 = (2*spatk[0]+0+(int(0/4)))*50\r\n nm4 = (2*spdef[0]+0+(int(0/4)))*50\r\n nm5 = (2*spd[0]+0+(int(0/4)))*50\r\n ### This solves the final values\r\n atk1 = int((nm1/100+5)*.9)\r\n def1 = int((nm2/100+5)*.9)\r\n spatk1 = int((nm3/100+5)*.9)\r\n spdef1 = int((nm4/100+5)*.9)\r\n spd1 = int((nm5/100+5)*.9)\r\n ### This prints out the final stats\r\n print('HP:',hp1)\r\n print('ATK:', atk1)\r\n print('DEF:', def1)\r\n print('SpATK:', spatk1)\r\n print('SpDEF:', spdef1)\r\n print('SPD:', spd1)\r\ndef fiftymax(x):\r\n\r\n df1 = statfinder(x)\r\n iv = 31\r\n ev = 252\r\n ### This section locates all the 
stat values from the csv files\r\n hp = df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n atk = df1['base_stat'][df1['stat_id']==2].values.tolist()\r\n defc = df1['base_stat'][df1['stat_id']==3].values.tolist()\r\n spatk = df1['base_stat'][df1['stat_id']==4].values.tolist()\r\n spdef = df1['base_stat'][df1['stat_id']==5].values.tolist()\r\n spd = df1['base_stat'][df1['stat_id']==6].values.tolist()\r\n nm = (2*hp[0]+iv+(int(ev/4)))*50\r\n hp1 = int(nm/100 + 50 + 10)\r\n ### This is solving for all of the numerators in the stat calculation equations\r\n nm1 = (2*atk[0]+iv+(int(ev/4)))*50\r\n nm2 = (2*defc[0]+iv+(int(ev/4)))*50\r\n nm3 = (2*spatk[0]+iv+(int(ev/4)))*50\r\n nm4 = (2*spdef[0]+iv+(int(ev/4)))*50\r\n nm5 = (2*spd[0]+iv+(int(ev/4)))*50\r\n ### This solves the final values\r\n atk1 = int((nm1/100+5)*1.1)\r\n def1 = int((nm2/100+5)*1.1)\r\n spatk1 = int((nm3/100+5)*1.1)\r\n spdef1 = int((nm4/100+5)*1.1)\r\n spd1 = int((nm5/100+5)*1.1)\r\n ### This prints out the final stats\r\n print('HP:',hp1)\r\n print('ATK:', atk1)\r\n print('DEF:', def1)\r\n print('SpATK:', spatk1)\r\n print('SpDEF:', spdef1)\r\n print('SPD:', spd1)\r\ndef stats(x):\r\n df1 = statfinder(x)\r\n ### This section locates all the stat values from the csv files\r\n hp = df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n atk = df1['base_stat'][df1['stat_id']==2].values.tolist()\r\n defc = df1['base_stat'][df1['stat_id']==3].values.tolist()\r\n spatk = df1['base_stat'][df1['stat_id']==4].values.tolist()\r\n spdef = df1['base_stat'][df1['stat_id']==5].values.tolist()\r\n spd = df1['base_stat'][df1['stat_id']==6].values.tolist()\r\n statdict = {'HP': hp, 'ATK': atk, 'DEF': defc, 'SpATK': spatk, 'SpDEF': spdef, 'SPD': spd}\r\n return statdict\r\ndef statgraph(x):\r\n# Need to input two word moves with a \"-\"\r\n name = x.title()\r\n df1 = statfinder(x)\r\n ### This section locates all the stat values from the csv files\r\n hp = df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n atk = df1['base_stat'][df1['stat_id']==2].values.tolist()\r\n defc = df1['base_stat'][df1['stat_id']==3].values.tolist()\r\n spatk = df1['base_stat'][df1['stat_id']==4].values.tolist()\r\n spdef = df1['base_stat'][df1['stat_id']==5].values.tolist()\r\n spd = df1['base_stat'][df1['stat_id']==6].values.tolist()\r\n statdict = {'HP': hp, 'ATK': atk, 'DEF': defc, 'SpATK': spatk, 'SpDEF': spdef, 'SPD': spd}\r\n x = statdict\r\n labels = list(x.keys())\r\n values = list(x.values())\r\n values1 = []\r\n for i in values:\r\n it = i[0]\r\n values1.append(it)\r\n x = {labels[i]: values1[i] for i in range(len(labels))}\r\n bar = plt.bar(range(len(x)), values1, align='center')\r\n bar[0].set_color('r')\r\n bar[1].set_color('y')\r\n bar[3].set_color('purple')\r\n bar[4].set_color('g')\r\n bar[5].set_color('c')\r\n plt.xticks(range(len(x)), labels)\r\n plt.ylim(0,275)\r\n plt.xlabel('Base Stats')\r\n plt.ylabel('Value')\r\n plt.title(name)\r\n plt.show()\r\ndef moveinfo(x):\r\n y = movesdf[movesdf['identifier'] == x]\r\n power = y['power'].values.tolist()\r\n mtype = y['type_id'].values.tolist()\r\n types = typesdf[typesdf['id'] == mtype[0]]\r\n t = types['identifier'].values.tolist()\r\n sp = y['damage_class_id'].values.tolist()\r\n dmg = {1: 'Status', 2: 'Physical', 3: 'Special'}\r\n dtype = dmg[sp[0]]\r\n print(f'Name: {x}\\nType: {t[0]}\\nPower: {power[0]}\\nDamage Type: {dtype}')\r\ndef stat(x, level):\r\n df1 = statfinder(x)\r\n ### This section locates all the stat values from the csv files\r\n hp = 
df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n atk = df1['base_stat'][df1['stat_id']==2].values.tolist()\r\n defc = df1['base_stat'][df1['stat_id']==3].values.tolist()\r\n spatk = df1['base_stat'][df1['stat_id']==4].values.tolist()\r\n spdef = df1['base_stat'][df1['stat_id']==5].values.tolist()\r\n spd = df1['base_stat'][df1['stat_id']==6].values.tolist()\r\n nm = (2*hp[0]+0+(int(0/4)))*level\r\n hp1 = int(nm/100 + level + 10)\r\n ### This is solving for all of the numerators in the stat calculation equations\r\n nm1 = (2*atk[0]+0+(int(0/4)))*level\r\n nm2 = (2*defc[0]+0+(int(0/4)))*level\r\n nm3 = (2*spatk[0]+0+(int(0/4)))*level\r\n nm4 = (2*spdef[0]+0+(int(0/4)))*level\r\n nm5 = (2*spd[0]+0+(int(0/4)))*level\r\n ### This solves the final values\r\n atk1 = int((nm1/100+5)*.9)\r\n def1 = int((nm2/100+5)*.9)\r\n spatk1 = int((nm3/100+5)*.9)\r\n spdef1 = int((nm4/100+5)*.9)\r\n spd1 = int((nm5/100+5)*.9)\r\n sts = [hp1, atk1, def1, spatk1, spdef1, spd1]\r\n return sts\r\ndef varstatsHP(species, ev, level, iv=16):\r\n df1 = statfinder(species)\r\n ### This section locates all the stat values from the csv files\r\n hp = df1['base_stat'][df1['stat_id']==1].values.tolist()\r\n nm = (2*hp[0]+iv+(int(ev/4)))*level\r\n hp1 = int(nm/100 + level + 10)\r\n return hp1\r\ndef varstats(species, ev, level, stat, iv=16):\r\n df1 = statfinder(species)\r\n ### This section locates all the stat values from the csv files\r\n spd = df1['base_stat'][df1['stat_id']==stat].values.tolist()\r\n nm5 = (2*spd[0]+iv+(int(ev/4)))*level\r\n ### This solves the final values\r\n spd1 = int((nm5/100+5))\r\n return spd1\r\ndef MoveCalc(move):\r\n blk = \"\"\r\n move = move.lower()\r\n if isinstance(move, (str)) == True:\r\n for i in range(len(move)):\r\n if bool(re.search(r\"\\s\", move)) == True:\r\n if move[i] == ' ':\r\n blk = blk + '-'\r\n else:\r\n blk = blk + move[i] \r\n else:\r\n pass\r\n else:\r\n raise TypeError('The input must be a string')\r\n y = movesdf[movesdf['identifier'] == blk]\r\n power = y['power'].values.tolist()\r\n mtype = y['type_id'].values.tolist()\r\n types = typesdf[typesdf['id'] == mtype[0]]\r\n t = types['identifier'].values.tolist()\r\n sp = y['damage_class_id'].values.tolist()\r\n dmg = {1: 'Status', 2: 'Physical', 3: 'Special'}\r\n dtype = dmg[sp[0]]\r\n movedct = {'Name': blk, 'Power': power, 'Type': t, 'Damage Type': dtype}\r\n return movedct\r\ndef DMGCalc(stata, statb, pwr, lvl):\r\n stata = int(stata)\r\n statb = int(statb)\r\n pwr = int(pwr)\r\n lvl = int(lvl)\r\n num1 = (((2*lvl)/5)+2)\r\n num1a = ((stata/statb))\r\n num2 = (num1*pwr*num1a)\r\n rand = random.uniform(0.85, 1)\r\n dmg = int(((num2/50)+2)*rand)\r\n return dmg\r\n#%%\r\n\r\nclass pkstorage():\r\n def __init__(self, name, move, level=50):\r\n self.name = name\r\n self.level = level\r\n # Creating a storing a moveset for the pokemon\r\n if isinstance(move, (list)) == True:\r\n if len(move) < 5:\r\n self.move = move\r\n else:\r\n raise ValueError('The total moves has to be 4 or less')\r\n else:\r\n raise TypeError('move must be a list')\r\n def moveset(self):\r\n # This section is going to store the moveset information for the pokemon\r\n move = self.move\r\n for i in move:\r\n i = i.lower()\r\n blk = \"\"\r\n # Checking if there is a space in the string since the function cannot use spaces\r\n # https://www.geeksforgeeks.org/python-check-for-spaces-in-string/\r\n # imported re at the top. 
/s looks for the spaces\r\n if bool(re.search(r\"\\s\", i)) == True:\r\n for item in range(len(i)):\r\n if i[item] == ' ':\r\n blk = blk + '-'\r\n else:\r\n blk = blk + i[item]\r\n print('-----------------------------')\r\n moveinfo(blk)\r\n else:\r\n print('-----------------------------')\r\n moveinfo(i)\r\n # This gives me the stats of the pokemon at its level with 0 IV's and EV's\r\n def minstats(self):\r\n # The three lines below reference the information stored in the class object\r\n name = self.name\r\n name1 = name.lower()\r\n level = self.level\r\n # Calls the stat functioned defined in the functions portion.\r\n sts = stat(name1, level)\r\n print('---------')\r\n print(f'Level: {level}\\n---------\\nHP: {sts[0]}\\nATK: {sts[1]}\\nDEF: {sts[2]}\\nSpATK: {sts[3]}\\nSpDEF: {sts[4]}\\nSPD: {sts[5]}')\r\n # This gives the pokemon information by calling the function defined in the functions section.\r\n def info(self):\r\n name = self.name\r\n name1 = name.lower()\r\n self.info = pokemoninfobyname(name1)\r\n def statistics(self,hp=0,atk=0,deff=0,spatk=0,spdef=0,spd=0):\r\n ev = [hp, atk, deff, spatk, spdef, spd]\r\n name = self.name\r\n name = name.lower()\r\n lvl = self.level\r\n for i in ev:\r\n if isinstance(i,(int)) == False:\r\n raise TypeError('Inputs for the EVs have to be an integer')\r\n elif i > 252:\r\n raise ValueError('Individual EVs cannot be higher than 252')\r\n elif i < 0:\r\n raise ValueError('Individual EVs cannot be negative')\r\n else:\r\n pass\r\n if sum(ev) > 510:\r\n raise ValueError('Total amount of EVs cannot be higher than 510')\r\n else:\r\n pass\r\n #HP \r\n hp1 = varstatsHP(species=name, level=lvl, ev=ev[0])\r\n print('----------------\\n',name.title(),'\\nLevel:',lvl, '\\n----------------')\r\n print(f'HP: {hp1} / EV: {ev[0]}\\n----------------')\r\n labels = ['ATK', 'DEF', 'SpATK', 'SpDEF', 'SPD']\r\n ev1 = ev[1:]\r\n pos = 0\r\n statlist = [hp1]\r\n for i in ev1:\r\n pos1 = pos + 2\r\n st = varstats(species=name, level=lvl, ev=i, stat=pos1)\r\n print(f'{labels[pos]}: {st} / EV: {i}')\r\n pos = pos + 1\r\n statlist.append(st)\r\n return statlist\r\n def sgraph(self):\r\n name = self.name\r\n name = name.lower()\r\n statgraph(name)\r\n def DamageCalc(self, pkatk, pkdef, move):\r\n attack_move = MoveCalc(move)\r\n level = self.level\r\n if attack_move['Damage Type'] == 'Physical':\r\n attacking_stat = pkatk[1]\r\n defending_stat = pkdef[2]\r\n elif attack_move['Damage Type'] == 'Special':\r\n attacking_stat = pkatk[3]\r\n defending_stat = pkdef[4]\r\n else:\r\n print('That is a status move')\r\n exit()\r\n dmg = attack_move['Power'][0]\r\n atktype = attack_move['Type']\r\n damage = DMGCalc(attacking_stat, defending_stat, dmg, level)\r\n print(damage)\r\n#%%\r\nmoveset1 = ['Earthquake', 'Scale Shot', 'Swords Dance', 'Outrage']\r\nmoveset2 = ['Behemoth Blade', 'Play Rough', 'Wild Charge', 'Swords Dance']\r\nslot1 = pkstorage(name='Garchomp', move=moveset1, level=100) \r\nslot2 = pkstorage(name='Zacian', move=moveset2, level=100)\r\n#%%\r\n'''TESTING CELL'''\r\nstat1 = slot2.statistics(hp=4,atk=252,spd=252)\r\nstat2 = slot1.statistics(atk=252,hp=252,deff=4)\r\nslot2.DamageCalc(pkatk=stat1, pkdef=stat2, move='Behemoth Bash')\r\n#%%\r\nstatsdf.info()\r\nspecies.info()\r\n\r\n#%%\r\ndef gen_analysis(x):\r\n print('Generation', x, 'Analysis')\r\n y = speciesdf.groupby('generation_id')['generation_id'].count()\r\n print('Number of Pokemon:', y[x])\r\n \r\ngen_analysis(7)\r\n", "repo_name": "tljorda2/practice", "sub_path": "Pokemon_Practice.py", "file_name": 
"Pokemon_Practice.py", "file_ext": "py", "file_size_in_byte": 16427, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 32, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "re.search", "line_number": 262, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 289, "usage_type": "call"}, {"api_name": "re.search", "line_number": 315, "usage_type": "call"}]} +{"seq_id": "4689989479", "text": "\"\"\"E_Voting URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom . 
import views\nfrom django.conf.urls import include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',views.HomePage.as_view(),name='home'),\n path('ajax/login/', views.LoginViewAjax.as_view(\n template_name='partial_login_ajax.html'),\n name='login_ajax'),\n path('ajax/logout/', views.logout_view_ajax, name='logout_ajax'),\n path('registration/',include('registration.urls',namespace='registration')),\n path('election/',include('election.urls',namespace='election')),\n path('reporting/',include('reporting.urls',namespace='reporting')),\n path('accounts/login/', views.LoginView.as_view(),\n name='login'),\n path('test/',\n views.TestPage.as_view(),\n name='test'\n ),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "bryan1188/DNHS-EVS", "sub_path": "E_Voting/E_Voting/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 41, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 42, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 42, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "10487797590", "text": "#!/usr/bin/env python\n##############################################\n# Author: Julian Alexander Murillo\n##############################################\n# Lexinerus (GitHub)\n##############################################\n# Application: Nopynews\n# Notify Python News\n# This application show feeds in notifications\n# from your favorite news sources\n##############################################\n# Date: 22-Oct-2011\n##############################################\nimport feedparser\nimport os\nimport sys\nimport settings\nimport config\nimport time\nimport urllib\nfrom subprocess import Popen, PIPE\n\nclass Nopynews:\n\t__source = settings.FEED_SOURCE\n\t\n\t# List of all tweets with specified word on\n\t# settings file.\n\tdef 
news_list(self):\n\t\tlst_news = []\n\t\t\n\t\tfor news in self.__source:\n\t\t\tfeed = feedparser.parse(news)\n\t\t\tlst_news += feed.entries\n\t\t\n\t\treturn lst_news\n\n\t# Show all news on notifications.\n\tdef show_news(self, news_list):\n\t\tfor entry in news_list:\n\t\t\ttitle = entry.title\n\t\t\tdescription = entry.description\n\t\t\tdescription = description[:config.DESCRIPTION_SIZE]\n\t\t\tdescription += config.SUSPENTION_POINTS\n\t\t\tself.show_news_notification(title, description)\n\t\t\ttime.sleep(settings.SECONDS_BETWEEN_NEWS)\n\n\t# Show news into a notification\n\tdef show_news_notification(self, title, description):\t\n\t\targs = [title, description]\n\t\tcommand = ExternalApp()\n\t\tcommand.executeCommand(config.NOTIFY_APP, args)\n\nclass ExternalApp:\n\tdef executeCommand(self, command, args):\n\t\treturn Popen([command] + args, stdin=PIPE, stdout=PIPE)\n\nif __name__ == '__main__':\n\tnopynews = Nopynews()\n\tlst_news = nopynews.news_list()\n\tnopynews.show_news(lst_news)\n\n", "repo_name": "64lines/Nopynews", "sub_path": "Nopynews.py", "file_name": "Nopynews.py", "file_ext": "py", "file_size_in_byte": 1634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "settings.FEED_SOURCE", "line_number": 24, "usage_type": "attribute"}, {"api_name": "feedparser.parse", "line_number": 32, "usage_type": "call"}, {"api_name": "config.DESCRIPTION_SIZE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "config.SUSPENTION_POINTS", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "settings.SECONDS_BETWEEN_NEWS", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config.NOTIFY_APP", "line_number": 51, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "11716454910", "text": "import collections\n\ndef roadsAndLibraries(n, c_lib, c_road, cities):\n if c_lib <= c_road:\n return c_lib * n\n adj = collections.defaultdict(list)\n for start, des in cities:\n adj[start].append(des)\n adj[des].append(start)\n components = 0\n def dfs(city, visited):\n if city in visited:\n return \n visited.add(city)\n for nei in adj.get(city,[]):\n dfs(nei, visited) \n visited = set() \n for i in range(1,n+1):\n if i not in visited:\n components += 1\n dfs(i, visited)\n res = ((n-components) * c_road) + (c_lib * components) \n return res \n", "repo_name": "MohamedHamisa/Roads-And-Liabraries", "sub_path": "code.py", "file_name": "code.py", "file_ext": "py", "file_size_in_byte": 648, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "9639352754", "text": "from pymongo import MongoClient\n\nclass Client(MongoClient):\n\n \"\"\" Extending functionality of the MongoClient to support ORM models. 
\"\"\"\n\n def find_one(self, model, *args, db=None):\n\n \"\"\" Retrieve the first record in the database that matches the provided args \n \n Arguments:\n model - The Type of the model being requested.\n *args - Arguments to be sent to MongoCollection.find_one()\n db - Override the model's definition about what database to search\n\n Returns:\n If successful, returns an instance of type model populated with the document record.\n else, None\n \"\"\"\n\n ## Retrieve the database name\n db = db if db else model.__db_name__\n\n ## Retrieve the collection name\n collection_name = model.__collection_name__\n\n ## Retrieve the db.collection from the mongo client\n collection = self[db][collection_name]\n\n ## Find the document in the given mongo collection\n record = collection.find_one(*args)\n\n ## On failure, return None\n if not record: return None\n\n ## Create a instance of type model\n ret = model(db=db, **record)\n\n ## Set the connection details in the new instance.\n ret.__set_collection__(self)\n\n return ret\n\n def find(self, model, *args, db=None):\n\n \"\"\" Retrive a list of records from the database that match the provided args.\n\n Arguments:\n model - The type of the model being requested\n *args - Arguments to be sent to MongoCollection.find()\n db - Override the model's defined db location\n\n Returns:\n If successfull, returns a list of instance models populated with the document records.\n else, an empty list.\n \"\"\"\n\n ret = []\n\n ## Retrieve the database name\n db = db if db else model.__db_name__\n\n ## Retrieve the collection name\n collection_name = model.__collection_name__\n\n ## Retrieve the db.collection from the mongo client\n collection = self[db][collection_name]\n\n ## Find the document in the given mongo collection\n records = collection.find(*args)\n\n ## Loop through each result, create an instance and add to return\n for record in records:\n\n ## Create a instance of type model\n instance = model(db=db, **record)\n\n ## Set the connection details in the new instance.\n instance.__set_collection__(self)\n\n ## Add to the return list\n ret.append(instance)\n\n return ret\n\n def add(self, model):\n\n \"\"\" Associate the given model to the mongo database.\n\n This function does not save the data, it only associates the model.\n To save call model.save()\n \"\"\"\n\n ## Set the connection details in the new instance.\n model.__set_collection__(self)\n\n", "repo_name": "en0/smorm", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 2939, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "24373526302", "text": "#!/usr/bin/env pipenv run python\n\"\"\"Solutions to day 2 of Advent of Code\"\"\"\n\nfrom collections import defaultdict, Counter\nfrom get_input import get_input\n\ndef part1(lines):\n \"\"\"Solution to part 1\"\"\"\n checksum = defaultdict(int)\n for line in lines:\n digits = Counter(list(line))\n for i in range(2, 5):\n if i in digits.values():\n checksum[i] += 1\n result = 1\n for num in checksum.values():\n result *= num\n return result\n\n\ndef part2(lines):\n \"\"\"Solution to part 2\"\"\"\n for h_index, head in enumerate(lines):\n for tail in lines[:h_index]:\n common_letters = ''.join(\n h for h, t in zip(list(head), list(tail)) if h == t)\n if len(common_letters) + 1 == len(head):\n return common_letters\n raise Exception(\"Could not find 
soltution\")\n\nif __name__ == '__main__':\n LINES = get_input(day=2, year=2018).splitlines()\n print(\"Part 1: {}\".format(part1(LINES)))\n print(\"Part 2: {}\".format(part2(LINES)))\n", "repo_name": "ThomasZumsteg/adventofcode2018", "sub_path": "day02.py", "file_name": "day02.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 9, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 11, "usage_type": "call"}, {"api_name": "get_input.get_input", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "14873546602", "text": "#%%\ndef add_category_lists_sv20classifer(int_dict,prediction,Detached_list,Resonant_list,Scattering_list,Classical_list):\n# Takes in a dictionary of class names.\n# Takes in a prediction (probabilities that an object belongs to each class).\n# Takes in lists of probabilities assigned to membership of each class, for each prior object.\n# Sorts out the probabilities and assigns them to the correct list,\n# ie, Classical probability to the Classical list, Resonant probability to the Resonant list, etc.\n if int_dict[0] == 'Detached':\n Detached_list.append(prediction[0][0])\n elif int_dict[0] == 'Resonant':\n Resonant_list.append(prediction[0][0])\n elif int_dict[0] == 'Scattering':\n Scattering_list.append(prediction[0][0])\n elif int_dict[0] == 'Classical':\n Classical_list.append(prediction[0][0])\n else:\n raise Exception('Unknown format in int_dict.',int_dict)\n if int_dict[1] == 'Detached':\n Detached_list.append(prediction[0][1])\n elif int_dict[1] == 'Resonant':\n Resonant_list.append(prediction[0][1])\n elif int_dict[1] == 'Scattering':\n Scattering_list.append(prediction[0][1])\n elif int_dict[1] == 'Classical':\n Classical_list.append(prediction[0][1])\n else:\n raise Exception('Unknown format in int_dict.',int_dict)\n if int_dict[2] == 'Detached':\n Detached_list.append(prediction[0][2])\n elif int_dict[2] == 'Resonant':\n Resonant_list.append(prediction[0][2])\n elif int_dict[2] == 'Scattering':\n Scattering_list.append(prediction[0][2])\n elif int_dict[2] == 'Classical':\n Classical_list.append(prediction[0][2])\n else:\n raise Exception('Unknown format in int_dict.',int_dict)\n if int_dict[3] == 'Detached':\n Detached_list.append(prediction[0][3])\n elif int_dict[3] == 'Resonant':\n Resonant_list.append(prediction[0][3])\n elif int_dict[3] == 'Scattering':\n Scattering_list.append(prediction[0][3])\n elif int_dict[3] == 'Classical':\n Classical_list.append(prediction[0][3])\n else:\n raise Exception('Unknown format in int_dict.',int_dict)\n return Detached_list,Resonant_list,Scattering_list,Classical_list\n#%%\ndef datestr(datevec):\n # datevec should be [yyyy,mm,dd,hh,mm,ss]\n datestr = ''\n for i in range(len(datevec)-1):\n thisstr = str(datevec[i])\n if len(thisstr) == 1:\n thisstr = '0' + thisstr\n if i == 0:\n datestr = datestr + thisstr\n else:\n datestr = datestr + '_' + thisstr\n thisstr = str(datevec[-1])\n if datevec[-1] < 10:\n thisstr = '0' + thisstr\n datestr = datestr + '_' + thisstr\n return datestr\n#%%\ndef get_GMdict():\n GM_sun = 1\n GM_mercury = 1/6023600\n GM_venus = 1/408523.71\n GM_earthmoon = 1/328900.56\n GM_mars = 1/3098708\n GM_jupiter = 1/1047.3486\n GM_saturn = 1/3497.898\n GM_uranus = 1/22902.98\n GM_neptune = 1/19412.24\n GMdict = {'sun':GM_sun,'mercury':GM_mercury,'venus':GM_venus,\\\n 
'earth':GM_earthmoon,'mars':GM_mars,'jupiter':GM_jupiter,\\\n 'saturn':GM_saturn,'uranus':GM_uranus,'neptune':GM_neptune}\n return GMdict\n#%%\ndef get_sv20classifier():\n# Machine learning classifier for Kuiper belt objects. Code is unchanged from\n# code distributed with 'Machine learning classification of Kuiper belt populations'.\n# Smullen/Volk 2020 (sv20), MNRAS 497:2, September 2020, pg 1391-1403,\n# https://doi.org/10.1093/mnras/staa1935\n# Code is found at https://github.com/rsmullen/KBO_Classifier\n import pandas as pd\n from sklearn.model_selection import train_test_split\n from sklearn.ensemble import GradientBoostingClassifier\n training_file = '00_KBO_features.csv'\n all_KBOs = pd.read_csv(training_file,skipinitialspace=True)\n secure_KBOs = all_KBOs[all_KBOs['Securely Classified']==True]\n all_types = list(set(secure_KBOs['Class']))\n types_dict = { all_types[i] : i for i in range( len(all_types) ) }\n int_dict = { i : all_types[i] for i in range( len(all_types) ) }\n classes = secure_KBOs['Class'].map(types_dict)\n features_train, features_test, classes_train, classes_test = train_test_split(secure_KBOs, classes, test_size=0.3, random_state=30)\n features_train.drop(['MPC ID', 'Securely Classified', 'Class'], axis=1, inplace=True)\n features_train = features_train.to_numpy()\n features_test.drop(['MPC ID', 'Securely Classified', 'Class'], axis=1, inplace=True)\n features_test = features_test.to_numpy()\n classifier = GradientBoostingClassifier( learning_rate=0.1, loss='deviance', max_depth=3, max_features='log2', n_estimators=130, random_state=30 )\n classifier.fit(features_train, classes_train)\n return classifier, int_dict, types_dict\n#%%\ndef parsedata_sv20classifier(data,classifier,int_dict):\n# This function computes the necessary features to classify a KBO.\n# Data MUST be a 101 row x 6 column array.\n# Columns are t, a, e, i, Omega, omega.\n# Rows are different time outputs: MUST be 1000yr outputs, ie [0, 1E3, 2E3....99E3,100E3].\n# Returns features for classification.\n# This code is unchanged from sv20, ie\n# Smullen, Rachel A., and Kathryn Volk.\n# 'Machine learning classification of Kuiper belt populations.'\n# Monthly Notices of the Royal Astronomical Society 497.2 (2020): 1391-1403.\n# The classification simulation and data parsing is copied from sv20 code with\n# minimal changes to not have to query Horizons through Rebound when setting up a sim,\n# because Rebound's Horizons query is very slow.\n import numpy as np\n # Take stats of simulations.\n initials = data[0,1:] # a, e, i, Omega, omega\n finals = data[-1,1:]\n mins = np.amin(data[:,1:],axis = 0)\n maxes = np.amax(data[:,1:],axis = 0)\n dels = maxes-mins\n means = np.mean(data[:,1:],axis = 0)\n stdev = np.std(data[:,1:],axis = 0)\n # Take time derivatives.\n diffs = data[1:,:]-data[:-1,:]\n dxdt = diffs[:,1:]/diffs[:,0, np.newaxis] # Add on new axis to time to give same dimensionality as the numerator.\n mindxdt = np.amin(dxdt,axis = 0)\n meandxdt = np.mean(dxdt,axis = 0)\n maxdxdt = np.amax(dxdt,axis = 0)\n deldxdt = maxdxdt-mindxdt\n # Rearrange data into the order we want.\n arrs = [initials,finals,mins,means,maxes,stdev,dels,mindxdt,meandxdt,maxdxdt,deldxdt]\n inds = [0,1,2,3,4] # a, e, i, Omega, omega\n features = []\n ## Features contains all x values, then all y, etc: xi, xf, xmin, xmean, xmax, xsigma, Deltax, xdotmin, xdotmean, xdotmax.\n for i in inds:\n for a in arrs:\n features += [a[i]]\n features_out = np.array(features).reshape(1,-1) # Make sure features is a 2d array.\n prediction = 
classifier.predict_proba(features_out) # Predict the probabilities of class membership for object.\n if np.max(prediction) == prediction[0][0]:\n category = int_dict[0]\n elif np.max(prediction) == prediction[0][1]:\n category = int_dict[1]\n elif np.max(prediction) == prediction[0][2]:\n category = int_dict[2]\n elif np.max(prediction) == prediction[0][3]:\n category = int_dict[3]\n return category,prediction\n#%%\n# This file classifies all the clones of a single Kuiper belt object.\nTHIS_INSTANCE = 1\nimport time\nimport numpy as np\nimport pandas as pd\nimport rebound\nt0 = time.time()\n# Prime the machine learning classifier.\nclassifier, int_dict, types_dict = get_sv20classifier()\n# Get list of objects to classify.\ndate = '20221012'\nyear = 2022\nmonth = 1\nday = 1\nhour = 0\nminute = 0\nsecond = 0\ndatestr = datestr([year,month,day,hour,minute,second])\nelements_file = 'horizons_barycentric_nom_noclones_' + date + '_' + datestr + '.csv'\ndf = pd.read_csv(elements_file)\ndes_list = df['packed_designation'].tolist()\ndes = des_list[THIS_INSTANCE-1]\n# Read heliocentric orbital elements of clones.\nclones_input_file = 'clones_' + des + '.csv'\ndf = pd.read_csv(clones_input_file)\nNclones = df.shape[0]\nePh = df['ePh'].tolist() # eccentricity, Plutino, heliocentric\nqPh_au = df['qPh_au'].tolist() # perihelion distance, Plutino, heliocentric, au\ntpPh_jd = df['tpPh_jd'].tolist() # time of perihelion passage, Plutino, heliocentric, Julian date TDB\nWPh_deg = df['WPh_deg'].tolist() # longitude of ascending node, Plutino, heliocentric, degrees\nwPh_deg = df['wPh_deg'].tolist() # argument of perihelion, Plutino, heliocentric, degrees\niPh_deg = df['iPh_deg'].tolist() # inclination, Plutino, heliocentric, degrees\n# Read heliocentric orbital elements of planets.\nplanets_file = 'planets_for_' + des + '.csv'\ndf = pd.read_csv(planets_file)\nepochP = df['epochP_jd'][0] # epoch of orbital elements, Julian date\neJh = df['eJh'][0] # Jupiter\nqJh_au = df['qJh_au'][0]\ntpJh_jd = df['tpJh_jd'][0]\nWJh_deg = df['WJh_deg'][0]\nwJh_deg = df['wJh_deg'][0]\niJh_deg = df['iJh_deg'][0]\neSh = df['eSh'][0] # Saturn\nqSh_au = df['qSh_au'][0]\ntpSh_jd = df['tpSh_jd'][0]\nWSh_deg = df['WSh_deg'][0]\nwSh_deg = df['wSh_deg'][0]\niSh_deg = df['iSh_deg'][0]\neUh = df['eUh'][0] # Uranus\nqUh_au = df['qUh_au'][0]\ntpUh_jd = df['tpUh_jd'][0]\nWUh_deg = df['WUh_deg'][0]\nwUh_deg = df['wUh_deg'][0]\niUh_deg = df['iUh_deg'][0]\neNh = df['eNh'][0] # Neptune\nqNh_au = df['qNh_au'][0]\ntpNh_jd = df['tpNh_jd'][0]\nWNh_deg = df['WNh_deg'][0]\nwNh_deg = df['wNh_deg'][0]\niNh_deg = df['iNh_deg'][0]\n# Get masses of outer planets.\nGMdict = get_GMdict()\n# Create lists to store probabilities of class membership for each clone.\nclone_class_list = []\nclassical_list = []\nresonant_list = []\nscattering_list = []\ndetached_list = []\n# Run a Rebound simulation for each clone and classify it.\nfor iclone in range(Nclones):\n sim = rebound.Simulation()\n sim.add(m = 1,hash = '0')\n sim.integrator = 'ias15'\n epochobj = epochP\n # Build simulation, outer planets first.\n eobj = eJh # Jupiter\n qobj = qJh_au\n tpobj = tpJh_jd\n Wobj = np.radians(np.mod(WJh_deg,360))\n wobj = np.radians(np.mod(wJh_deg,360))\n iobj = np.radians(iJh_deg)\n aobj = qobj/(1-eobj)\n dt = epochobj - tpobj # time since pericenter passage in days\n dt = dt * (2*np.pi/365.25) # convert days to yr/(2pi)\n n = np.sqrt(1/aobj**3) # radians / (yr/2pi)\n Mobj = np.mod(n*dt,2*np.pi) # radians\n 
sim.add(primary=sim.particles[0],m=GMdict['jupiter'],hash='jupiter',\\\n a=aobj,e=eobj,inc=iobj,omega=wobj,Omega=Wobj,M=Mobj)\n eobj = eSh # Saturn\n qobj = qSh_au\n tpobj = tpSh_jd\n Wobj = np.radians(np.mod(WSh_deg,360))\n wobj = np.radians(np.mod(wSh_deg,360))\n iobj = np.radians(iSh_deg)\n aobj = qobj/(1-eobj)\n dt = epochobj - tpobj # time since pericenter passage in days\n dt = dt * (2*np.pi/365.25) # convert days to yr/(2pi)\n n = np.sqrt(1/aobj**3) # radians / (yr/2pi)\n Mobj = np.mod(n*dt,2*np.pi) # radians\n sim.add(primary=sim.particles[0],m=GMdict['saturn'],hash='saturn',\\\n a=aobj,e=eobj,inc=iobj,omega=wobj,Omega=Wobj,M=Mobj)\n eobj = eUh # Uranus\n qobj = qUh_au\n tpobj = tpUh_jd\n Wobj = np.radians(np.mod(WUh_deg,360))\n wobj = np.radians(np.mod(wUh_deg,360))\n iobj = np.radians(iUh_deg)\n aobj = qobj/(1-eobj)\n dt = epochobj - tpobj # time since pericenter passage in days\n dt = dt * (2*np.pi/365.25) # convert days to yr/(2pi)\n n = np.sqrt(1/aobj**3) # radians / (yr/2pi)\n Mobj = np.mod(n*dt,2*np.pi) # radians\n sim.add(primary=sim.particles[0],m=GMdict['uranus'],hash='uranus',\\\n a=aobj,e=eobj,inc=iobj,omega=wobj,Omega=Wobj,M=Mobj)\n eobj = eNh # Neptune\n qobj = qNh_au\n tpobj = tpNh_jd\n Wobj = np.radians(np.mod(WNh_deg,360))\n wobj = np.radians(np.mod(wNh_deg,360))\n iobj = np.radians(iNh_deg)\n aobj = qobj/(1-eobj)\n dt = epochobj - tpobj # time since pericenter passage in days\n dt = dt * (2*np.pi/365.25) # convert days to yr/(2pi)\n n = np.sqrt(1/aobj**3) # radians / (yr/2pi)\n Mobj = np.mod(n*dt,2*np.pi) # radians\n sim.add(primary=sim.particles[0],m=GMdict['neptune'],hash='neptune',\\\n a=aobj,e=eobj,inc=iobj,omega=wobj,Omega=Wobj,M=Mobj)\n # Add the Plutino clone.\n eobj = ePh[iclone]\n qobj = qPh_au[iclone]\n tpobj = tpPh_jd[iclone]\n Wobj = np.radians(np.mod(WPh_deg[iclone],360))\n wobj = np.radians(np.mod(wPh_deg[iclone],360))\n iobj = np.radians(iPh_deg[iclone])\n aobj = qobj/(1-eobj)\n dt = epochobj - tpobj # time since pericenter passage in days\n dt = dt * (2*np.pi/365.25) # convert days to yr/(2pi)\n n = np.sqrt(1/aobj**3) # radians / (yr/2pi)\n Mobj = np.mod(n*dt,2*np.pi) # radians\n sim.add(primary=sim.particles[0],m=0,hash='plutino',\\\n a=aobj,e=eobj,inc=iobj,omega=wobj,Omega=Wobj,M=Mobj)\n # Prepare to integrate simulation.\n sim.N_active = 5\n sim.move_to_com()\n time_outs = np.linspace(0,100E3,101)*2*np.pi # 100 kyr\n data = []\n for i,t in enumerate(time_outs):\n if t>0:\n sim.move_to_com()\n # Integrate to next output.\n sim.integrate(t, exact_finish_time=True)\n orbits = sim.calculate_orbits(primary=sim.calculate_com())\n o = orbits[-1] # take KBO\n # Save t, a, e, i, Omega, omega. 
Time in data needs to be in years, so divide by 2pi.\n step = np.array([t/2/np.pi, o.a, o.e, np.degrees(o.inc), np.degrees(o.Omega)%360, np.degrees(o.omega)%360])\n # Add step to data.\n if len(data)==0: data = step\n else: data = np.vstack((data,step))\n # Release memory so we don't accidentally keep integrating the same sim with a different clone.\n sim = None\n category,prediction = parsedata_sv20classifier(data,classifier,int_dict)\n clone_class_list.append(category)\n detached_list,resonant_list,scattering_list,classical_list = \\\n add_category_lists_sv20classifer(int_dict,prediction,detached_list,\\\n resonant_list,scattering_list,classical_list)\n# Make new, separate file of clone class lists.\ndf = pd.DataFrame()\ndf['category'] = clone_class_list\ndf['classical_probability'] = classical_list\ndf['resonant_probability'] = resonant_list\ndf['scattering_probability'] = scattering_list\ndf['detached_probability'] = detached_list\nclones_output_file = 'class_lists_' + des + '.csv'\ndf.to_csv(clones_output_file,index=False)\nt1 = time.time()\nelapsed_time_hours = (t1-t0)/3600\nprint('took',elapsed_time_hours,'hrs',THIS_INSTANCE,des)\n", "repo_name": "iwygh/mmk23a", "sub_path": "03a_classify_clones_template.py", "file_name": "03a_classify_clones_template.py", "file_ext": "py", "file_size_in_byte": 14275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 92, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.amin", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 152, "usage_type": "call"}, {"api_name": "time.time", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 180, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 190, "usage_type": "call"}, {"api_name": "rebound.Simulation", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 241, 
"usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 241, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 254, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 278, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 280, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 280, "usage_type": "attribute"}, {"api_name": "numpy.radians", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 292, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 294, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 310, "usage_type": "attribute"}, {"api_name": "numpy.degrees", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 313, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 322, "usage_type": "call"}, {"api_name": "time.time", "line_number": 330, "usage_type": "call"}]} +{"seq_id": "17291383048", "text": "\nimport select\nimport sys\nimport os\nimport threading\nfrom importlib import util\nfrom apphead import AppHeader\n\nclass MyServer:\n \"\"\" Class to encapsulate the server functions \"\"\"\n def __init__(self, hostname=''):\n self.hostname = hostname\n\n self.entradas 
= [sys.stdin]\n        self.connections = {}\n        self.connections_lock = threading.Lock()\n\n        self.applications = {}\n\n        self.quit_flag = False\n\n    def __enter__(self):\n        \"\"\" When the server is initialized, the internal sockets must be configured \"\"\"\n        for entry in os.scandir():\n            if entry.is_file():\n                if entry.name != \"apphead.py\" and entry.name.startswith(\"app\"):\n                    port_num = entry.name.split(\".\")[0][4:]\n                    self.set_application(port_num, entry.name, entry.path)\n        print(f\"Server started, {len(self.applications)} applications are available.\")\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\" When the server is finishing, the internal sockets must be closed \"\"\"\n        for app in self.applications.values():\n            app.close_connection()\n            if app.is_running():\n                app.stop(5)\n\n    def start(self, app_name=\"app\", app_path=\"\"):\n        \"\"\" method to start the server \"\"\"\n        self.update(False)\n        for port, app in self.applications.items():\n            if app.is_executable():\n                self._run_app(port, app);\n        while not self.quit_flag:\n            reciever_list, _, _ = select.select(self.entradas, [], [])\n            for reciever in reciever_list:\n                if reciever == sys.stdin:\n                    self._answer_commands()\n                else:\n                    self._attend_client(reciever)\n\n    def set_application(self, port_num, app_name, app_path):\n        \"\"\" sets the application on a specified port \"\"\"\n        if isinstance(port_num, str):\n            port_num = int(port_num) # can I do this?\n        self.applications[port_num] = AppHeader(port_num, app_name, app_path)\n        self.applications[port_num].configure_socket(self.hostname)\n        self.entradas.append(self.applications[port_num].app_socket)\n\n    def update(self, verbose=True):\n        \"\"\"\n        in case the app() method has been redefined, the server can update itself to run the new app\n        without having to close the socket that it currently reserves\n        \"\"\"\n        for app in self.applications.values():\n            app.setup(MyServer.load_module(app.name, app.path, piece=\"App\"))\n        if verbose:\n            print(\"The server has been successfully updated.\")\n\n    def _attend_client(self, socket):\n        cliente = threading.Thread(target=self._method_caller, args=socket.accept())\n        cliente.setDaemon(True)\n        cliente.start()\n\n    def _run_app(self, portnum, apphead):\n        apphead.thread_obj = threading.Thread(target=apphead.obj.main)\n        apphead.start()\n        print(f\"Application on port {portnum} is now running.\")\n\n    def _answer_commands(self):\n        cmd = input()\n        if cmd == \"exit\":\n            if self.connections:\n                print(\"Failed to exit the server: there are open connections\")\n            else: \n                self.quit_flag = True\n        elif cmd == \"forceexit\":\n            for connection in self.connections.copy():\n                connection.close()\n                self._unregister_connection(connection)\n            self.quit_flag = True\n        elif cmd == \"hist\":\n            print(str(self.connections.values()))\n        elif cmd == \"update\":\n            self.update()\n        elif cmd == \"ports\":\n            print(list(self.applications.keys()))\n        elif cmd.startswith(\"run\"):\n            pcmd = cmd.split(\" \")\n            if self.applications[int(pcmd[1])].is_running():\n                print(f\"Requested application is already running.\")\n            else:\n                self._run_app(pcmd[1], self.applications[int(pcmd[1])])\n        elif cmd.startswith(\"stop\"):\n            pcmd = cmd.split(\" \")\n            if self.applications[int(pcmd[1])].is_running():\n                self.applications[int(pcmd[1])].stop()\n                print(f\"Application on port {pcmd[1]} was stopped.\")\n            else:\n                print(f\"Requested application is not running.\")\n        elif cmd == \"apps\":\n            i = 0\n            for app in self.applications.values():\n                if app.is_running():\n                    i += 1\n            print(f\"There are currently {i} apps running on 
this server.\")\n\n    def _register_connection(self, conn, addr):\n        self.connections_lock.acquire()\n        self.connections[conn] = addr\n        self.connections_lock.release()\n        print(\"Accepted connection from: \", addr)\n\n    def _unregister_connection(self, conn):\n        self.connections_lock.acquire()\n        del self.connections[conn]\n        self.connections_lock.release()\n\n    def _recvall(self, sock, length):\n        buff = bytearray(length)\n        pos = 0\n        while pos < length:\n            read = sock.recv_into(memoryview(buff)[pos:])\n            if read == 0:\n                raise EOFError\n            pos += read\n        return buff\n\n    def _recv_ott(self, sock):\n        size = int.from_bytes(self._recvall(sock, 2), \"big\")\n        #size = ord(self._recvall(sock, 1))\n        return self._recvall(sock, size)\n\n    def _method_caller(self, conn, addr):\n        self._register_connection(conn, addr)\n        sock_id = conn.getsockname()[1]\n        while True:\n            data = bytearray()\n            try:\n                #data = conn.recv(self.applications[sock_id].obj.read_buffer())\n                data = self._recv_ott(conn)\n            except EOFError:\n                print(str(addr) + \" session has ended.\")\n                self._unregister_connection(conn)\n                conn.close()\n                sys.exit()\n            response_data = self.applications[sock_id].obj.process(data.decode(\"utf-8\"), addr)\n            if isinstance(response_data, str):\n                response_data = response_data.encode('utf-8')\n            conn.sendall(response_data)\n\n    @staticmethod\n    def load_module(module_name, module_path, piece=None):\n        \"\"\" Method used for dynamic module importing \"\"\"\n        # print(f\"Loading module {module_name}\")\n        spec = util.spec_from_file_location(module_name, module_path)\n        module = util.module_from_spec(spec)\n        sys.modules[module_name] = module\n        spec.loader.exec_module(module)\n\n        if piece:\n            return getattr(module, piece)\n", "repo_name": "Delpreti/lightchat", "sub_path": "server/myserver.py", "file_name": "myserver.py", "file_ext": "py", "file_size_in_byte": 6592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sys.stdin", "line_number": 14, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 16, "usage_type": "call"}, {"api_name": "os.scandir", "line_number": 24, "usage_type": "call"}, {"api_name": "select.select", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 48, "usage_type": "attribute"}, {"api_name": "apphead.AppHeader", "line_number": 57, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 72, "usage_type": "call"}, {"api_name": "apphead.thread_obj", "line_number": 77, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 77, "usage_type": "call"}, {"api_name": "apphead.obj", "line_number": 77, "usage_type": "attribute"}, {"api_name": "apphead.start", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 157, "usage_type": "call"}, {"api_name": "importlib.util.spec_from_file_location", "line_number": 167, "usage_type": "call"}, {"api_name": "importlib.util", "line_number": 167, "usage_type": "name"}, {"api_name": "importlib.util.module_from_spec", "line_number": 168, "usage_type": "call"}, {"api_name": "importlib.util", "line_number": 168, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 169, "usage_type": "attribute"}]} +{"seq_id": "32162835735", "text": "#! 
-*- coding: UTF-8 -*-\n\nimport logging\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom skee_t.db import DbEngine\nfrom skee_t.db.models import WxAccessToken\nfrom skee_t.services import BaseService\n\n__author__ = 'rensikun'\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass wxAccessTokenService(BaseService):\n    \"\"\"\n\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    def add(self, uuid, access_token, expires_in):\n        \"\"\"\n        Method to create a user\n        :param dict_args: Map-type parameter encapsulating the user information passed from the front end\n        :return:\n        \"\"\"\n        wxAccessToken = WxAccessToken(\n            uuid=uuid,\n            access_token=access_token,\n            expires_in=expires_in\n        )\n\n        session = None\n        rst_code = 0\n        rst_desc = 'success'\n        try:\n            session = DbEngine.get_session_simple()\n            session.add(wxAccessToken)\n            session.commit()\n        except Exception as e:\n            LOG.exception(\"Create user information error.\")\n            rst_code = '999999'\n            rst_desc = e.message\n            if session is not None:\n                session.rollback()\n        finally:\n            session.close()\n        return {'rst_code':rst_code, 'rst_desc':rst_desc}\n\n    def query(self, state):\n        \"\"\"\n        Method to create a user\n        :param dict_args: Map-type parameter encapsulating the user information passed from the front end\n        :return:\n        \"\"\"\n        session = None\n        rst_code = 0\n        rst_desc = 'success'\n\n        try:\n            session = DbEngine.get_session_simple()\n            return session.query(WxAccessToken) \\\n                .filter(WxAccessToken.state == state)\\\n                .order_by(WxAccessToken.entry_time.desc()).first()\n        except NoResultFound as e:\n            LOG.exception(\"access token error.\")\n            return None\n        except (TypeError, Exception) as e:\n            LOG.exception(\"List SkiResort information error.\")\n            # database exception\n            rst_code = '999999'\n            rst_desc = e.message\n        finally:\n            session.close()\n        return {'rst_code': rst_code, 'rst_desc': rst_desc}\n\n    def update(self, uuid, state):\n        \"\"\"\n        Method to create a user\n        :param dict_args: Map-type parameter encapsulating the user information passed from the front end\n        :return:\n        \"\"\"\n        session = None\n        rst_code = 0\n        rst_desc = 'success'\n\n        try:\n            session = DbEngine.get_session_simple()\n            session.query(WxAccessToken)\\\n                .filter(WxAccessToken.uuid == uuid)\\\n                .update({WxAccessToken.state:state}\n                        ,synchronize_session=False\n                        )\n            session.commit()\n        except NoResultFound as e:\n            LOG.exception(\"get_user_auth_info error.\")\n            return None\n        except (TypeError, Exception) as e:\n            LOG.exception(\"List SkiResort information error.\")\n            # database exception\n            rst_code = '999999'\n            rst_desc = e.message\n        finally:\n            session.close()\n        return {'rst_code': rst_code, 'rst_desc': rst_desc}", "repo_name": "skee-t/backend", "sub_path": "skee_t/wx/services/accessToken.py", "file_name": "accessToken.py", "file_ext": "py", "file_size_in_byte": 3178, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "skee_t.services.BaseService", "line_number": 17, "usage_type": "name"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 31, "usage_type": "call"}, {"api_name": "skee_t.db.DbEngine.get_session_simple", "line_number": 41, "usage_type": "call"}, {"api_name": "skee_t.db.DbEngine", "line_number": 41, "usage_type": "name"}, {"api_name": "skee_t.db.DbEngine.get_session_simple", "line_number": 65, "usage_type": "call"}, {"api_name": "skee_t.db.DbEngine", "line_number": 65, "usage_type": "name"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 66, "usage_type": "argument"}, {"api_name": "skee_t.db.models.WxAccessToken.state", "line_number": 67, "usage_type": "attribute"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 67, "usage_type": "name"}, {"api_name": 
"skee_t.db.models.WxAccessToken.entry_time.desc", "line_number": 68, "usage_type": "call"}, {"api_name": "skee_t.db.models.WxAccessToken.entry_time", "line_number": 68, "usage_type": "attribute"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 68, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 69, "usage_type": "name"}, {"api_name": "skee_t.db.DbEngine.get_session_simple", "line_number": 92, "usage_type": "call"}, {"api_name": "skee_t.db.DbEngine", "line_number": 92, "usage_type": "name"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 93, "usage_type": "argument"}, {"api_name": "skee_t.db.models.WxAccessToken.uuid", "line_number": 94, "usage_type": "attribute"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 94, "usage_type": "name"}, {"api_name": "skee_t.db.models.WxAccessToken.state", "line_number": 95, "usage_type": "attribute"}, {"api_name": "skee_t.db.models.WxAccessToken", "line_number": 95, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "12343469784", "text": "from odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\nfrom datetime import timedelta\nimport pytz\n\n\nclass ProjectTask(models.Model):\n _inherit = 'project.task'\n\n show_init_task = fields.Boolean(\n string='Show initiate task button', compute='_compute_show_init_task')\n employee_id = fields.Many2one(\n string='Employee', comodel_name='hr.employee')\n\n def _compute_show_init_task(self):\n for task in self:\n if not task.timesheet_ids:\n task.show_init_task = True\n else:\n found = task.timesheet_ids.filtered(\n lambda x: x.employee_id == (\n self.employee_id) and x.time_stop == x.time_start)\n if found:\n task.show_init_task = False\n else:\n task.show_init_task = True\n\n def action_button_initiate_task(self):\n self.ensure_one()\n timezone = pytz.timezone(self._context.get('tz') or 'UTC')\n date_start = fields.Datetime.now()\n date_start = date_start.replace(tzinfo=pytz.timezone(\n 'UTC')).astimezone(timezone)\n hours = int(date_start.strftime(\"%H\"))\n minutes = int(date_start.strftime(\"%M\"))\n initiate_timesheet_vals = {\n 'name': self.name,\n 'task_id': self.id,\n 'project_id': self.project_id.id,\n 'date': date_start.date(),\n 'time_start': round(hours + minutes/60, 14),\n 'time_stop': round(hours + minutes/60, 14)\n }\n if self.user_id:\n cond = [('user_id', '=', self.user_id.id)]\n self.employee_id = self.env['hr.employee'].search(cond, limit=1).id\n if not self.employee_id:\n raise UserError(_(\n 'Employee not found for user: {}').format(\n self.user_id.name))\n initiate_timesheet_vals.update({\n 'user_id': self.user_id.id,\n 'employee_id': self.employee_id.id})\n return self.env['account.analytic.line'].create(\n initiate_timesheet_vals)\n\n def action_button_end_task(self):\n self.ensure_one()\n lines = self.timesheet_ids.filtered(\n lambda x: x.employee_id == self.employee_id and (\n x.time_stop == x.time_start))\n for line in lines:\n timezone = pytz.timezone(self._context.get('tz') or 'UTC')\n date_end = fields.Datetime.now()\n date_end = date_end.replace(tzinfo=pytz.timezone(\n 'UTC')).astimezone(timezone)\n hours = int(date_end.strftime(\"%H\"))\n minutes = int(date_end.strftime(\"%M\"))\n stop = hours + minutes/60\n stop1 = timedelta(hours=stop)\n start = timedelta(hours=line.time_start)\n if stop1 > start:\n unit_amount = (stop1 - start).seconds / 3600\n vals = {'time_stop': round(stop, 14),\n 'unit_amount': 
unit_amount}\n                line.write(vals)\n", "repo_name": "avanzosc/hr-addons", "sub_path": "hr_timesheet_usability/models/project_task.py", "file_name": "project_task.py", "file_ext": "py", "file_size_in_byte": 3074, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "odoo.models.Model", "line_number": 7, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 7, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 10, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 10, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 12, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 12, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 30, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 31, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 32, "usage_type": "call"}, {"api_name": "odoo.exceptions.UserError", "line_number": 48, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 48, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 63, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 64, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 64, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}]}
{"seq_id": "42479607125", "text": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom utils.PropertiesReader import PropertiesReader\r\n\r\n\r\nclass Environnement:\r\n\r\n    def __init__(self, w, h, SMA):\r\n        self.w = w\r\n        self.h = h\r\n        self.grille2d = [[None for x in range(self.w)] for y in range(self.h)]\r\n        self.SMA = SMA\r\n        self.is_torrique = PropertiesReader.prop.toric()\r\n\r\n    def set_agent(self, agent):\r\n        self.grille2d[agent.y][agent.x] = agent\r\n\r\n    def delete_agent(self, agent):\r\n        if agent.previous_y or agent.previous_x:\r\n            self.grille2d[agent.previous_y][agent.previous_x] = None\r\n\r\n    def get_grille(self):\r\n        return self.grille2d\r\n\r\n    def is_their_a_collision(self, x, y):\r\n        \"\"\"\r\n        Method to check whether there is any collision\r\n        :param agent:\r\n        :param x: the agent's next x position\r\n        :param y: the agent's next y position\r\n        :return: an agent that was hit, a wall that was crossed, or nothing\r\n        \"\"\"\r\n\r\n        if y < 0 or x < 0 or x > self.w-1 or y > self.h-1:\r\n            # if there is a collision with a wall\r\n            return self.wall_collision_direction(x, y, self.w-1, self.h-1)\r\n\r\n        potential_agent = self.grille2d[y][x]\r\n        if potential_agent:\r\n            # if there is a collision with another agent, return that agent\r\n            return potential_agent\r\n\r\n    @staticmethod\r\n    def wall_collision_direction(x, y, max_x, max_y):\r\n        \"\"\"\r\n\r\n        :param x: the agent's next x position\r\n        :param y: the agent's next y position\r\n        :param max_x: max bound of x\r\n        :param max_y: max bound of y\r\n        :return: a tuple of booleans saying which axes to invert (x, y, or both)\r\n        \"\"\"\r\n\r\n        inverse_x_dir = False\r\n        inverse_y_dir = False\r\n        if y < 0 or y > max_y:\r\n            # top/bottom walls\r\n            inverse_y_dir = True\r\n        if x < 0 or x > max_x:\r\n            # side walls\r\n            inverse_x_dir = 
True\n\n return (inverse_x_dir, inverse_y_dir)\n", "repo_name": "FlorianDoublet/SMA-discret", "sub_path": "model/core/Environnement.py", "file_name": "Environnement.py", "file_ext": "py", "file_size_in_byte": 1975, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.PropertiesReader.PropertiesReader.prop.toric", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.PropertiesReader.PropertiesReader.prop", "line_number": 13, "usage_type": "attribute"}, {"api_name": "utils.PropertiesReader.PropertiesReader", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "12297494888", "text": "import json\nimport sys\n\nfrom pyxact import serialize_json\nfrom pyxact import loggingdb\nimport example_schema, utils\n\nrecord_json = json.dumps(example_schema.test_transaction1.transaction,\n cls=serialize_json.PyxactEncoder,\n indent=4)\nrecordlist_json = json.dumps(example_schema.test_transaction1.journal_list,\n cls=serialize_json.PyxactEncoder,\n indent=4)\ntransaction_json = json.dumps(example_schema.test_transaction1,\n cls=serialize_json.PyxactEncoder,\n indent=4)\n\n# PyxactEncoder is a customised JSON encoder class that knows how to serialise SQLRecord,\n# SQLRecordList and SQLTransaction types into JSON.\n\ncustom_decoder = serialize_json.PyxactDecoder()\ncustom_decoder.register_sqlschema(example_schema.accounting)\ncustom_decoder.register_sqlrecordlist(example_schema.JournalList)\ncustom_decoder.register_sqltransaction(example_schema.AccountingTransaction)\n\n# PyxactDecoder can turn the output of PyxactEncoder back into Python objects, provided the relevant\n# classes have been registered with it.\n\nif __name__ == '__main__':\n\n conn = utils.process_command_line('Demonstrate usage of pyxact JSON serialisation')\n\n cursor = conn.cursor()\n example_schema.create_example_schema(cursor)\n example_schema.populate_example_schema(cursor)\n", "repo_name": "jhumphry/pyxact", "sub_path": "doc/examples/example_json.py", "file_name": "example_json.py", "file_ext": "py", "file_size_in_byte": 1411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "json.dumps", "line_number": 8, "usage_type": "call"}, {"api_name": "example_schema.test_transaction1", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json.PyxactEncoder", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json", "line_number": 9, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 11, "usage_type": "call"}, {"api_name": "example_schema.test_transaction1", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json.PyxactEncoder", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json", "line_number": 12, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "example_schema.test_transaction1", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json.PyxactEncoder", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pyxact.serialize_json", "line_number": 15, "usage_type": "name"}, {"api_name": "pyxact.serialize_json.PyxactDecoder", "line_number": 21, "usage_type": "call"}, {"api_name": "pyxact.serialize_json", "line_number": 21, "usage_type": "name"}, {"api_name": "example_schema.accounting", "line_number": 22, "usage_type": "attribute"}, {"api_name": 
"example_schema.JournalList", "line_number": 23, "usage_type": "attribute"}, {"api_name": "example_schema.AccountingTransaction", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.process_command_line", "line_number": 31, "usage_type": "call"}, {"api_name": "example_schema.create_example_schema", "line_number": 34, "usage_type": "call"}, {"api_name": "example_schema.populate_example_schema", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "7380293358", "text": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nfrom typing import List\n\n\n\nclass FeatureMatchingLoss(nn.Module):\n def __init__(self, loss_type = 'l1', ):\n super(FeatureMatchingLoss, self).__init__()\n self.loss_type = loss_type\n\n def forward(self, \n real_features: List[List[List[torch.Tensor]]], \n fake_features: List[List[List[torch.Tensor]]]\n ) -> torch.Tensor:\n \"\"\"\n features: a list of features of different inputs (the third layer corresponds to\n features of a separate input to each of these discriminators)\n \"\"\"\n loss = 0\n\n for real_feats_net, fake_feats_net in zip(real_features, fake_features):\n # *_feats_net corresponds to outputs of a separate discriminator\n loss_net = 0\n\n for real_feats_layer, fake_feats_layer in zip(real_feats_net, fake_feats_net):\n assert len(real_feats_layer) == 1 or len(real_feats_layer) == len(fake_feats_layer), 'Wrong number of real inputs'\n if len(real_feats_layer) == 1:\n real_feats_layer = [real_feats_layer[0]] * len(fake_feats_layer)\n\n for real_feats_layer_i, fake_feats_layer_i in zip(real_feats_layer, fake_feats_layer):\n if self.loss_type == 'l1':\n loss_net += F.l1_loss(fake_feats_layer_i, real_feats_layer_i)\n elif self.loss_type == 'l2':\n loss_net += F.mse_loss(fake_feats_layer_i, real_feats_layer_i)\n\n loss_net /= len(fake_feats_layer) # normalize by the number of inputs\n loss_net /= len(fake_feats_net) # normalize by the number of layers\n loss += loss_net\n\n loss /= len(real_features) # normalize by the number of networks\n\n return loss", "repo_name": "SamsungLabs/rome", "sub_path": "src/losses/feature_matching.py", "file_name": "feature_matching.py", "file_ext": "py", "file_size_in_byte": 1848, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 386, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.l1_loss", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "72075280805", "text": "#Librerias a utilizar:\r\nimport os\r\nimport numpy as np\r\nimport cv2\r\nimport argparse\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\ndef image_stats(image):\r\n '''Esta función de el vector de características de la imagen,\r\n es decir vuelve la media y la desviación estandar de cada canal'''\r\n \r\n (l, a, b) = 
cv2.split(image)\r\n    (l_mean, l_std) = (l.mean(), l.std())\r\n    (a_mean, a_std) = (a.mean(), a.std())\r\n    (b_mean, b_std) = (b.mean(), b.std())\r\n\r\n    \r\n    return (l_mean, l_std, a_mean, a_std, b_mean, b_std)\r\n\r\n\r\ndef extraigo_features_dataset():\r\n    grupos=[]\r\n    num_clusters=14\r\n    for i in range(num_clusters):\r\n        path='./styleLib/'+ str(i)\r\n        dirs=os.listdir(path)\r\n        g=[]\r\n        for img in dirs:\r\n            pic=cv2.imread(os.path.join(path,img))\r\n\r\n            #Convert all images to LAB\r\n            pic=cv2.cvtColor(pic, cv2.COLOR_BGR2LAB)\r\n            g.append(pic)\r\n        grupos.append(g)\r\n\r\n    #Extract the average feature vector in each group:\r\n    features=[]\r\n    for k in range(num_clusters): \r\n        features_group=[]\r\n        for img in grupos[k]:\r\n            f=image_stats(img) #(l_mean, l_std, a_mean, a_std, b_mean, b_std)\r\n            features_group.append(f)\r\n        features_group=np.array(features_group)\r\n\r\n        L_m  = np.mean( features_group[:,0] )\r\n        L_std= np.mean( features_group[:,1] )\r\n        a_m  = np.mean( features_group[:,2] )\r\n        a_std= np.mean( features_group[:,3] )\r\n        b_m  = np.mean( features_group[:,4] )\r\n        b_std= np.mean( features_group[:,5] )\r\n\r\n        features.append([L_m ,L_std,a_m,a_std,b_m,b_std])\r\n    return features,grupos\r\n\r\ndef color_transfer(stat_source, img):\r\n    \r\n    '''Performs the color transfer,\r\n    stat_source is the mean feature vector\r\n    img: the image whose color we want to change\r\n    \r\n    '''\r\n    \r\n#    img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(\"float32\")\r\n\r\n    # Describe the desired features and the ones the image has\r\n    (l_mean_src, l_std_src, a_mean_src, a_std_src, b_mean_src, b_std_src) = stat_source\r\n    (l_mean_dest, l_std_dest, a_mean_dest, a_std_dest, b_mean_dest, b_std_dest) = image_stats(img)\r\n\r\n    # Subtract the mean value\r\n    (l, a, b) = cv2.split(img)\r\n    l -= l_mean_dest\r\n    a -= a_mean_dest\r\n    b -= b_mean_dest\r\n\r\n    # Scaling\r\n    l = (l_std_dest / l_std_src) * l\r\n    a = (a_std_dest / a_std_src) * a\r\n    b = (b_std_dest / b_std_src) * b\r\n\r\n    # Add the source mean value\r\n    l += l_mean_src\r\n    a += a_mean_src\r\n    b += b_mean_src\r\n\r\n    # Keep numbers between 0 and 255\r\n    l = np.clip(l, 0, 255)\r\n    a = np.clip(a, 0, 255)\r\n    b = np.clip(b, 0, 255)\r\n\r\n    # Merge channels\r\n    transfer = cv2.merge([l, a, b])\r\n    #transfer = cv2.cvtColor(transfer.astype(\"uint8\"), cv2.COLOR_LAB2BGR)\r\n\r\n    #Color-modified image\r\n    return transfer.astype(\"uint8\")\r\n\r\n\r\ndef show_image(title, image, width = 500):\r\n    \r\n    r = width / float(image.shape[1])\r\n    dim = (width, int(image.shape[0] * r))\r\n    image = cv2.cvtColor(image, cv2.COLOR_LAB2RGB)\r\n\r\n    resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\r\n    # Show the resized image\r\n    plt.title(title)\r\n    plt.imshow(resized)\r\n    plt.axis('off')\r\n    plt.show()\r\n    \r\n    \r\ndef Color_adjustment(img):\r\n    #extract features from the dataset:\r\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(\"float32\")\r\n    \r\n    features_dataset, dataset= extraigo_features_dataset() #everything in LAB color space\r\n\r\n    features_img=image_stats(img) #features of the image\r\n\r\n    #find the closest feature:\r\n    features_dataset=np.array(features_dataset)\r\n\r\n    features_img=np.array(features_img)\r\n    dist= np.sqrt(np.sum( (features_dataset- features_img )**2, axis=1))\r\n    ind=np.argmin(dist)\r\n    transferred = color_transfer(features_dataset[ind] , img) \r\n    return transferred\r\n    \r\n    \r\n    \r\n    \r\n    \r\n", "repo_name": "tania-19/Watercolorization", "sub_path": "funciones/colorAdj.py", 
"file_name": "colorAdj.py", "file_ext": "py", "file_size_in_byte": 3964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.split", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2LAB", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.COLOR_LAB2RGB", "line_number": 103, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 105, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 105, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2LAB", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "16512859415", "text": "import asyncio\nimport json\nimport os\nimport signal\n\nimport pytest\nfrom mock import patch\n\nfrom i3pyblocks import blocks, core, types\nfrom i3pyblocks.blocks import basic\n\nDEFAULT_STATE = dict(\n separator=None,\n urgent=None,\n align=None,\n markup=None,\n)\n\n\n@pytest.mark.asyncio\nasync def test_runner(capsys, mock_stdin):\n class ValidPollingBlock(blocks.PollingBlock):\n def __init__(self, block_name, sleep=0.1):\n self.count = 0\n super().__init__(\n block_name=block_name,\n sleep=sleep,\n default_state=DEFAULT_STATE,\n )\n\n 
async def run(self):\n self.count += 1\n self.update(str(self.count))\n\n runner = core.Runner()\n\n instance_1 = ValidPollingBlock(block_name=\"instance_1\")\n instance_2 = basic.TextBlock(\"Hello!\", block_name=\"instance_2\")\n instance_3 = basic.TextBlock(\"Another hello!\", block_name=\"instance_3\")\n\n await runner.register_block(instance_1)\n await runner.register_block(instance_2)\n await runner.register_block(instance_3)\n\n await runner.start(timeout=0.5)\n\n captured = capsys.readouterr()\n\n output_lines = captured.out.split(\"\\n\")\n header = json.loads(output_lines[0])\n\n assert header == {\"version\": 1, \"click_events\": True}\n\n results = json.loads(\"\\n\".join(output_lines[1:]))\n\n for i, result in enumerate(results[:5], start=1):\n assert result == [\n {\n \"name\": \"instance_1\",\n \"instance\": str(instance_1.id),\n \"full_text\": str(i),\n },\n {\n \"name\": \"instance_2\",\n \"instance\": str(instance_2.id),\n \"full_text\": \"Hello!\",\n },\n {\n \"name\": \"instance_3\",\n \"instance\": str(instance_3.id),\n \"full_text\": \"Another hello!\",\n },\n ]\n\n assert results[5] is None\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_fault_block(capsys, mock_stdin):\n class FaultPollingBlock(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n self.count = 0\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n self.count += 1\n if self.count > 4:\n raise Exception(\"Boom!\")\n self.update(str(self.count))\n\n runner = core.Runner()\n instance = FaultPollingBlock()\n await runner.register_block(instance)\n\n await runner.start(timeout=0.5)\n\n captured = capsys.readouterr()\n\n output_lines = captured.out.split(\"\\n\")\n\n results = json.loads(\"\\n\".join(output_lines[1:]))\n\n for i, result in enumerate(results[:4], start=1):\n assert result == [\n {\n \"name\": \"FaultPollingBlock\",\n \"instance\": str(instance.id),\n \"full_text\": str(i),\n },\n ]\n\n assert results[4] == [\n {\n \"name\": \"FaultPollingBlock\",\n \"instance\": str(instance.id),\n \"full_text\": \"Exception in FaultPollingBlock: Boom!\",\n \"urgent\": True,\n }\n ]\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_signal_handler(capsys, mock_stdin):\n async def send_signal():\n await asyncio.sleep(0.1)\n os.kill(os.getpid(), signal.SIGUSR1)\n\n async def send_another_signal():\n await asyncio.sleep(0.2)\n os.kill(os.getpid(), signal.SIGUSR2)\n\n class ValidPollingBlockWithSignalHandler(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n self.count = 0\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n pass\n\n async def signal_handler(self, sig):\n if sig == signal.SIGUSR1:\n self.update(\"received_signal\")\n elif sig == signal.SIGUSR2:\n self.update(\"received_another_signal\")\n else:\n raise Exception(\"This shouldn't happen\")\n\n runner = core.Runner()\n instance = ValidPollingBlockWithSignalHandler()\n await runner.register_block(instance, signals=[signal.SIGUSR1, signal.SIGUSR2])\n\n runner.register_task(send_signal())\n runner.register_task(send_another_signal())\n\n await runner.start(timeout=0.5)\n\n captured = capsys.readouterr()\n\n output_lines = captured.out.split(\"\\n\")\n\n results = json.loads(\"\\n\".join(output_lines[1:]))\n\n assert results[0] == [\n {\n \"name\": \"ValidPollingBlockWithSignalHandler\",\n \"instance\": str(instance.id),\n \"full_text\": \"received_signal\",\n }\n ]\n\n assert results[1] == [\n {\n \"name\": \"ValidPollingBlockWithSignalHandler\",\n \"instance\": 
str(instance.id),\n \"full_text\": \"received_another_signal\",\n }\n ]\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_signal_handler_exception(capsys, mock_stdin):\n async def send_signal():\n await asyncio.sleep(0.1)\n os.kill(os.getpid(), signal.SIGUSR1)\n\n class InvalidPollingBlockWithSignalHandler(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n self.count = 0\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n pass\n\n async def signal_handler(self, sig):\n raise Exception(\"Boom!\")\n\n runner = core.Runner()\n instance = InvalidPollingBlockWithSignalHandler()\n await runner.register_block(instance, signals=[signal.SIGUSR1])\n\n runner.register_task(send_signal())\n\n await runner.start(timeout=0.5)\n\n result = instance.result()\n\n assert (\n result[\"full_text\"]\n == \"Exception in InvalidPollingBlockWithSignalHandler signal handler: Boom!\"\n )\n\n assert result[\"urgent\"] is True\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_click_event():\n class ValidPollingBlockWithClickHandler(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n pass\n\n async def click_handler(\n self, x, y, button, relative_x, relative_y, width, height, modifiers\n ):\n self.update(\n f\"{x}-{y}-{button}-{relative_x}-{relative_y}-{width}-{height}-{modifiers}\"\n )\n\n runner = core.Runner()\n instance = ValidPollingBlockWithClickHandler()\n await runner.register_block(instance)\n\n click_event = json.dumps(\n {\n \"name\": \"ValidPollingBlockWithClickHandler\",\n \"instance\": str(instance.id),\n \"button\": types.MouseButton.LEFT_BUTTON,\n \"modifiers\": [types.KeyModifier.ALT, types.KeyModifier.SUPER],\n \"x\": 123,\n \"y\": 456,\n \"relative_x\": 12,\n \"relative_y\": 34,\n \"width\": 20,\n \"height\": 40,\n \"extra\": \"should be ignored\",\n }\n ).encode()\n\n await runner.click_event(click_event)\n result = instance.result()\n\n assert result == {\n \"name\": \"ValidPollingBlockWithClickHandler\",\n \"instance\": str(instance.id),\n \"full_text\": \"123-456-1-12-34-20-40-['Mod1', 'Mod4']\",\n }\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_click_event_exception():\n class InvalidPollingBlockWithClickHandler(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n pass\n\n async def click_handler(\n self, x, y, button, relative_x, relative_y, width, height, modifiers\n ):\n raise Exception(\"Boom!\")\n\n runner = core.Runner()\n instance = InvalidPollingBlockWithClickHandler()\n await runner.register_block(instance)\n\n click_event = json.dumps(\n {\"name\": \"InvalidPollingBlockWithClickHandler\", \"instance\": str(instance.id)}\n ).encode()\n\n await runner.click_event(click_event)\n\n result = instance.result()\n\n assert (\n result[\"full_text\"]\n == \"Exception in InvalidPollingBlockWithClickHandler click handler: Boom!\"\n )\n\n assert result[\"urgent\"] is True\n\n\n@pytest.mark.asyncio\nasync def test_runner_with_click_events(capsys):\n class ValidPollingBlockWithClickHandler(blocks.PollingBlock):\n def __init__(self, sleep=0.1):\n super().__init__(sleep=sleep, default_state=DEFAULT_STATE)\n\n async def run(self):\n pass\n\n async def click_handler(\n self, x, y, button, relative_x, relative_y, width, height, modifiers\n ):\n self.update(\n f\"{x}-{y}-{button}-{relative_x}-{relative_y}-{width}-{height}-{modifiers}\"\n )\n\n runner = core.Runner()\n 
instance = ValidPollingBlockWithClickHandler()\n await runner.register_block(instance)\n\n click_event = json.dumps(\n {\n \"name\": \"ValidPollingBlockWithClickHandler\",\n \"instance\": str(instance.id),\n \"button\": 1,\n \"modifiers\": [\"Mod1\"],\n \"x\": 123,\n \"y\": 456,\n \"relative_x\": 12,\n \"relative_y\": 34,\n \"width\": 20,\n \"height\": 40,\n \"extra\": \"should be ignored\",\n }\n ).encode()\n\n mock_input = [b\"[\\n\", click_event, b\",\"]\n\n with patch(\"i3pyblocks._internal.misc.get_aio_reader\") as get_aio_reader_mock:\n reader_mock = get_aio_reader_mock.return_value\n reader_mock.readline.return_value = mock_input[0]\n reader_mock.readuntil.side_effect = mock_input[1:]\n\n await runner.start(timeout=0.5)\n\n captured = capsys.readouterr()\n\n output_lines = captured.out.split(\"\\n\")\n\n results = json.loads(\"\\n\".join(output_lines[1:]))\n\n assert results[0] == [\n {\n \"name\": \"ValidPollingBlockWithClickHandler\",\n \"instance\": str(instance.id),\n \"full_text\": \"123-456-1-12-34-20-40-['Mod1']\",\n }\n ]\n", "repo_name": "thiagokokada/i3pyblocks", "sub_path": "tests/test_core.py", "file_name": "test_core.py", "file_ext": "py", "file_size_in_byte": 9964, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "52", "api": [{"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 22, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 22, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 35, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 35, "usage_type": "name"}, {"api_name": "i3pyblocks.blocks.basic.TextBlock", "line_number": 38, "usage_type": "call"}, {"api_name": "i3pyblocks.blocks.basic", "line_number": 38, "usage_type": "name"}, {"api_name": "i3pyblocks.blocks.basic.TextBlock", "line_number": 39, "usage_type": "call"}, {"api_name": "i3pyblocks.blocks.basic", "line_number": 39, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 20, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 80, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 80, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 91, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 91, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 101, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 78, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 125, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 126, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 126, "usage_type": "call"}, {"api_name": "signal.SIGUSR1", "line_number": 126, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 130, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 130, "usage_type": "call"}, {"api_name": "signal.SIGUSR2", "line_number": 130, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 132, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 132, "usage_type": "name"}, {"api_name": "signal.SIGUSR1", "line_number": 141, "usage_type": "attribute"}, {"api_name": "signal.SIGUSR2", "line_number": 
143, "usage_type": "attribute"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 148, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 148, "usage_type": "name"}, {"api_name": "signal.SIGUSR1", "line_number": 150, "usage_type": "attribute"}, {"api_name": "signal.SIGUSR2", "line_number": 150, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 161, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 122, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 183, "usage_type": "call"}, {"api_name": "os.kill", "line_number": 184, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 184, "usage_type": "call"}, {"api_name": "signal.SIGUSR1", "line_number": 184, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 186, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 186, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 197, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 197, "usage_type": "name"}, {"api_name": "signal.SIGUSR1", "line_number": 199, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 180, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 217, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 217, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 231, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 231, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 235, "usage_type": "call"}, {"api_name": "i3pyblocks.types.MouseButton", "line_number": 239, "usage_type": "attribute"}, {"api_name": "i3pyblocks.types", "line_number": 239, "usage_type": "name"}, {"api_name": "i3pyblocks.types.KeyModifier", "line_number": 240, "usage_type": "attribute"}, {"api_name": "i3pyblocks.types", "line_number": 240, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 215, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 263, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 263, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 275, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 275, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 279, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 261, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks.PollingBlock", "line_number": 297, "usage_type": "attribute"}, {"api_name": "i3pyblocks.blocks", "line_number": 297, "usage_type": "name"}, {"api_name": "i3pyblocks.core.Runner", "line_number": 311, "usage_type": "call"}, {"api_name": "i3pyblocks.core", "line_number": 311, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 315, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 333, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 344, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 295, "usage_type": "attribute"}]} +{"seq_id": "32758011691", "text": "#!/usr/bin/env python\nimport socket, _thread, sys, json, os\nfrom game import Game\n\nserver = os.getenv('SERVER')\nport = int(os.getenv('PORT'))\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n s.bind((server, port))\nexcept socket.error as e:\n str(e)\n\ns.settimeout(1.0)\ns.listen(2)\nprint('Server has started, waiting for a 
connection...')\n\ngames : dict[int, Game] = {}\nid_count = 0\n\ndef threaded_client(conn: socket.socket):\n req = json.loads(conn.recv(4096).decode())\n if req['connection'] == 'create':\n # generate gameid and create game\n side = 1\n game_id = -1\n for i in range(100):\n if i not in games:\n game_id = i\n break\n if game_id == -1:\n conn.send(str.encode(json.dumps({\n 'status': 'server_is_busy'\n })))\n return\n games[game_id] = Game(game_id)\n else:\n game_id = req['game_id']\n if game_id not in games:\n conn.send(str.encode(json.dumps({\n 'status': 'game_does_not_exist'\n })))\n return\n side = -1\n\n conn.send(str.encode(json.dumps({\n 'status': 'success',\n 'side': side,\n 'game_id': game_id,\n })))\n \n while True:\n try:\n req = json.loads(conn.recv(4096).decode())\n if game_id in games:\n game = games[game_id]\n \n game.connected[side] = True\n \n if not all([connected for connected in game.connected.values()]):\n res = {\n 'status': 'wait'\n }\n elif 'method' in req:\n res = game.request(req)\n else:\n res = {\n 'status': 'success'\n }\n\n res = json.dumps(res)\n conn.sendall(str.encode(res))\n else:\n break\n except Exception as e: \n print('Exception in threaded_client()')\n print(e)\n break\n\n print('lost connection')\n try:\n del games[game_id]\n print(f'closing game {game_id}')\n except:\n pass\n conn.close()\n\nif __name__ == '__main__':\n while True:\n conn = None\n try:\n conn, addr = s.accept()\n print(f'Connected to {addr}')\n\n _thread.start_new_thread(threaded_client, (conn,))\n except socket.timeout:\n pass\n except KeyboardInterrupt:\n if conn is not None:\n conn.close()\n break", "repo_name": "HuMangoPP/WizardsChess", "sub_path": "src/server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2643, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 5, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 6, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 8, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 8, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 8, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 12, "usage_type": "attribute"}, {"api_name": "game.Game", "line_number": 19, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 22, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "game.Game", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 47, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "game.connected", "line_number": 59, "usage_type": "attribute"}, {"api_name": "game.connected.values", "line_number": 61, "usage_type": "call"}, {"api_name": "game.connected", "line_number": 61, "usage_type": "attribute"}, {"api_name": "game.request", "line_number": 66, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 72, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 96, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "70720840475", "text": "# https://www.delftstack.com/it/howto/python/python-get-hostname/\n\nimport 
socket\nimport uuid\nimport requests\n\n\n# Returns the hostname of the system under which Python is currently running\nhostIP = socket.gethostbyname(socket.gethostname())\nprint(\"System hostname:\", hostIP)\n\n\n# Returns the private IP address\nst = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ntry:\n    st.connect(('255.255.255.0', 1)) #10.255.255.255\n    IP = st.getsockname()[0]\nexcept Exception:\n    IP = '127.0.0.1'\nfinally:\n    st.close()\nprint(\"Private IP address:\", IP)\n\n\n# Returns the public IP address\npublicIp = requests.get('https://checkip.amazonaws.com').text.strip()\nprint(\"Public IP address:\", publicIp)\n\n\n# Returns the MAC address of the device\nmacAddress = hex(uuid.getnode())\nprint(\"MAC address:\", macAddress)", "repo_name": "IGOR10S/Codes", "sub_path": "Python/IP/IP_Address.py", "file_name": "IP_Address.py", "file_ext": "py", "file_size_in_byte": 835, "program_lang": "python", "lang": "it", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "socket.gethostbyname", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 14, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 14, "usage_type": "attribute"}, {"api_name": "socket.SOCK_DGRAM", "line_number": 14, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "uuid.getnode", "line_number": 31, "usage_type": "call"}]}
{"seq_id": "11627160341", "text": "from brt.jit.codegen.storage import KernelStorager\nimport json\nfrom typing import Dict, List\nfrom collections import OrderedDict\n\nkernel_storager = KernelStorager()\ncursor = kernel_storager.cursor\n\nSELECT_ALL_CMD = r\"\"\"\nSELECT Key, Identifier, OpType \nFROM KernelCache\"\"\"\nSELECT_KEY_CMD = r\"\"\"\nSELECT Identifier\nFROM KernelCache\nWHERE Key = ?\"\"\"\nUPDATE_CMD = r\"\"\"\nUPDATE KernelCache\nSET Identifier = ?\nWHERE Key = ?\n\"\"\"\nqrs = cursor.execute(SELECT_ALL_CMD).fetchall()\nfor key, id, optype in qrs:\n    id: Dict = json.loads(id)\n\n    # Delete parameters.padding_mode\n    popret = id['parameters'].pop('padding_mode', None)\n    if popret is not None:\n        print(id['parameters'])\n        new_id = json.dumps(id)\n        cursor.execute(UPDATE_CMD, (new_id, key))\n        cursor.connection.commit()\n    \n    # Update output size of ConvTranspose2d\n    # {\n    #     \"output_infos\": {\n    #         \"output_0\": [34, 3, 32, 32]\n    #     },\n    #     ...\n    # }\n    # if optype == 'ConvTranspose2dBias':\n    #     outinfo: List = id['output_infos']['output_0']\n    #     if outinfo is None:\n    #         id['output_infos']['output_0'] = [id['input_infos']['input_0'][0], 3, 128, 128]\n    #     else:\n    #         outinfo[:2].extend([128, 128])\n    #         id['output_infos']['output_0'] = outinfo\n    #     print(f\"{id['output_infos']['output_0']}\")\n\n    \n    # Sort id\n    id['input_infos'] = OrderedDict(sorted(id['input_infos'].items()))\n    id['output_infos'] = OrderedDict(sorted(id['output_infos'].items()))\n    id['parameters'] = OrderedDict(sorted(id['parameters'].items()))\n    id = OrderedDict(sorted(id.items()))\n\n\n    id = json.dumps(id)\n    # print(id)\n    cursor.execute(UPDATE_CMD, (id, key))\n    cursor.connection.commit()\n    \n\n", "repo_name": "Raphael-Hao/brainstorm", "sub_path": "scripts/kernel_db_update.py", "file_name": "kernel_db_update.py", "file_ext": "py", "file_size_in_byte": 1757, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "50", "api": 
[{"api_name": "brt.jit.codegen.storage.KernelStorager", "line_number": 6, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 23, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 52, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 53, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 54, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "35509400063", "text": "import json\nimport requests\nimport pprint\n\nif __name__=='__main__':\n url='http://ws.audioscrobbler.com/2.0/?method=geo.getTopArtists&api_key=4beab33cc6d65b05800d51f5e83bde1b&country=Spain&format=json'\n data = requests.get(url).text\n data = json.loads(data)\n data = data['topartists']\n data = data['artist']\n topArtist = data[0]\n artist=topArtist['name']\n print(artist)\n #for x,y in topArtist.iteritems():\n # print(\"{0},{1}\".format(x,y))", "repo_name": "kariato/DataSciIntro", "sub_path": "lesson2-2.py", "file_name": "lesson2-2.py", "file_ext": "py", "file_size_in_byte": 469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "40414472604", "text": "import sys\nfrom pathlib import Path\nfrom typing import BinaryIO, Union\n\n\ndef is_silk_data(raw: bytes) -> bool:\n if len(raw) > 10:\n offset = 0\n if raw[0] == 2:\n offset = 1\n if raw[offset:10] == b\"#!SILK_V3\":\n return True\n return False\n\n\ndef get_file(file: Union[str, BinaryIO]) -> BinaryIO:\n if isinstance(file, str):\n path = Path(file)\n if not path.is_file():\n raise FileNotFoundError(file)\n return open(file, \"rb\")\n elif isinstance(file, BinaryIO):\n return file\n else:\n raise TypeError(file)\n\n\ndef force_quit():\n import os\n import multiprocessing\n os.kill(multiprocessing.current_process().pid, 15) # sigterm\n\n\ndef _play_sound(source: Union[str, bytes]):\n import winsound\n from threading import Thread\n\n t = Thread(\n target=winsound.PlaySound,\n name=\"PlayerThread\",\n args=(source, winsound.SND_FILENAME if isinstance(source, str) else winsound.SND_MEMORY),\n )\n t.start()\n try:\n while True:\n t.join(0.5)\n except KeyboardInterrupt:\n print(\"Interrupt received\")\n force_quit()\n\n\ndef play_audio(source: Union[str, bytes]):\n if sys.platform != \"win32\":\n raise RuntimeError(\"PlaySound only support windows\")\n\n _play_sound(source)\n", "repo_name": "DCZYewen/Python-Silk-Module", "sub_path": "src/pysilk/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1325, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Union", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.BinaryIO", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.BinaryIO", "line_number": 22, "usage_type": "argument"}, {"api_name": "os.kill", "line_number": 31, "usage_type": "call"}, {"api_name": "multiprocessing.current_process", "line_number": 31, "usage_type": "call"}, {"api_name": 
"typing.Union", "line_number": 34, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 38, "usage_type": "call"}, {"api_name": "winsound.PlaySound", "line_number": 39, "usage_type": "attribute"}, {"api_name": "winsound.SND_FILENAME", "line_number": 41, "usage_type": "attribute"}, {"api_name": "winsound.SND_MEMORY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 52, "usage_type": "name"}, {"api_name": "sys.platform", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "27199532874", "text": "import cv2 as cv\nimport os.path\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport random\n\nimg = cv.imread(\"../Qwirkle/qwirkle.jpg\")\n# print(img.shape)\n\nw, h, d = img.shape\n\ndw = int(w/6)\ndh = int(h/6)\n\nprint(\"dw=\", dw, \"dh=\", dh)\n\nfig, axes = plt.subplots(6, 6)\nplt.xticks([])\nplt.yticks([])\n\nfor c in range(6):\n x = c*dw\n for r in range(6):\n y = r*dh\n # print(\"x=\", x, \"y=\", y)\n t = img[x:x+dw,y:y+dh,:]\n # t = cv.resize(t, (50, 50), interpolation=cv.INTER_AREA)\n axes[c, r].imshow(t)\n\nplt.show()\n", "repo_name": "anumby-source/RobotServiceJeunesse2023", "sub_path": "training/cut_tuiles.py", "file_name": "cut_tuiles.py", "file_ext": "py", "file_size_in_byte": 552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "31997701819", "text": "#here we loop over all single mutations of the Wuhan variant and use the pre-trained GeoPPI to get the predicted change in binding energy\nimport pandas as pd\nimport os\nimport subprocess\nimport time\nfrom tqdm import tqdm\nimport csv \n\ndef get_muts(variant):\n \"\"\"This function returns the binding affiity of a given variant given its name\"\"\"\n variant_scores_path = \"../SARS-CoV-2-RBD_DMS_Omicron/results/final_variant_scores/final_variant_scores.csv\"\n\n variants_names = {\n \"Wuhan-Hu-1_v2\": \"Wuhan-Hu-1_v2\",\n \"N501Y\": \"Alpha\", \n \"Beta\": \"Beta\",\n \"Delta\": \"Delta\",\n \"E484K\": \"Eta\",\n \"Omicron_BA1\":\"Omicron_BA1\",\n \"Omicron_BA2\":\"Omicron_BA2\"\n }\n # Expression and Binding scores per variant background -- for the heatmap plots\n scores_df = (\n pd.read_csv(variant_scores_path)\n .rename(columns = {\"position\":\"site\",\n \"delta_expr\":\"RBD expression\", # rename for the selection menus \n \"delta_bind\":\"ACE2 binding\" # rename for the selection menus \n })\n .replace({\"target\":variants_names})\n )\n # Expression and Binding scores per variant background -- for the heatmap plots\n \n muts = scores_df.loc[scores_df[\"target\"] == variant][\"mutation\"]\n epression = scores_df.loc[scores_df[\"target\"] == variant][\"RBD expression\"]\n\n return muts, epression\n\n\ndef foldX_one_var(variant,start):\n\n muts, epression = get_muts(variant) 
\n\n row = [\"mut_name\", str(5), str(5)]\n with open('./stability_predictions_' + variant + '.csv', 'a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n for mut_name , value in tqdm(zip(muts,epression)):\n # some of these are not single sutants?... looks like tis a mutant of a mutant..\n # mutationinfo = mut_name[:1] + \"E\" + mut_name[1:] # add and E into the mutation name for GeoPPI to be compatable.\n try:\n # edited_mut_name = mut_name[1:-1] + '-' +mut_name[-1] #just for stipped part of the spike protein as the naming system is different. \n # print(edited_mut_name)\n # ignore nanss and loop over both with zip.. \n # pdbfile = \"spike_\" + edited_mut_name + \".pdb\"\n # pdb_dir = '/mnt/ncshare/ozkilim/charge_pca_deepmut/stripped_RBD_classical'\n \n pdb_dir = '../Wuhan_RBDs/structures/' + variant #must be callable for all files... this is the sticking point.\n pdbfile = start + mut_name +'.pdb' #uses start path infor as its different currently for each path.\n # save to some log file and then re-read it and delte it?\n comm = './foldx --command=Stability --pdb={} --pdb-dir={} --output-dir=./stability_output'.format(pdbfile,pdb_dir)\n os.system(comm) \n output_file = \"./stability_output/\" + start + mut_name + \"_0_ST.fxout\"\n file = pd.read_csv(output_file,header=None,sep=\"\\t\") #upack saved reasults from the API and repack for our own data anlysis.\n output = file.values.tolist()[0][1] \n\n row = [mut_name, str(value), str(output)]\n with open('./stability_predictions_' + variant + '.csv', 'a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n \n except:\n pass \n \n# automate all reasults for all varients for figure production. \ndef main():\n '''Repete FoldX predictionsof stability for each varient.'''\n varient_names = [\"Wuhan-Hu-1_v2\",\"Alpha\",\"Beta\",\"Delta\",\"Eta\",'Omicron_BA1',\"Omicron_BA2\"]\n start_path = [\"RBD_331_531_\",\"rot-Alpha_RBD_331_531_\",\"rot-Beta_RBD_331_531_\",\"rot-Delta_RBD_331_531_\",\"Eta_RBD_331_531_\",\"Omicron_RBD_331_531_\",\"rot-OmicronBA2_RBD_331_531_\"] \n for variant,start in zip(varient_names,start_path):\n foldX_one_var(variant,start) \n\nif __name__ == \"__main__\":\n main()\n \n", "repo_name": "csabaiBio/RBD-AlphaFold2-structures-and-phenotypic-information", "sub_path": "foldX_stability/foldx_expression_prediction.py", "file_name": "foldx_expression_prediction.py", "file_ext": "py", "file_size_in_byte": 3883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 24, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 45, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 48, "usage_type": "call"}, {"api_name": "os.system", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 64, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "31150424533", "text": "from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom IGL_account.models import User\nfrom .serializer import IconSerializer, IconTeamSerializer, IconTeamMemberSerializer\nfrom rest_framework import generics\nfrom .models import Icon, IconTeam, IconTeamMember\n\n\n# Create your views here.\n\nclass IconAPI(generics.GenericAPIView):\n serializer_class = IconSerializer\n\n def post(self, request):\n try:\n serializer = IconSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return 
Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to create icon\"})\n\n def get(self, request, pk=None):\n user_id = pk\n try:\n if user_id is not None:\n user = Icon.objects.get(id=user_id)\n serializer = IconSerializer(user)\n return Response({\"data\": serializer.data})\n user = Icon.objects.all()\n serializer = IconSerializer(user, many=True)\n return Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to get the Icon details\"})\n\n\nclass IconTeamAPI(generics.GenericAPIView):\n serializer_class = IconTeamSerializer\n\n def post(self, request):\n try:\n serializer = IconTeamSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to create the Icon team details\"})\n\n def get(self, request, pk=None):\n try:\n user = IconTeam.objects.all()\n serializer = IconTeamSerializer(user, many=True)\n return Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to find Icon team details\"})\n\n\nclass IconTeamMemberAPI(generics.GenericAPIView):\n serializer_class = IconTeamMemberSerializer\n\n def post(self, request):\n try:\n serializer = IconTeamMemberSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to update the Team member registration\"})\n\n def get(self, request, pk=None):\n user_id = pk\n try:\n if user_id is not None:\n user = IconTeamMember.objects.get(id=user_id)\n serializer = IconTeamMemberSerializer(user)\n return Response({\"data\": serializer.data})\n user = IconTeamMember.objects.all()\n serializer = IconTeamMemberSerializer(user, many=True)\n return Response({\"data\": serializer.data})\n except:\n return Response({\"message\": \"Unable to find team member details\"})\n", "repo_name": "pythondjangoproj/test-proj", "sub_path": "iconApp-main/icon/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "rest_framework.generics.GenericAPIView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 11, "usage_type": "name"}, {"api_name": "serializer.IconSerializer", "line_number": 12, "usage_type": "name"}, {"api_name": "serializer.IconSerializer", "line_number": 16, "usage_type": "call"}, {"api_name": "serializer.is_valid", "line_number": 17, "usage_type": "call"}, {"api_name": "serializer.save", "line_number": 18, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 19, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 21, "usage_type": "call"}, {"api_name": "models.Icon.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Icon.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.Icon", "line_number": 27, "usage_type": "name"}, {"api_name": "serializer.IconSerializer", "line_number": 28, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 29, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.Icon.objects.all", "line_number": 
30, "usage_type": "call"}, {"api_name": "models.Icon.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Icon", "line_number": 30, "usage_type": "name"}, {"api_name": "serializer.IconSerializer", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 32, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 37, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 37, "usage_type": "name"}, {"api_name": "serializer.IconTeamSerializer", "line_number": 38, "usage_type": "name"}, {"api_name": "serializer.IconTeamSerializer", "line_number": 42, "usage_type": "call"}, {"api_name": "serializer.is_valid", "line_number": 43, "usage_type": "call"}, {"api_name": "serializer.save", "line_number": 44, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 45, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 47, "usage_type": "call"}, {"api_name": "models.IconTeam.objects.all", "line_number": 51, "usage_type": "call"}, {"api_name": "models.IconTeam.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.IconTeam", "line_number": 51, "usage_type": "name"}, {"api_name": "serializer.IconTeamSerializer", "line_number": 52, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 53, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 53, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 55, "usage_type": "call"}, {"api_name": "rest_framework.generics.GenericAPIView", "line_number": 58, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 58, "usage_type": "name"}, {"api_name": "serializer.IconTeamMemberSerializer", "line_number": 59, "usage_type": "name"}, {"api_name": "serializer.IconTeamMemberSerializer", "line_number": 63, "usage_type": "call"}, {"api_name": "serializer.is_valid", "line_number": 64, "usage_type": "call"}, {"api_name": "serializer.save", "line_number": 65, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 66, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 66, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 68, "usage_type": "call"}, {"api_name": "models.IconTeamMember.objects.get", "line_number": 74, "usage_type": "call"}, {"api_name": "models.IconTeamMember.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "models.IconTeamMember", "line_number": 74, "usage_type": "name"}, {"api_name": "serializer.IconTeamMemberSerializer", "line_number": 75, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 76, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.IconTeamMember.objects.all", "line_number": 77, "usage_type": "call"}, {"api_name": "models.IconTeamMember.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "models.IconTeamMember", "line_number": 77, "usage_type": "name"}, {"api_name": "serializer.IconTeamMemberSerializer", "line_number": 78, 
"usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 79, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 79, "usage_type": "attribute"}, {"api_name": "rest_framework.response.Response", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "27546082886", "text": "import tensorlayerx as tlx\nimport numpy as np\nfrom itertools import chain\nfrom scipy.sparse.csgraph import shortest_path\nfrom gammagl.utils import k_hop_subgraph, to_scipy_sparse_matrix\nfrom gammagl.data import Graph, InMemoryDataset\nfrom gammagl.transforms import RandomLinkSplit\n\nclass SEALDataset(InMemoryDataset):\n def __init__(self, dataset, env, num_hops, neg_sampling_ratio=1.0, split='train'):\n self.data = dataset[0]\n self.num_hops = num_hops\n self.neg_sampling_ratio = neg_sampling_ratio\n self.env = env\n super().__init__(dataset.root)\n index = ['train', 'val', 'test'].index(split)\n self.data, self.slices = tlx.files.load_npy_to_any(name=self.processed_paths[index])\n\n @property\n def processed_file_names(self):\n return [f'{self.env}_SEAL_train_data.npy', f'{self.env}_SEAL_val_data.npy', f'{self.env}_SEAL_test_data.npy']\n\n def process(self):\n transform = RandomLinkSplit(num_val=0.05, num_test=0.1, neg_sampling_ratio=self.neg_sampling_ratio,\n is_undirected=True, split_labels=True)\n train_data, val_data, test_data = transform(self.data)\n\n self._max_z = 0\n\n # Collect a list of subgraphs for training, validation and testing:\n train_pos_data_list = self.extract_enclosing_subgraphs(\n train_data.edge_index, train_data.pos_edge_label_index, 1.)\n train_neg_data_list = self.extract_enclosing_subgraphs(\n train_data.edge_index, train_data.neg_edge_label_index, 0.)\n\n val_pos_data_list = self.extract_enclosing_subgraphs(\n val_data.edge_index, val_data.pos_edge_label_index, 1.)\n val_neg_data_list = self.extract_enclosing_subgraphs(\n val_data.edge_index, val_data.neg_edge_label_index, 0.)\n\n test_pos_data_list = self.extract_enclosing_subgraphs(\n test_data.edge_index, test_data.pos_edge_label_index, 1.)\n test_neg_data_list = self.extract_enclosing_subgraphs(\n test_data.edge_index, test_data.neg_edge_label_index, 0.)\n\n # Convert node labeling to one-hot features.\n for data in chain(train_pos_data_list, train_neg_data_list,\n val_pos_data_list, val_neg_data_list,\n test_pos_data_list, test_neg_data_list):\n data.x = tlx.reshape(\n tlx.scatter_update(\n tlx.zeros((data.z.shape[0] * (self._max_z + 1),), dtype=tlx.float32),\n data.z + tlx.arange(0, data.z.shape[0], dtype=data.z.dtype) * (self._max_z + 1),\n tlx.ones((data.z.shape[0],), dtype=tlx.float32)\n ),\n (data.z.shape[0], self._max_z + 1)\n )\n\n tlx.files.save_any_to_npy(self.collate(train_pos_data_list + train_neg_data_list),\n self.processed_paths[0])\n tlx.files.save_any_to_npy(self.collate(val_pos_data_list + val_neg_data_list),\n self.processed_paths[1])\n tlx.files.save_any_to_npy(self.collate(test_pos_data_list + test_neg_data_list),\n self.processed_paths[2])\n\n def extract_enclosing_subgraphs(self, edge_index, edge_label_index, y):\n data_list = []\n for src, dst in tlx.convert_to_numpy(edge_label_index).T.tolist():\n sub_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(\n [src, dst], self.num_hops, edge_index, relabel_nodes=True)\n src, dst = mapping.tolist()\n\n # Remove target link from the subgraph.\n mask1 = (sub_edge_index[0] != src) | (sub_edge_index[1] != dst)\n mask2 = (sub_edge_index[0] != dst) | (sub_edge_index[1] != src)\n sub_edge_index = 
tlx.mask_select(sub_edge_index, mask1 & mask2, axis=1)\n\n # Calculate node labeling.\n z = self.drnl_node_labeling(sub_edge_index, src, dst, num_nodes=sub_nodes.shape[0])\n\n data = Graph(x=tlx.gather(self.data.x, sub_nodes), z=z, edge_index=sub_edge_index, y=y)\n data_list.append(data)\n\n return data_list\n\n def drnl_node_labeling(self, edge_index, src, dst, num_nodes=None):\n # Double-radius node labeling (DRNL).\n src, dst = (dst, src) if src > dst else (src, dst)\n adj = to_scipy_sparse_matrix(edge_index, num_nodes=num_nodes).tocsr()\n\n idx = list(range(src)) + list(range(src + 1, adj.shape[0]))\n adj_wo_src = adj[idx, :][:, idx]\n\n idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))\n adj_wo_dst = adj[idx, :][:, idx]\n\n dist2src = shortest_path(adj_wo_dst, directed=False, unweighted=True,\n indices=src)\n dist2src = np.insert(dist2src, dst, 0, axis=0)\n\n dist2dst = shortest_path(adj_wo_src, directed=False, unweighted=True,\n indices=dst - 1)\n dist2dst = np.insert(dist2dst, src, 0, axis=0)\n\n dist = dist2src + dist2dst\n dist_over_2, dist_mod_2 = dist // 2, dist % 2\n\n z = 1 + np.minimum(dist2src, dist2dst)\n z += dist_over_2 * (dist_over_2 + dist_mod_2 - 1)\n z[src] = 1.\n z[dst] = 1.\n z = np.nan_to_num(z, nan=0.)\n z = tlx.convert_to_tensor(z, dtype=tlx.int64)\n\n self._max_z = max(tlx.reduce_max(z), self._max_z)\n\n return z", "repo_name": "BUPT-GAMMA/GammaGL", "sub_path": "examples/seal/data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 5326, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 157, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gammagl.data.InMemoryDataset", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorlayerx.files.load_npy_to_any", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorlayerx.files", "line_number": 17, "usage_type": "attribute"}, {"api_name": "gammagl.transforms.RandomLinkSplit", "line_number": 24, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorlayerx.reshape", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorlayerx.scatter_update", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorlayerx.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorlayerx.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorlayerx.arange", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorlayerx.ones", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorlayerx.float32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tensorlayerx.files.save_any_to_npy", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorlayerx.files", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorlayerx.files.save_any_to_npy", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorlayerx.files", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorlayerx.files.save_any_to_npy", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorlayerx.files", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorlayerx.convert_to_numpy", "line_number": 68, "usage_type": "call"}, {"api_name": "gammagl.utils.k_hop_subgraph", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorlayerx.mask_select", "line_number": 76, "usage_type": "call"}, {"api_name": "gammagl.data.Graph", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorlayerx.gather", "line_number": 81, 
"usage_type": "call"}, {"api_name": "gammagl.utils.to_scipy_sparse_matrix", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.sparse.csgraph.shortest_path", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 99, "usage_type": "call"}, {"api_name": "scipy.sparse.csgraph.shortest_path", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.nan_to_num", "line_number": 112, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorlayerx.int64", "line_number": 113, "usage_type": "attribute"}, {"api_name": "tensorlayerx.reduce_max", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "31827979038", "text": "from pathlib import Path\nfrom typing import Dict, Type\n\nfrom SourceIO2.shared.content_manager.providers.base_provider import IProvider\nfrom SourceIO2.shared.content_manager.detectors.s2_detector import Source2Detector\nfrom SourceIO2.shared.content_manager.providers.fs_provider import FileSystemProvider\nfrom SourceIO2.utils.path_utils import backwalk_resolve\n\n\nclass TestDataDetector(Source2Detector):\n @classmethod\n def register_common(cls, root_path: Path, content_providers: Dict[str, IProvider]):\n pass\n\n @classmethod\n def detect(cls: Type['TestDataDetector'], path: Path):\n root = None\n test_marker = backwalk_resolve(path, Path('TEST.TEST'))\n if test_marker is not None:\n root = test_marker.parent\n if root is None:\n return Path(), {}\n content_providers = {}\n gameinfo = backwalk_resolve(path, Path('gameinfo.gi'))\n for path_type, path in cls.scan_gameinfo(gameinfo, root):\n if path_type in ('game', 'mod', 'write'):\n if path.stem in content_providers:\n continue\n content_providers[path.stem] = FileSystemProvider(path)\n elif path_type == 'addonroot':\n for addon in path.iterdir():\n if addon.stem.startswith('.') or f'addon_{addon.stem}' in content_providers:\n continue\n content_providers[f'addon_{addon.stem}'] = FileSystemProvider(addon)\n\n return root, content_providers\n", "repo_name": "REDxEYE/SourceIO2", "sub_path": "shared/content_manager/detectors/test_data.py", "file_name": "test_data.py", "file_ext": "py", "file_size_in_byte": 1524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "50", "api": [{"api_name": "SourceIO2.shared.content_manager.detectors.s2_detector.Source2Detector", "line_number": 10, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 12, "usage_type": "name"}, {"api_name": "SourceIO2.shared.content_manager.providers.base_provider.IProvider", "line_number": 12, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 16, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 16, "usage_type": "name"}, {"api_name": "SourceIO2.utils.path_utils.backwalk_resolve", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "SourceIO2.utils.path_utils.backwalk_resolve", "line_number": 24, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 24, "usage_type": "call"}, {"api_name": "SourceIO2.shared.content_manager.providers.fs_provider.FileSystemProvider", "line_number": 
29, "usage_type": "call"}, {"api_name": "SourceIO2.shared.content_manager.providers.fs_provider.FileSystemProvider", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "42132817507", "text": "from typing import Awaitable, Iterable\nimport uuid\nimport time\nimport six\nimport base64\nimport hashlib\nimport hmac\nimport aiohttp\nimport xml.etree.ElementTree as ET\nfrom itertools import islice\n\n# Weekday and month names for HTTP date/time formatting; always English!\n_weekdayname = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n_monthname = [\n None, # Dummy so we can use 1-based month numbers\n \"Jan\",\n \"Feb\",\n \"Mar\",\n \"Apr\",\n \"May\",\n \"Jun\",\n \"Jul\",\n \"Aug\",\n \"Sep\",\n \"Oct\",\n \"Nov\",\n \"Dec\",\n]\n\na = \"GET\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nx-ms-client-request-id:39d4de1c-1958-11ed-85ae-7a8abb192e12\\nx-ms-date:Thu, 11 Aug 2022 09:30:23 GMT\\nx-ms-version:2021-06-08\\n/test2azure/adfsource/03120ebd-54b8-4f10-bdb4-40a840b7529b\\ncomp:list\\nrestype:directory\"\n\n_default_headers = {\n \"x-ms-version\": \"2021-06-08\",\n \"Accept\": \"application/json\",\n \"User-Agent\": \"azsdk-python-storage-file-share/12.9.0 Python/3.9.13 (macOS-12.4-arm64-arm-64bit)\",\n}\n\n_params = {\"restype\": \"directory\", \"comp\": \"list\"}\n\n\ndef format_date_time(timestamp: float) -> str:\n year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)\n return \"%s, %02d %3s %4d %02d:%02d:%02d GMT\" % (\n _weekdayname[wd],\n day,\n _monthname[month],\n year,\n hh,\n mm,\n ss,\n )\n\n\ndef encode_base64(data: bytes) -> str:\n if isinstance(data, six.text_type):\n data = data.encode(\"utf-8\")\n encoded = base64.b64encode(data)\n return encoded.decode(\"utf-8\")\n\n\ndef sign_string(key: str, string_to_sign: str) -> str:\n key = base64.b64decode(key.encode(\"utf-8\"))\n string_to_sign = string_to_sign.encode(\"utf-8\")\n signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256)\n digest = signed_hmac_sha256.digest()\n encoded_digest = encode_base64(digest)\n return encoded_digest\n\n\ndef gen_auth_header(access_key: str, account_name: str, string_to_sign: str) -> str:\n signature = sign_string(access_key, string_to_sign)\n auth_string = \"SharedKey \" + account_name + \":\" + signature\n return auth_string\n\n\ndef gen_headers(access_key: str, account_name: str, uri: str, params: dict) -> dict:\n request_id = str(uuid.uuid1())\n current_time = format_date_time(time.time())\n\n param_string = \"\"\n\n # append params by alphabet sorted\n for param, value in sorted(params.items()):\n param_string += f\"\\n{param}:{value}\"\n\n string_to_sign = f\"GET\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\nx-ms-client-request-id:{request_id}\\nx-ms-date:{current_time}\\nx-ms-version:2021-06-08\\n{uri}{param_string}\"\n auth_string = gen_auth_header(access_key, account_name, string_to_sign)\n\n return {\n \"x-ms-client-request-id\": request_id,\n \"x-ms-date\": current_time,\n \"Authorization\": auth_string,\n }\n\n\ndef parse_response(res: str) -> dict:\n root = ET.fromstring(res)\n\n return {\n \"dirs\": root.findall(\"./Entries/Directory/Name\"),\n \"files\": root.findall(\"./Entries/File/Name\"),\n \"marker\": root[-1].text,\n }\n\n\ndef chunk(it: Iterable, size: int):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n\nasync def fetch_list(\n session: aiohttp.ClientSession, params: dict, fileshare_info: dict, directory: str\n) -> Awaitable:\n \"\"\"return the sets of specific directory and it's sub-directory\"\"\"\n file_sets = set()\n\n # 
root directory and sub-level directory has different conversion of name\n if directory:\n endpoint = f\"/{fileshare_info['account_name']}/{fileshare_info['fileshare_name']}{directory}\"\n uri = f\"/{fileshare_info['fileshare_name']}{directory}\"\n else:\n endpoint = (\n f\"/{fileshare_info['account_name']}/{fileshare_info['fileshare_name']}\"\n )\n uri = f\"/{fileshare_info['fileshare_name']}\"\n\n # generate http headers\n headers = gen_headers(\n fileshare_info[\"access_key\"], fileshare_info[\"account_name\"], endpoint, params\n )\n\n async with session.get(\n uri,\n params=params,\n headers={**headers, **_default_headers},\n ) as resp:\n content = await resp.text()\n data = parse_response(content)\n\n # for root level directory, we want fetch a list of directory for futher process\n if not directory:\n for d in data[\"dirs\"]:\n file_sets.add(f\"/{d.text}\")\n\n for f in data[\"files\"]:\n file_sets.add(f.text)\n\n # for sub level directory, traverse the directory to get a list of fqdn file path\n else:\n for f in data[\"files\"]:\n file_sets.add(f\"{directory}/{f.text}\")\n\n for d in data[\"dirs\"]:\n file_sets |= await fetch_list(\n session,\n params,\n fileshare_info,\n directory + f\"/{d.text}\",\n )\n\n # azure storage rest-api only return max 5000 results, so we loop over all slice by marker flag\n if data[\"marker\"]:\n slice_sets = await fetch_list(\n session, {**params, \"marker\": data[\"marker\"]}, fileshare_info, directory\n )\n file_sets |= slice_sets\n\n return file_sets\n", "repo_name": "benchilian-tw/fileshareDiff", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5186, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "time.gmtime", "line_number": 42, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 55, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 57, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 62, "usage_type": "call"}, {"api_name": "hmac.HMAC", "line_number": 64, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 64, "usage_type": "attribute"}, {"api_name": "uuid.uuid1", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 97, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 97, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 106, "usage_type": "name"}, {"api_name": "itertools.islice", "line_number": 108, "usage_type": "call"}, {"api_name": "aiohttp.ClientSession", "line_number": 112, "usage_type": "attribute"}, {"api_name": "typing.Awaitable", "line_number": 113, "usage_type": "name"}]} +{"seq_id": "43355048617", "text": "import sys\nimport json\nimport datetime\nfrom os import system\n\nfrom _utility.Logger import Logger\nfrom _utility.get_package_dir import get_output_dir\nfrom covid_crawlers._base_classes.OverseasDataSources import OverseasDataSources\nfrom covid_crawlers.oceania.au_data.StateDataSources import StateDataSources\nfrom covid_crawlers.oceania.au_data.InfrequentStateDataJobs import InfrequentStateDataJobs\n\nfrom covid_db.RevisionIDs import RevisionIDs\nfrom covid_db.DerivedData import DerivedData\nfrom covid_db.DataPointsDB import DataPointsDB\nfrom covid_db.delete_old_dbs import delete_old_dbs\nfrom covid_db.SQLiteDataRevisions import SQLiteDataRevisions\nfrom 
covid_db.output_compressor.output_revision_datapoints_to_zip import output_revision_datapoints_to_zip\n\nfrom data_export.push_to_github import push_to_github\nfrom data_export.output_geojson import output_geojson\nfrom data_export.output_source_info import output_source_info\nfrom data_export.output_csv_data import output_csv_data\n\n\n# Output stdout/stderr to log files\nstdout_logger = sys.stdout = Logger(sys.stdout, ext='stdout')\nstderr_logger = sys.stderr = Logger(sys.stderr, ext='stderr')\n\nOUTPUT_DIR = get_output_dir() / 'output'\nTIME_FORMAT = datetime.datetime.now().strftime('%Y_%m_%d')\nLATEST_REVISION_ID = RevisionIDs.get_latest_revision_id(TIME_FORMAT)\nRUN_INFREQUENT_JOBS = '--run-infrequent-jobs' in [i.strip() for i in sys.argv]\nSOURCE_INFO = []\n\n\ndef run_infrequent_jobs():\n \"\"\"\n Run infrequent tasks which require more resources\n Comment out any of these if they break!\n \"\"\"\n isdj = InfrequentStateDataJobs()\n isdj.update_wa_regions()\n isdj.update_vic_tableau()\n isdj.update_sa_regions()\n isdj.update_act_powerbi()\n return isdj.get_status()\n\n\ndef _rem_dupes(datapoints):\n \"\"\"\n Remove dupes!\n \"\"\"\n add_me = set()\n\n for datapoint in datapoints:\n if datapoint in add_me:\n continue\n add_me.add(datapoint)\n\n return list(add_me)\n\n\ndef output_overseas_data(dpdb: DataPointsDB):\n \"\"\"\n Output from overseas data\n \"\"\"\n ods = OverseasDataSources()\n\n for source_id, source_url, source_desc, datapoints in ods.iter_data_sources():\n SOURCE_INFO.append([source_id, source_url, source_desc])\n dpdb.extend(source_id, _rem_dupes(datapoints), is_derived=False)\n\n return ods.get_status_dict()\n\n\ndef output_state_data(dpdb: DataPointsDB):\n \"\"\"\n Output from state data\n \"\"\"\n sds = StateDataSources()\n\n for source_id, source_url, source_desc, datapoints in sds.iter_data_sources():\n SOURCE_INFO.append([source_id, source_url, source_desc])\n dpdb.extend(source_id, _rem_dupes(datapoints), is_derived=False)\n\n return sds.get_status_dict()\n\n\ndef run_crawlers(status: dict, dpdb: DataPointsDB):\n if RUN_INFREQUENT_JOBS:\n # Run infrequent jobs that need Selenium or other\n # high-processing tasks only a few times a day tops\n status.update(run_infrequent_jobs())\n\n # Output both state and overseas data from crawlers\n if True:\n print(\"Outputting state data...\")\n status.update(output_state_data(dpdb))\n print(\"State data done. Outputting overseas data...\")\n status.update(output_overseas_data(dpdb))\n print(\"Overseas data done. Migrating sources with errors...\")\n else:\n # WARNING!!!! THIS CODE HAS BUGS+DOESN'T OUTPUT THE CASES!!!! ===============================================\n print(\"Outputting state and overseas data...\")\n _output_state_data = lambda: status.update(output_state_data(dpdb))\n _output_overseas_data = lambda: status.update(output_overseas_data(dpdb))\n t1 = threading.Thread(target=_output_state_data, args=())\n t2 = threading.Thread(target=_output_overseas_data, args=())\n\n t1.start()\n t2.start()\n t1.join()\n t2.join()\n print(\"State and overseas data done. 
Migrating sources with errors...\")\n\n\ndef copy_failed_from_previous_revision(status: dict, dpdb: DataPointsDB):\n \"\"\"\n If any of them failed, copy them across from the previous revision.\n Note the previous revision might have failed too, but should have\n copied the values from the previous revision before that, etc\n (assuming the crawler worked in the past)\n \"\"\"\n migrate_source_ids = []\n for status_key, status_dict in status.items():\n if status_dict['status'] == 'ERROR':\n print(\"ERROR OCCURRED, reverting to previous source ID data:\", status_key)\n migrate_source_ids.append(status_key)\n\n revisions = SQLiteDataRevisions()\n rev_date, rev_subid, dt = revisions.get_revisions()[0]\n prev_revision_path = revisions.get_revision_path(rev_date, rev_subid)\n dpdb.migrate_source_ids(prev_revision_path, migrate_source_ids)\n\n\ndef main():\n status = {}\n\n # Open the new output SQLite database\n sqlite_path = RevisionIDs.get_path_from_id(\n TIME_FORMAT, LATEST_REVISION_ID, 'sqlite'\n )\n dpdb = DataPointsDB(sqlite_path)\n run_crawlers(status, dpdb)\n dpdb.create_indexes()\n copy_failed_from_previous_revision(status, dpdb)\n\n # Derive \"new cases\" from \"total cases\" when\n # they aren't explicitly specified, etc\n DerivedData(dpdb).add_derived()\n\n # Commit and close the DB\n print(\"Derived data outputted OK: committing and closing\")\n dpdb.commit()\n dpdb.close()\n\n # Output basic status info to a .json info\n # This also signifies to the web\n # interface that the import went OK\n print(\"Writing status JSON file\")\n status_json_path = RevisionIDs.get_path_from_id(TIME_FORMAT, LATEST_REVISION_ID, 'json')\n with open(status_json_path, 'w', encoding='utf-8') as f:\n f.write(json.dumps({'status': status}, indent=4))\n\n # Output datapoints to zip\n print(\"Outputting datapoints to zip...\")\n with open(get_output_dir() / 'output' / f'{TIME_FORMAT}-{LATEST_REVISION_ID}.zip', 'wb') as f:\n output_revision_datapoints_to_zip(f, TIME_FORMAT, LATEST_REVISION_ID)\n\n # Upload them to remote AWS instance\n print(\"Uploading zip file to remote server...\")\n system('/usr/bin/env bash /home/david/upload_to_remote.sh %s' % f'{TIME_FORMAT}-{LATEST_REVISION_ID}')\n\n # Clean up old DBs to save on space\n print(\"Deleting older DBs to save space..\")\n delete_old_dbs()\n\n # Update the csv output\n print(\"Outputting CSV files:\")\n output_csv_data(TIME_FORMAT, LATEST_REVISION_ID)\n print('CSV write done')\n\n # Output information about the sources to a markdown table/csv file\n print(\"Outputting source info...\")\n output_source_info(SOURCE_INFO)\n\n # Output GeoJSON\n print(\"Outputting geojson...\")\n output_geojson()\n\n # Commit to GitHub\n print(\"Pushing to GitHub...\")\n push_to_github()\n print(\"Push to GitHub done!\")\n\n print(\"[end of script]\")\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "mcyph/world_subnational_covid_crawler", "sub_path": "output_data.py", "file_name": "output_data.py", "file_ext": "py", "file_size_in_byte": 6858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdout", "line_number": 26, "usage_type": "attribute"}, {"api_name": "_utility.Logger.Logger", "line_number": 26, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 27, "usage_type": "attribute"}, {"api_name": "_utility.Logger.Logger", "line_number": 27, "usage_type": "call"}, {"api_name": "_utility.get_package_dir.get_output_dir", "line_number": 29, "usage_type": "call"}, 
{"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs.get_latest_revision_id", "line_number": 31, "usage_type": "call"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs", "line_number": 31, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 32, "usage_type": "attribute"}, {"api_name": "covid_crawlers.oceania.au_data.InfrequentStateDataJobs.InfrequentStateDataJobs", "line_number": 41, "usage_type": "call"}, {"api_name": "covid_db.DataPointsDB.DataPointsDB", "line_number": 63, "usage_type": "name"}, {"api_name": "covid_crawlers._base_classes.OverseasDataSources.OverseasDataSources", "line_number": 67, "usage_type": "call"}, {"api_name": "covid_db.DataPointsDB.DataPointsDB", "line_number": 76, "usage_type": "name"}, {"api_name": "covid_crawlers.oceania.au_data.StateDataSources.StateDataSources", "line_number": 80, "usage_type": "call"}, {"api_name": "covid_db.DataPointsDB.DataPointsDB", "line_number": 89, "usage_type": "name"}, {"api_name": "covid_db.DataPointsDB.DataPointsDB", "line_number": 117, "usage_type": "name"}, {"api_name": "covid_db.SQLiteDataRevisions.SQLiteDataRevisions", "line_number": 130, "usage_type": "call"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs.get_path_from_id", "line_number": 140, "usage_type": "call"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs", "line_number": 140, "usage_type": "name"}, {"api_name": "covid_db.DataPointsDB.DataPointsDB", "line_number": 143, "usage_type": "call"}, {"api_name": "covid_db.DerivedData.DerivedData", "line_number": 150, "usage_type": "call"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs.get_path_from_id", "line_number": 161, "usage_type": "call"}, {"api_name": "covid_db.RevisionIDs.RevisionIDs", "line_number": 161, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 163, "usage_type": "call"}, {"api_name": "_utility.get_package_dir.get_output_dir", "line_number": 167, "usage_type": "call"}, {"api_name": "covid_db.output_compressor.output_revision_datapoints_to_zip.output_revision_datapoints_to_zip", "line_number": 168, "usage_type": "call"}, {"api_name": "os.system", "line_number": 172, "usage_type": "call"}, {"api_name": "covid_db.delete_old_dbs.delete_old_dbs", "line_number": 176, "usage_type": "call"}, {"api_name": "data_export.output_csv_data.output_csv_data", "line_number": 180, "usage_type": "call"}, {"api_name": "data_export.output_source_info.output_source_info", "line_number": 185, "usage_type": "call"}, {"api_name": "data_export.output_geojson.output_geojson", "line_number": 189, "usage_type": "call"}, {"api_name": "data_export.push_to_github.push_to_github", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "5833564771", "text": "#coding=utf-8\n\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# fileName : Merge.py\n# comment : Merge the Original Picture and Ouput Picture\n# version :\n# author : ArtyZe\n# date : \n#\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\nimport cv2\n\ndef Merge(img,img1):\n height,width, c = img1.shape\n for c in range(c):\n for i in range(0, height):\n for j in range(0, width):\n if(img[i,j] >90):\n #here 90 is Threshold for heatmap\n #print im1[i,j]\n img1[i,j,1] = 100+img1[i,j,1]\n\t\t\t\n cv2.imwrite(\"final.png\",img1)\n return img1\n \nim = cv2.imread(\"pred.png\",cv2.IMREAD_GRAYSCALE)\nim1 = 
cv2.imread(\"orig.png\")\n\n\nMerge(im, im1)", "repo_name": "my-hello-world/yolo_segmentation", "sub_path": "Merge.py", "file_name": "Merge.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "50", "api": [{"api_name": "cv2.imwrite", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "17523890592", "text": "# -*- coding: utf-8 -*-\n\n# from django.test import TestCase\n\n# Create your tests here.\nimport pytest\nfrom games.models import *\nfrom players.models import *\n\n\npytestmark = pytest.mark.django_db\n\n\nclass TestMatchModel():\n\n def test_tie(self):\n red, blue = self.build_players()\n response = self.factory(\n [red, blue],\n [['1', '1'], ['1', '1'], ['1', '1']]\n )\n assert response is None\n\n def test_red_win(self):\n red, blue = self.build_players()\n response = self.factory(\n [red, blue],\n [['1', '3'], ['1', '2'], ['1', '3']]\n )\n assert response == red\n\n def test_blue_win(self):\n red, blue = self.build_players()\n response = self.factory(\n [red, blue],\n [['1', '1'], ['1', '1'], ['1', '2']]\n )\n assert response == blue\n\n def test_not_finished_match(self):\n red, blue = self.build_players()\n response = self.factory(\n [red, blue],\n [['1', '3'], ['1', '2']],\n round_number=2\n )\n assert response is None\n\n def build_players(self):\n red = Player.objects.create(name='red_sox')\n blue = Player.objects.create(name='blue_sox')\n return red, blue\n\n def factory(self, players, sets, round_number=None):\n match = Match.objects.create(\n red_player=players[0],\n blue_player=players[1]\n )\n rounds = round_number if round_number else match.rounds_per_match\n for i in range(rounds):\n match_round = MatchRound.objects.create(\n match=match\n )\n red_event = MatchEvent.objects.create(\n player=match.red_player,\n choice=sets[i][0],\n match_round=match_round\n )\n blue_event = MatchEvent.objects.create(\n player=match.blue_player,\n choice=sets[i][1],\n match_round=match_round\n )\n return match.winner()\n", "repo_name": "ppsirg/jajanken", "sub_path": "jajanken/games/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 2067, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pytest.mark", "line_number": 11, "usage_type": "attribute"}, {"api_name": "players.models", "line_number": 56, "usage_type": "name"}, {"api_name": "players.models", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "37709292758", "text": "\n'''\ncollections.OrderedDict\nAn OrderedDict is a dictionary that remembers the order of the keys that were inserted first. 
If a new entry overwrites an existing entry, the original insertion position is left unchanged.\n\nExample\n\nCode\n\n>>> from collections import OrderedDict\n>>> \n>>> ordinary_dictionary = {}\n>>> ordinary_dictionary['a'] = 1\n>>> ordinary_dictionary['b'] = 2\n>>> ordinary_dictionary['c'] = 3\n>>> ordinary_dictionary['d'] = 4\n>>> ordinary_dictionary['e'] = 5\n>>> \n>>> print ordinary_dictionary\n{'a': 1, 'c': 3, 'b': 2, 'e': 5, 'd': 4}\n>>> \n>>> ordered_dictionary = OrderedDict()\n>>> ordered_dictionary['a'] = 1\n>>> ordered_dictionary['b'] = 2\n>>> ordered_dictionary['c'] = 3\n>>> ordered_dictionary['d'] = 4\n>>> ordered_dictionary['e'] = 5\n>>> \n>>> print ordered_dictionary\nOrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)])\n'''\n# Enter your code here. Read input from STDIN. Print output to STDOUT\nfrom collections import OrderedDict\nsomedict=OrderedDict()\nn=int(input())\nfor i in range(n):\n data=input()\n if data in somedict.keys():\n somedict[data]+=1\n else:\n somedict[data]=1\nprint('distinct words ',len(somedict.keys()))\nprint('count of words ',*(somedict.values()))\n\n'''\nSample Input\n\n4\nbcdef\nabcdefg\nbcde\nbcdef\nSample Output\n\n3\n2 1 1\n'''\n\n", "repo_name": "skriLLeX123/Python", "sub_path": "Python Collections/Python Collections Ordered Dict.py", "file_name": "Python Collections Ordered Dict.py", "file_ext": "py", "file_size_in_byte": 1295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.OrderedDict", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "39099775342", "text": "import plotly.express as px\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output,State\nimport plotly.graph_objects as go , plotly.io as pio# or plotly.express as px\nimport pandas as pd\nimport numpy as np\nfrom mpl_toolkits.mplot3d import axes3d\n\npio.templates.default = 'plotly_white' \n#samples = pd.read_pickle(\"c:\\\\users\\\\liors\\\\source\\\\repos\\\\ctrm\\\\ctrm\\\\sample.pkl\")\nsamples = pd.read_pickle(\"c:\\\\users\\\\liors\\\\source\\\\repos\\\\ctrm\\\\ctrm\\\\grail.pkl\")\nRGlocations = pd.read_csv(\"C:\\\\Users\\\\liors\\\\source\\\\repos\\ctrm\\\\for_lior\\\\red_grail_locations.csv\").sort_values([\"X\"],ignore_index=True)\nRGlocations =RGlocations.loc[RGlocations.name.str.contains(\"GA\")].reset_index()\nRGlocations[\"Xfix\"] = RGlocations.X-RGlocations.X.min()\nRGlocations[\"Yfix\"] = RGlocations.Y-RGlocations.Y.min()\nRGlocations[\"Zfix\"] = RGlocations.alt-RGlocations.alt.min()\n\n\ngrouped = samples.groupby([\"IPindex\",\"x\" ,\"y\",\"radius\"]).agg({\"semb\": ['mean','min','max','median', 'std' ]}).droplevel(axis=1, level=0).reset_index()\n\n#data = [go.Scatter3d(x = grouped.x,y=grouped.y,z=grouped.radius, marker=dict(size=12, color=grouped.semb, colorscale='Reds', opacity=0.5))]\napp = dash.Dash(__name__, external_stylesheets=[\"https://codepen.io/chriddyp/pen/bWLwgP.css\"])\n\n\nlayout = go.Layout(\n #scene=go.layout.Scene(\n # xaxis=go.layout.scene.XAxis(\n # showspikes=True,\n # spikecolor='#1fe5bd',\n # spikethickness=10,\n # ),\n # yaxis=go.layout.scene.YAxis(\n # showspikes=False,\n # spikecolor='#1fe5bd',\n # spikethickness=6,range=[0,200],\n # ),\n # zaxis=go.layout.scene.ZAxis(\n # showspikes=False,\n # spikecolor='black',\n # spikethickness=10,\n # ),\n #),\n height = 700,width = 700\n)\n#grouped.loc[(grouped.x==96 )&( grouped.y==140),'semb'] = 1\n#fig = 
go.Figure(data =data, layout=layout)\n#fig = px.scatter_3d(grouped, x = 'x', y='y',z='radius', color = 'semb',color_continuous_scale = 'Reds', opacity = 0.3)\n#fig2d = px.scatter()\n#fig.update_scenes(zaxis_autorange=\"reversed\",yaxis_autorange=\"reversed\",xaxis_autorange=\"reversed\")\ncolorsc = 0;\ntheme = {\n 'dark': True,\n 'detail': '#007439',\n 'primary': '#00EA64',\n 'secondary': '#6E6E6E',\n}\n\napp = dash.Dash()\napp.layout = html.Div([\n html.Div([\n html.Div(dcc.Graph(id = \"2dplot\"),style={'display': 'inline-block'}),\n html.Div(dcc.Graph(id = \"3dplot\",figure = px.scatter_3d()),style={'display': 'inline-block'}), \n html.Div(dcc.Slider(id = \"bar\", value = 0,max = samples.semb.max(), step = samples.semb.max()/10,min = 0,updatemode='drag',vertical=True),style={'display': 'inline-block'}),\n html.Div(dcc.Dropdown(id = \"aggregation-dropdown\",options=[{'label': 'mean', 'value': 'mean'}, {'label': 'maximum', 'value': 'max'},{'label': 'std', 'value': 'std'},{'label': 'median', 'value': 'median'}], value='mean', clearable=False),style={'display': 'inline-block', 'width': \"5%\"}),\n html.Button('Submit', id='submit-val', n_clicks=0)\n ]),\n \n dcc.Input(id=\"avrWindow\", type = \"number\", placeholder=\"window size\",min = 1,max = 10,value = 1),\n \n dcc.Graph(id = \"ImagePointView\"),\n dcc.Store(id = \"radius\")])\n \n #html.Div([dcc.Slider(id='radius',min=1,max=35,value=1,marks={str(rad): str(rad) for rad in range(1,35)},step=None,vertical=True)]\n # ,style={'display': 'inline-block'})],style={'display': 'block','height':'100%'})\n\n@app.callback(\n Output(\"ImagePointView\", \"figure\"),\n Input('3dplot', 'clickData'),\n Input('2dplot', 'clickData'))\ndef updateIP(selectedPoint,selectedPoint2):\n callbackVal = dash.callback_context.triggered[0]\n print(callbackVal)\n x=y=radius =radius= 0;\n if callbackVal['value'] is not None: \n x,y = callbackVal['value']['points'][0]['x'],callbackVal['value']['points'][0]['y']\n if ((callbackVal[\"prop_id\"]== '3dplot.clickData') ):\n radius = callbackVal['value']['points'][0]['customdata'][0]\n else:\n radius = callbackVal['value']['points'][0]['customdata']\n\n print(x,y,radius)\n target = samples[(samples.x == x)&(samples.y == y)&(samples.radius==radius)]\n title = \"semblance for x: %d, y: %d, depth: %d\" %(x,y,radius)\n fig = px.line(target,'t', 'semb',title = title)\n return(fig)\n\n@app.callback(\n Output('2dplot', 'figure'),\n Output('radius', 'data'),\n Input('3dplot', 'clickData'),\n Input('3dplot',\"figure\"),\n State(\"aggregation-dropdown\",\"value\")) \ndef update_figure(selected_rad,dfig,aggregation):\n colorsc = dfig['layout']['coloraxis']['colorscale']\n radius = 0;\n if selected_rad is not None: \n radius = selected_rad['points'][0]['z']\n filtered = grouped.loc[grouped.radius==radius];\n fig2d = go.Figure(data=go.Heatmap(\n z=filtered[aggregation],\n x=filtered[\"x\"],\n y=filtered[\"y\"],\n zmin = min(grouped[aggregation]),\n zmax = max(grouped[aggregation]),\n colorscale=colorsc,zsmooth = 'best', customdata = np.ones(filtered.size)*radius))\n fig2d.add_trace(go.Scatter(x=RGlocations[\"Xfix\"], y=RGlocations[\"Yfix\"],mode = 'markers', marker = dict(color = \"Blue\")))\n #fig2d = px.imshow(filtered.pivot_table(index = 'y', columns = 'x',values = aggregation),color_continuous_scale =colorsc,origin = \"lower\",aspect = \"auto\", labels = {'customdata':np.ones(filtered.size)*radius})\n fig2d.update_layout(title = (\"radius: %d\" %radius), xaxis = {\"title\":\"x\"},yaxis = {\"title\":\"y\"})\n title=\"Plot Title\"\n 
#fig.add_mesh3d(x=[0,0,int(max(grouped.x)),int(max(grouped.x))],y=[0,int(max(grouped.y)),int(max(grouped.y)),0],z=[selected_rad,selected_rad,selected_rad,selected_rad])\n return(fig2d,{'radius':radius})\n@app.callback(\n Output(\"3dplot\", \"figure\"),\n Output(\"bar\", \"max\"),\n Output(\"bar\", \"step\"),\n Input(\"submit-val\",\"n_clicks\"),\n Input(\"bar\",\"value\"),\n State(\"aggregation-dropdown\",\"value\"),\n State(\"avrWindow\",\"value\"))\ndef update_3dplot(clicks,bar, selectedAgg,window): \n\n title = \"Results are aggregated with: %s\\n semblance > %s \" %(selectedAgg, bar)\n\n fig = px.scatter_3d(grouped, x = 'x', y='y',z='radius', color = selectedAgg,color_continuous_scale = 'Reds', opacity = 0.3, title = title ,template=\"plotly_white\",size = (grouped[selectedAgg].ravel()>bar)*1, hover_data = {'customdata':grouped.radius})\n #fig = go.Figure(go.Scatter3d(x = grouped.x, y=grouped.y,z=grouped.radius ,marker=dict(size = (grouped[selectedAgg].ravel()>bar)*5,color = grouped[selectedAgg], colorscale= \"Reds\"),mode = \"markers\"))\n fig.update_scenes(zaxis_autorange=\"reversed\",yaxis_autorange=\"reversed\",xaxis_autorange=\"reversed\")\n fig.update_traces(marker = dict(line=dict(width=0)))\n return (fig, grouped[selectedAgg].max(),grouped[selectedAgg].max()/10 )\n\n\napp.run_server(debug=True, use_reloader=True) # Turn off reloader if inside Jupyter\nfig = px.scatter_3d(grouped, x = 'x', y='y',z='radius', color = selectedAgg,color_continuous_scale = 'Reds', opacity = 0.3, title = title,template=\"plotly_white\",size = (grouped[selectedAgg].ravel()>0.5)*1)\nfig.update_scenes(zaxis_autorange=\"reversed\",yaxis_autorange=\"reversed\",xaxis_autorange=\"reversed\")\nfig.update_xaxis(range = [0,2])\nfig.update_layout(xaxis=dict(rangeslider=dict(visible=True)))\nfig.show()", "repo_name": "liorstov/ctrmPublic", "sub_path": "pythonProj/3dfile.py", "file_name": "3dfile.py", "file_ext": "py", "file_size_in_byte": 7384, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "plotly.io.templates", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plotly.io", "line_number": 11, "usage_type": "name"}, {"api_name": "pandas.read_pickle", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "dash.Dash", "line_number": 24, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Layout", "line_number": 27, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 27, "usage_type": "name"}, {"api_name": "dash.Dash", "line_number": 60, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 61, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 62, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 63, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 63, "usage_type": "call"}, {"api_name": "dash_html_components.Div", "line_number": 64, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 64, "usage_type": "call"}, {"api_name": "plotly.express.scatter_3d", "line_number": 64, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 64, "usage_type": "name"}, {"api_name": "dash_html_components.Div", "line_number": 65, "usage_type": "call"}, {"api_name": "dash_core_components.Slider", "line_number": 65, "usage_type": "call"}, {"api_name": "dash_html_components.Div", 
"line_number": 66, "usage_type": "call"}, {"api_name": "dash_core_components.Dropdown", "line_number": 66, "usage_type": "call"}, {"api_name": "dash_html_components.Button", "line_number": 67, "usage_type": "call"}, {"api_name": "dash_core_components.Input", "line_number": 70, "usage_type": "call"}, {"api_name": "dash_core_components.Graph", "line_number": 72, "usage_type": "call"}, {"api_name": "dash_core_components.Store", "line_number": 73, "usage_type": "call"}, {"api_name": "dash.callback_context", "line_number": 83, "usage_type": "attribute"}, {"api_name": "plotly.express.line", "line_number": 96, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 96, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 79, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 80, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 81, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Figure", "line_number": 111, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 111, "usage_type": "name"}, {"api_name": "plotly.graph_objects.Heatmap", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 117, "usage_type": "call"}, {"api_name": "plotly.graph_objects.Scatter", "line_number": 118, "usage_type": "call"}, {"api_name": "plotly.graph_objects", "line_number": 118, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 100, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 101, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 102, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 103, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 104, "usage_type": "call"}, {"api_name": "plotly.express.scatter_3d", "line_number": 136, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 136, "usage_type": "name"}, {"api_name": "dash.dependencies.Output", "line_number": 125, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 126, "usage_type": "call"}, {"api_name": "dash.dependencies.Output", "line_number": 127, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 128, "usage_type": "call"}, {"api_name": "dash.dependencies.Input", "line_number": 129, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 130, "usage_type": "call"}, {"api_name": "dash.dependencies.State", "line_number": 131, "usage_type": "call"}, {"api_name": "plotly.express.scatter_3d", "line_number": 144, "usage_type": "call"}, {"api_name": "plotly.express", "line_number": 144, "usage_type": "name"}]} +{"seq_id": "73892931354", "text": "import cvlib as cv\nfrom cvlib.object_detection import draw_bbox\nimport cv2\nimport base64\nimport requests\nimport json\nimport time\nimport threading\n\ndef readData():\n url = 'https://firestore.googleapis.com/v1/projects/parkingfrontend/databases/(default)/documents/images/lotOne'\n \n #sending post request and saving response as response object \n\n while True:\n resp = requests.get(url=url)\n data = resp.json()\n imgStr = data[\"fields\"][\"imageData\"][\"stringValue\"]\n\n with open(\"imageToSave.png\", \"wb\") as fh:\n fh.write(base64.b64decode(imgStr))\n fh.close()\n time.sleep(3)\n\n\nthread1 = threading.Thread(target = readData, args = ())\nthread1.start()\n\n\n\ndef numberPost(n):\n url = \"http://hughboy.com:9999/number\"\n 
#url = \"https://parkingfrontend.firebaseio.com/carsNumber/data.json\"\n headers = {'Content-Type': 'application/json'}\n data = {'number':n}\t\n r = requests.post(url=url, headers=headers, data=json.dumps(data))\n\ndef numberImage(n):\n url = \"http://hughboy.com:9999/img.png\" \n #url = \"https://parkingfrontend.firebaseio.com/imageResult/data.json\"\n headers = {'Content-Type': 'application/json'}\n data = {'image':n}\t\n r = requests.post(url=url, headers=headers, data=json.dumps(data))\n\n\nwhile True:\n #read input image\n\n image = cv2.imread(\"imageToSave.png\")\n #image = cv2.imread(\"randomGooglePic.jpg\")\n\n #apply object detection\n bbox, label, conf = cv.detect_common_objects(image)\n\n\n\n #draw bounding box over detected objects\n out = draw_bbox(image, bbox, label, conf)\n cv2.imwrite('resultImage.png',image)\n with open(\"resultImage.png\", \"rb\") as image:\n bkl = base64.b64encode(image.read())\n bkl = str(bkl, 'utf-8')\n numberImage(bkl)\n image.close()\n \n # display output\n # press any key to close window \n #cv2.imshow(\"object_detection\", out)\n #cv2.waitKey()\n\n #save output to disk if needed\n\n \n countNumber = label.count(\"car\")+label.count(\"motorcycle\")+label.count(\"bus\")+label.count(\"truck\")\n numberPost(countNumber)\n print(label)\n print(\"Master Sam, I have counter that there is\",str(countNumber),\" cars!\")\n time.sleep(1)\n", "repo_name": "cachecake404/Parkinator", "sub_path": "ServerFiles/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 49, "usage_type": "call"}, {"api_name": "cvlib.detect_common_objects", "line_number": 53, "usage_type": "call"}, {"api_name": "cvlib.object_detection.draw_bbox", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 59, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "24695530037", "text": "import argparse\nfrom tkinter import SEL_FIRST\n\n\nclass ReturnType:\n def __init__(self, args):\n self.rotated = args.rotation\n self.draw = args.draw\n self.instances = [\n args.instance] if args.instance is not None else range(1, 41)\n self.encodings = [args.encoding] if args.encoding is not None else [2]\n if args.all:\n self.encodings = range(1, 5)\n\n\ndef parsArguments() -> ReturnType:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\", \"--instance\", help=\"Instance on which to run the model, default is all\", type=int)\n parser.add_argument(\n \"-r\", \"--rotation\", help=\"Flag to decide whether it is possible use rotated circuits, default is not rotated\", action='store_true')\n parser.add_argument(\n \"-d\", \"--draw\", help=\"Flag to decide whether to draw a the solution, default is 
false\", action='store_true')\n parser.add_argument(\n \"-e\", \"--encoding\", help=\"The encoding of constraints (1-pairwise, 2-sequential, 3-bitwise, 4-heule)\", type=int)\n parser.add_argument(\n \"-all\", \"--all\", help=\"Flag to decide whether to try all encodings\", action='store_true')\n args = parser.parse_args()\n returnType = ReturnType(args)\n return returnType\n", "repo_name": "MihailStamenov98/Combinatorial-Decision-Making-and-Optimization", "sub_path": "SAT/src/argument_parser.py", "file_name": "argument_parser.py", "file_ext": "py", "file_size_in_byte": 1266, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "12668647641", "text": "import boto3,json,tabulate,pprint,itertools,os,random,string,pwd,grp\nfrom shutil import copyfile\n\nInstances=[]\nTemplateFile=\"./templates/node_exporter_dashboard.json\"\nGrafanaDashPath=\"/var/lib/grafana/dashboards\"\nGrafanaUID=pwd.getpwnam(\"grafana\").pw_uid\nGrafanaGID=grp.getgrnam(\"grafana\").gr_gid\nMYDashPath=GrafanaDashPath+\"/mydashboards\"\n\ndef createDirNPermissions(dirin):\n if not os.path.exists(dirin):\n os.makedirs(dirin)\n os.chown(dirin,GrafanaUID,GrafanaGID)\n\ndef randomString(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\ndef replaceInFile(fin,key,val):\n with open(fin, 'r') as f:\n newlines = []\n for line in f.readlines():\n newlines.append(line.replace(key,val))\n with open(fin, 'w') as f:\n for line in newlines:\n f.write(line)\n\ncreateDirNPermissions(GrafanaDashPath)\ncreateDirNPermissions(MYDashPath)\n\nec2 = boto3.client('ec2')\nfilters = [\n {\n 'Name': 'tag:MACHINETAGKEY',\n 'Values': ['MACHINESTAGVAL']\n }\n]\nlist_instances=json.dumps(ec2.describe_instances(Filters=filters), indent=4, sort_keys=True, default=str)\ndata = json.loads(list_instances)\nidict={}\nilist=[]\n\nfor reservations in data['Reservations']:\n for instance in reservations['Instances']:\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n iName = tag['Value']\n iName = iName.replace(\" \",\"\")\n iPrivateDNS = instance['PrivateDnsName']\n dashUid = randomString(10)\n path=MYDashPath+\"/\"+iName+\"-dashboards.json\"\n copyfile(TemplateFile,path)\n os.chown(path,GrafanaUID,GrafanaGID)\n Replacements={'MACHINENAME': iName, 'MACHINEIPADDR': iPrivateDNS, 'DASHBOARDUID': dashUid}\n for k,v in Replacements.items():\n replaceInFile(path,k,v)\n", "repo_name": "wcampos/grafana_dynamic_node_exporter_dashboards", "sub_path": "generate_dasboards.py", "file_name": "generate_dasboards.py", "file_ext": "py", "file_size_in_byte": 1936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pwd.getpwnam", "line_number": 7, "usage_type": "call"}, {"api_name": "grp.getgrnam", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 13, "usage_type": "call"}, {"api_name": "os.chown", "line_number": 14, "usage_type": "call"}, {"api_name": "string.ascii_lowercase", "line_number": 17, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 18, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 32, "usage_type": "call"}, {"api_name": 
"json.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 53, "usage_type": "call"}, {"api_name": "os.chown", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "3374600806", "text": "\"\"\"\nmath_competition URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('register/', views.register, name='register'),\n path('logout/', views.log_out, name='logout'),\n path('login/', views.log_in, name='login'),\n path('competition//', views.comp_detail, name='comp_detail'),\n path('problem//', views.prob_detail, name='prob_detail'),\n path('competition/register//', views.register_comp, name='register_comp'),\n path('competition//standings/', views.standings, name='standings'),\n path('competition/new/', views.new_comp, name='new_comp'),\n path('competition//new/', views.new_prob, name='new_prob'),\n]", "repo_name": "xiaoyu2006/math-competition", "sub_path": "competition/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1376, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "9189323711", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[73]:\n\n\n# Importer la base de données \n\nimport pandas as pd\ndf = pd.read_csv(\"/Users/fadilamoussaoui/Desktop/Session H22/PSY4016 - Programmation/Projet/Base_de_données_travail.csv\", sep = \";\")\n\ndf\n\n\n# In[74]:\n\n\n# Nom des colonnes de la base de données \n\ndf.columns\n\n\n# In[75]:\n\n\n# Enelever les colonnes inutiles pour le projet \n\ndf_projet = df.drop(columns =['Age','Salary','Study_Level','Mood_Disorder','BMI','Sport','Music',\n 'Extraversion','Conscientiousness','Instability.Neuroticism','Openness_to_Experience.Intellect',\n 'Honesty.Humility','Detachment','Psychoticism','Negative_Affect','Antagonism'])\n\ndf_projet\n\n\n# In[76]:\n\n\n# Nom des colonnes de la base de données utiles au projet\n\ndf_projet.columns\n\n\n# In[77]:\n\n\n# Lire les données de la colonne \"Sex\"\n\nfor value in df_projet[\"Sex\"]:\n 
    print(value)\n\n\n# In[78]:\n\n\n# Read the data in the \"Meditation\" column\n\nfor value in df_projet[\"Meditation\"]:\n    print(value)\n\n\n# In[79]:\n\n\n# Read the data in the \"Empathy.Agreeableness\" column\n# !!! There is one missing value\n\nfor value in df_projet[\"Empathy.Agreeableness\"]:\n    print(value)\n    \n\n\n# In[80]:\n\n\n# Read the data in the \"Disinhibition\" column\n\nfor value in df_projet[\"Disinhibition\"]:\n    print(value)\n    \n\n\n# In[87]:\n\n\n# Code the Meditation variable ( \"No\" = 0; \"Yes\" = 1)\nfor value in df_projet['Meditation'] : \n    if value == \"No\":\n        df_projet.replace(value, int(str(0)), inplace = True)\n    \n    if value == \"Yes\":\n        df_projet.replace(value, int(str(1)), inplace = True)\n    \n# Read the data in the \"Meditation\" column\n\nfor value in df_projet[\"Meditation\"]:\n    print(value)\n\n\n# In[90]:\n\n\n# Variable types\n\nprint(df_projet[\"Sex\"].dtype)\nprint(df_projet[\"Meditation\"].dtype)\nprint(df_projet[\"Empathy.Agreeableness\"].dtype)\nprint(df_projet[\"Disinhibition\"].dtype)\n\n\n# In[ ]:\n\n\n# Convert from string to float\n\nfor col in df.columns:\n    if 'Meditation' in col:\n        df_projet['Meditation'] = df_projet['Meditation'].astype(float)\n    if 'Empathy.Agreeableness' in col:\n        df_projet['Empathy.Agreeableness'] = df_projet['Empathy.Agreeableness'].astype(float)\n    if 'Disinhibition' in col:\n        df_projet['Disinhibition'] = df_projet['Disinhibition'].astype(float)\n\n\n# In[46]:\n\n\n# Handle the missing data in the \"Empathy.Agreeableness\" column\n\nimport sklearn \nimport seaborn\nfrom sklearn import impute\nimport numpy as np\n\nimp = sklearn.impute.SimpleImputer(missing_values = np.nan, strategy = \"mean\")\n\n\n# In[ ]:\n\n\n# Replace the missing values with the mean of the scores\nimport numpy as np\nimport sklearn \nfrom sklearn import impute\nimp = sklearn.impute.SimpleImputer(missing_values = np.nan, strategy = \"mean\")\n\nfor col in ['Empathy.Agreeableness']:\n    \n    for index in range(len(df)):\n        \n        for i in [df_projet[col][index]]: \n            cell = i \n            imp.fit(df_projet[[col]])\n            df_projet[col] = imp.transform(df_projet[[col]])\n\n\n# In[101]:\n\n\n# Create the Z scores\n\ndef create_scoreZ():\n\n    import numpy as np\n    import sklearn \n    from sklearn import preprocessing\n    import pandas as pd\n    scaler = sklearn.preprocessing.StandardScaler()\n\n\n\n    columns = df_projet.columns.values.tolist()\n\n    for col in df_projet.columns : \n        \n        for i in df_projet[col]: \n            x_value = df_projet[col].to_numpy() \n            x_value = x_value[:, np.newaxis] \n            scaled_value = scaler.fit(x_value) \n            scaled_value = scaler.transform(x_value)\n            \n            df_projet['Z_'+col] = scaled_value\n\n\n# In[102]:\n\n\n# T-test for hypothesis 1\n \nimport scipy\nfrom scipy import stats\n \navec_meditation = df_projet[df_projet['Meditation'] == 1][\"Empathy.Agreeableness\"]  # Meditation was recoded to 0/1 above\nsans_meditation = df_projet[df_projet['Meditation'] == 0][\"Empathy.Agreeableness\"]\n\nstats.ttest_ind(avec_meditation, sans_meditation)\n\nres =
stats.ttest_ind(avec_meditation, sans_meditation)\nprint(res.statistic, res.pvalue)\n\n\n# In[ ]:\n\n\n# Graphique pour hypothèse 2 \n\nwith sns.axes_style(style='ticks'):\n sns.catplot(data=df_projet, x = 'Meditation', y = 'Disinhibition', kind='box')\n set_axis_labels('Meditation', 'Disinhibition') \n\n\n# In[104]:\n\n\n# Préalables AA supervisé\n\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom scipy import stats\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (8,5) \nimport seaborn as sns;\nimport sklearn\nfrom sklearn.datasets import fetch_lfw_people, make_blobs, make_circles, load_digits\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.svm import SVC \nfrom sklearn.decomposition import PCA\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import BaggingClassifier, RandomForestClassifier, RandomForestRegressor\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom IPython.display import Image\nfrom mpl_toolkits import mplot3d\nfrom ipywidgets import interact, fixed\nfrom math import sqrt\n\n\n# In[ ]:\n\n\n# AA supervisé code \n\nclass KNN_GRAPHIQUE:\n\n\tdef AA_supervise():\n\n\t\tX_data = pd.read_excel(\"/Users/fadilamoussaoui/Desktop/Session H22/PSY4016 - Programmation/Projet/Base_de_données_travail.csv\")\n\t\tX = X_data['Sex', 'Meditation', 'Empathy.Agreeableness', 'Disinhibition'].values\n\n\t\tMeditation = X_data.Meditation.to_list()\n\t\tle = LabelEncoder()\n\t\tlabel=le.fit_transform(Meditation)\n\t\ty = label\n\n\t\tknn = KNN(n_neighbors=3)\n\t\ty_pred = cross_validation_predict(knn, X, y, cv = 5)\n\n\t\tprint(sqrt(mean_squared_error(y,y_pred)))\n\t\tprint(r2_score(y,y_pred))\n\n\t\terror = []\n\t\tfor k in range(1,100):\n\t\t knn = KNN(n_neighbors=k)\n\t\t y_pred = cross_validation_predict(knn, X, y)\n\t\t error.append(mean_squared_error(y,y_pred))\n \n\n\n# In[ ]:\n\n\n# AA supervisé graphique\n\nax = plt.axes()\nax.plot(range(1,100),error, color=\"red\", linestyle=\"-\", marker=\"o\",\n markerfacecolor=\"blue\", markersize=10)\nax.set_title(\"KNN non standardisé\")\nax.set_xlabel(\"K\")\nax.set_ylabel(\"Erreur\")\nplt.show()\n\n\n# In[ ]:\n\n\n# Préalables AA non-supervisé\nimport sklearn\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nwarnings.simplefilter(\"ignore\")\nprint('sklearn version:', sklearn.__version__)\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\nfrom sklearn.decomposition import PCA\n\n\n# In[ ]:\n\n\n# AA non-supervisé code \n\ndef AA_non_supervise():\n\n X_data = pd.read_excel(\"/Users/fadilamoussaoui/Desktop/Session H22/PSY4016 - Programmation/Projet/Base_de_données_travail.csv\")\n X = X_data['Sex', 'Meditation', 'Empathy.Agreeableness', 'Disinhibition'].values\n\n x = X_data.loc[:, caractéristiques].values\n y = X_data.loc[:,[\"Meditation\"]].values\n\n x = StandardScaler().fit_transform(x)\n\n\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(x)\n Df1 = pd.DataFrame(data = principalComponents\n , columns = ['principal component 1', 'principal component 
2'])\n Df2 = pd.concat([Df1, X_data[['Meditation']]], axis = 1)\n\n\n# In[ ]:\n\n\n# AA non-supervisé graphique \n\nfig = plt.figure(figsize = (8,8))\nax = fig.add_subplot(1,1,1) \nax.set_xlabel('Principal Component 1', fontsize = 15)\nax.set_ylabel('Principal Component 2', fontsize = 15)\nax.set_title('2 component PCA', fontsize = 20)\n\ntargets = ['No', 'Yes']\ncolors = ['r', 'g']\nfor target, color in zip(targets,colors):\n indicesToKeep = finalDf['Meditation'] == target\n ax.scatter(Df2.loc[indicesToKeep, 'principal component 1']\n , Df2.loc[indicesToKeep, 'principal component 2']\n , c = color\n , s = 50)\n \nprint(pca.explained_variance_ratio_)\n\n\n# In[ ]:\n\n\n# Base de données sqlite avec une pipeline\n\nimport sqlite\nsqlite.try_sqlite()\n\n", "repo_name": "fadilamoussaoui/psy4016", "sub_path": "Projet_Final_Fadila_Moussaoui.py", "file_name": "Projet_Final_Fadila_Moussaoui.py", "file_ext": "py", "file_size_in_byte": 8785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.impute", "line_number": 134, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 134, "usage_type": "attribute"}, {"api_name": "sklearn.impute.SimpleImputer", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.impute", "line_number": 144, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 144, "usage_type": "attribute"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 167, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 177, "usage_type": "attribute"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 195, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 195, "usage_type": "name"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 197, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 197, "usage_type": "name"}, {"api_name": "seaborn.axes_style", "line_number": 207, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 208, "usage_type": "call"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 221, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 221, "usage_type": "name"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 223, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 223, "usage_type": "name"}, {"api_name": "seaborn.axes_style", "line_number": 232, "usage_type": "call"}, {"api_name": "seaborn.catplot", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 249, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 249, "usage_type": "name"}, {"api_name": "pandas.read_excel", "line_number": 278, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 282, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 286, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "warnings.filterwarnings", "line_number": 320, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 321, "usage_type": "call"}, {"api_name": "sklearn.__version__", "line_number": 322, "usage_type": "attribute"}, {"api_name": "seaborn.set", "line_number": 325, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 338, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 347, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 349, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "sqlite.try_sqlite", "line_number": 383, "usage_type": "call"}]} +{"seq_id": "18393083330", "text": "import base64\nimport copy\nimport json\nimport isodate\nfrom datetime import datetime\nfrom pyld import jsonld\nimport pytz\n\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.exceptions import InvalidSignature\n\nSECURITY_CONTEXT_URL = 'https://w3id.org/security/v1'\nSECURITY_CONTEXT = {\n \"@context\": {\n \"id\": \"@id\",\n \"type\": \"@type\",\n\n \"dc\": \"http://purl.org/dc/terms/\",\n \"sec\": \"https://w3id.org/security#\",\n \"xsd\": \"http://www.w3.org/2001/XMLSchema#\",\n\n \"EcdsaKoblitzSignature2016\": \"sec:EcdsaKoblitzSignature2016\",\n \"EncryptedMessage\": \"sec:EncryptedMessage\",\n \"GraphSignature2012\": \"sec:GraphSignature2012\",\n \"LinkedDataSignature2015\": \"sec:LinkedDataSignature2015\",\n \"LinkedDataSignature2016\": \"sec:LinkedDataSignature2016\",\n \"CryptographicKey\": \"sec:Key\",\n\n \"authenticationTag\": \"sec:authenticationTag\",\n \"canonicalizationAlgorithm\": \"sec:canonicalizationAlgorithm\",\n \"cipherAlgorithm\": \"sec:cipherAlgorithm\",\n \"cipherData\": \"sec:cipherData\",\n \"cipherKey\": \"sec:cipherKey\",\n \"created\": {\"@id\": \"dc:created\", \"@type\": \"xsd:dateTime\"},\n \"creator\": {\"@id\": \"dc:creator\", \"@type\": \"@id\"},\n \"digestAlgorithm\": \"sec:digestAlgorithm\",\n \"digestValue\": \"sec:digestValue\",\n \"domain\": \"sec:domain\",\n \"encryptionKey\": \"sec:encryptionKey\",\n \"expiration\": {\"@id\": \"sec:expiration\", \"@type\": \"xsd:dateTime\"},\n \"expires\": {\"@id\": \"sec:expiration\", \"@type\": \"xsd:dateTime\"},\n \"initializationVector\": \"sec:initializationVector\",\n \"iterationCount\": \"sec:iterationCount\",\n \"nonce\": \"sec:nonce\",\n \"normalizationAlgorithm\": \"sec:normalizationAlgorithm\",\n \"owner\": {\"@id\": \"sec:owner\", \"@type\": \"@id\"},\n \"password\": \"sec:password\",\n \"privateKey\": {\"@id\": \"sec:privateKey\", \"@type\": \"@id\"},\n \"privateKeyPem\": \"sec:privateKeyPem\",\n \"publicKey\": {\"@id\": \"sec:publicKey\", \"@type\": \"@id\"},\n \"publicKeyPem\": \"sec:publicKeyPem\",\n \"publicKeyService\": {\"@id\": \"sec:publicKeyService\", \"@type\": \"@id\"},\n \"revoked\": {\"@id\": \"sec:revoked\", \"@type\": \"xsd:dateTime\"},\n \"salt\": \"sec:salt\",\n \"signature\": \"sec:signature\",\n \"signatureAlgorithm\": 
\"sec:signingAlgorithm\",\n \"signatureValue\": \"sec:signatureValue\"}}\n\nIDENTITY_CONTEXT_URL = 'https://w3id.org/identity/v1'\nIDENTITY_CONTEXT = {\n \"@context\": {\n \"id\": \"@id\",\n \"type\": \"@type\",\n\n \"cred\": \"https://w3id.org/credentials#\",\n \"dc\": \"http://purl.org/dc/terms/\",\n \"identity\": \"https://w3id.org/identity#\",\n \"perm\": \"https://w3id.org/permissions#\",\n \"ps\": \"https://w3id.org/payswarm#\",\n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\",\n \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\",\n \"sec\": \"https://w3id.org/security#\",\n \"schema\": \"http://schema.org/\",\n \"xsd\": \"http://www.w3.org/2001/XMLSchema#\",\n\n \"Group\": \"https://www.w3.org/ns/activitystreams#Group\",\n\n \"claim\": {\"@id\": \"cred:claim\", \"@type\": \"@id\"},\n \"credential\": {\"@id\": \"cred:credential\", \"@type\": \"@id\"},\n \"issued\": {\"@id\": \"cred:issued\", \"@type\": \"xsd:dateTime\"},\n \"issuer\": {\"@id\": \"cred:issuer\", \"@type\": \"@id\"},\n \"recipient\": {\"@id\": \"cred:recipient\", \"@type\": \"@id\"},\n \"Credential\": \"cred:Credential\",\n \"CryptographicKeyCredential\": \"cred:CryptographicKeyCredential\",\n\n \"about\": {\"@id\": \"schema:about\", \"@type\": \"@id\"},\n \"address\": {\"@id\": \"schema:address\", \"@type\": \"@id\"},\n \"addressCountry\": \"schema:addressCountry\",\n \"addressLocality\": \"schema:addressLocality\",\n \"addressRegion\": \"schema:addressRegion\",\n \"comment\": \"rdfs:comment\",\n \"created\": {\"@id\": \"dc:created\", \"@type\": \"xsd:dateTime\"},\n \"creator\": {\"@id\": \"dc:creator\", \"@type\": \"@id\"},\n \"description\": \"schema:description\",\n \"email\": \"schema:email\",\n \"familyName\": \"schema:familyName\",\n \"givenName\": \"schema:givenName\",\n \"image\": {\"@id\": \"schema:image\", \"@type\": \"@id\"},\n \"label\": \"rdfs:label\",\n \"name\": \"schema:name\",\n \"postalCode\": \"schema:postalCode\",\n \"streetAddress\": \"schema:streetAddress\",\n \"title\": \"dc:title\",\n \"url\": {\"@id\": \"schema:url\", \"@type\": \"@id\"},\n \"Person\": \"schema:Person\",\n \"PostalAddress\": \"schema:PostalAddress\",\n \"Organization\": \"schema:Organization\",\n\n \"identityService\": {\"@id\": \"identity:identityService\", \"@type\": \"@id\"},\n \"idp\": {\"@id\": \"identity:idp\", \"@type\": \"@id\"},\n \"Identity\": \"identity:Identity\",\n\n \"paymentProcessor\": \"ps:processor\",\n \"preferences\": {\"@id\": \"ps:preferences\", \"@type\": \"@vocab\"},\n\n \"cipherAlgorithm\": \"sec:cipherAlgorithm\",\n \"cipherData\": \"sec:cipherData\",\n \"cipherKey\": \"sec:cipherKey\",\n \"digestAlgorithm\": \"sec:digestAlgorithm\",\n \"digestValue\": \"sec:digestValue\",\n \"domain\": \"sec:domain\",\n \"expires\": {\"@id\": \"sec:expiration\", \"@type\": \"xsd:dateTime\"},\n \"initializationVector\": \"sec:initializationVector\",\n \"member\": {\"@id\": \"schema:member\", \"@type\": \"@id\"},\n \"memberOf\": {\"@id\": \"schema:memberOf\", \"@type\": \"@id\"},\n \"nonce\": \"sec:nonce\",\n \"normalizationAlgorithm\": \"sec:normalizationAlgorithm\",\n \"owner\": {\"@id\": \"sec:owner\", \"@type\": \"@id\"},\n \"password\": \"sec:password\",\n \"privateKey\": {\"@id\": \"sec:privateKey\", \"@type\": \"@id\"},\n \"privateKeyPem\": \"sec:privateKeyPem\",\n \"publicKey\": {\"@id\": \"sec:publicKey\", \"@type\": \"@id\"},\n \"publicKeyPem\": \"sec:publicKeyPem\",\n \"publicKeyService\": {\"@id\": \"sec:publicKeyService\", \"@type\": \"@id\"},\n \"revoked\": {\"@id\": \"sec:revoked\", \"@type\": 
\"xsd:dateTime\"},\n \"signature\": \"sec:signature\",\n \"signatureAlgorithm\": \"sec:signatureAlgorithm\",\n \"signatureValue\": \"sec:signatureValue\",\n \"CryptographicKey\": \"sec:Key\",\n \"EncryptedMessage\": \"sec:EncryptedMessage\",\n \"GraphSignature2012\": \"sec:GraphSignature2012\",\n \"LinkedDataSignature2015\": \"sec:LinkedDataSignature2015\",\n\n \"accessControl\": {\"@id\": \"perm:accessControl\", \"@type\": \"@id\"},\n \"writePermission\": {\"@id\": \"perm:writePermission\", \"@type\": \"@id\"}\n }\n}\n \n\n_get_values = jsonld.JsonLdProcessor.get_values\ndef _get_value(obj, key):\n try:\n return _get_values(obj, key)[0]\n # A bit more accurate since we're trying to pull a value out of a specific\n # key, and nothing exists for this one\n except IndexError:\n raise KeyError(key)\n_has_value = jsonld.JsonLdProcessor.has_value\n\n\ndef _make_simple_loader(url_map, load_unknown_urls=True,\n cache_externally_loaded=True):\n def _make_context(url, doc):\n return {\n \"contextUrl\": None,\n \"documentUrl\": url,\n \"document\": doc}\n\n # Wrap in the structure that's expected to come back from the\n # documentLoader\n _pre_url_map = {}\n _pre_url_map.update(url_map)\n _url_map = {\n url: _make_context(url, doc)\n for url, doc in _pre_url_map.items()}\n\n def loader(url):\n if url in _url_map:\n return _url_map[url]\n elif load_unknown_urls:\n doc = jsonld.load_document(url)\n # @@: Is this optimization safe in all cases?\n if isinstance(doc[\"document\"], str):\n doc[\"document\"] = json.loads(doc[\"document\"])\n _url_map[url] = doc\n return doc\n else:\n raise jsonld.JsonLdError(\n \"url not found and loader set to not load unknown URLs.\",\n {'url': url})\n\n return loader\n\n_security_context_loader = _make_simple_loader(\n {SECURITY_CONTEXT_URL: SECURITY_CONTEXT,\n IDENTITY_CONTEXT_URL: IDENTITY_CONTEXT})\n\n# @@: Shouldn't this be a mapping from these names to their actual\n# functionality? 
Seems kludgy to have all these if-elif-else things\n# as interspersed through the document...\n# Okay, answer is yes\n\n# TODO: Make these JsonLdErrors\n# class LdsError(jsonld.JsonLdError): pass\n# class LdsTypeError(LdsError, TypeError): pass\nclass LdsError(Exception): pass\nclass LdsTypeError(LdsError, TypeError): pass\n\ndef is_valid_uri(obj):\n \"\"\"\n Check to see if OBJ is a valid URI\n\n (or at least do the best check we can: that it's a string, and that\n it contains the ':' character.)\n \"\"\"\n return isinstance(obj, str) and \":\" in obj\n\ndef sign(document, options):\n \"\"\"\n Signs a JSON-LD document using a digital signature.\n\n - input: the JSON-LD document to be signed.\n - options: options to use:\n [privateKeyPem] A PEM-encoded private key.\n [creator] the URL to the paired public key.\n [date] an optional date to override the signature date with.\n If provided, must have an \"aware\" timezone\n (.tzinfo not None)\n [domain] an optional domain to include in the signature.\n [nonce] an optional nonce to include in the signature.\n [algorithm] the algorithm to use, eg: 'GraphSignature2012',\n 'LinkedDataSignature2015' (default: 'GraphSignature2012').\n \"\"\"\n options = copy.deepcopy(options)\n\n # TODO: The spec says privateKey, but in jsonld-signatures.js there are\n # these two separate fields...\n options[\"date\"] = options.get(\"date\") or datetime.now(pytz.utc)\n options.setdefault(\"algorithm\", \"GraphSignature2012\")\n\n if not options[\"algorithm\"] in SUITES:\n raise LdsError(\n (\"[jsig.sign] Unsupported algorithm '%s'; options.algorithm must \"\n \"be one of: %s\") % (options[\"algorithm\"], SUITES.keys()))\n\n suite = SUITES[options[\"algorithm\"]]\n options = suite.signature_munge_verify_options(options)\n\n # @@: Do we need this in the sign thing?\n sig_options = {\n \"date\": options[\"date\"]\n }\n if \"nonce\" in options:\n sig_options[\"nonce\"] = options[\"nonce\"]\n if \"domain\" in options:\n sig_options[\"domain\"] = options[\"domain\"]\n formatted = suite.format_for_signature(document, sig_options, options)\n sig_val = suite.sign_formatted(formatted, options)\n\n signature = {\n \"@context\": SECURITY_CONTEXT_URL,\n \"type\": options[\"algorithm\"],\n \"creator\": options[\"creator\"],\n \"created\": options[\"date\"],\n \"signatureValue\": sig_val}\n if \"domain\" in options:\n signature[\"domain\"] = options[\"domain\"]\n if \"nonce\" in options:\n signature[\"nonce\"] = options[\"nonce\"]\n ctx = _get_values(document, \"@context\")\n compacted = jsonld.compact(\n {\"https://w3id.org/security#signature\": signature},\n ctx, options={\n \"documentLoader\": _security_context_loader})\n\n del compacted[\"@context\"]\n \n output = copy.deepcopy(document)\n # @@: Wow, this seems like a terribly kludgy way to get that key,\n # but that's what's done in jsonld-signatures.js. I mean,\n # I guess it should work. I guess this is to avoid that the name may\n # be either expanded or compacted at this point\n signature_key = list(compacted.keys())[0]\n # TODO: support multiple signatures.\n # Same warning as in jsonld-signatures.js! 
;P\n    # We could put this in the suite option?\n    output[signature_key] = compacted[signature_key]\n    return output\n\n\ndef _basic_rsa_signature(formatted, options):\n    private_key = serialization.load_pem_private_key(\n        options[\"privateKeyPem\"],\n        password=None,\n        backend=default_backend())\n    signed = private_key.sign(\n        formatted,\n        # I'm guessing this is the right padding function...?\n        padding.PSS(\n            mgf=padding.MGF1(hashes.SHA256()),\n            salt_length=padding.PSS.MAX_LENGTH),\n        hashes.SHA256())\n    return base64.b64encode(signed).decode(\"utf-8\")\n\n\ndef _getDataToHash_2012_2015(input, sig_options, options):\n    # TODO: These are two separate algorithms, so we should separate them\n    to_hash = \"\"\n    if options[\"algorithm\"] == \"GraphSignature2012\":\n        if \"nonce\" in sig_options:\n            to_hash += sig_options[\"nonce\"]\n        to_hash += sig_options[\"date\"]\n        to_hash += input\n        if \"domain\" in sig_options:\n            to_hash += \"@\" + sig_options[\"domain\"]\n    else:\n        headers = {\n            \"http://purl.org/dc/elements/1.1/created\": sig_options.get(\"date\"),\n            \"https://w3id.org/security#domain\": sig_options.get(\"domain\"),\n            \"https://w3id.org/security#nonce\": sig_options.get(\"nonce\")};\n        # add headers in lexicographical order\n        for key in sorted(headers.keys()):\n            value = headers[key]\n            if value is not None:\n                to_hash += \"%s: %s\\n\" % (key, value)\n        to_hash += input\n    return to_hash.encode(\"utf-8\")\n\n\ndef _w3c_date(dt):\n    # We may need to convert it to UTC\n    if dt.tzinfo is not pytz.utc:\n        dt = dt.astimezone(pytz.utc)\n\n    return isodate.datetime_isoformat(dt)\n\n\f\n# Verification\n\ndef verify(signed_document, options):\n    \"\"\"\n    Verifies the digital signature on a signed JSON-LD document.\n\n    Args:\n     - input: the JSON-LD document to be verified.\n     - options:\n\n    # TODO: Not all these are implemented yet, and some may be algorithm\n    # specific\n    Options:\n     - publicKey(signature, options): A procedure which, if present, is called\n       to retrieve the public key. Must do all validation that ownership\n       correctly aligns.\n     - checkNonce(nonce, options): a procedure to check if the nonce (null\n       if none) used in the signature is valid.\n     - checkDomain(domain, options): a procedure to check if the domain used\n       (null if none) is valid.\n     - checkKey(key, options): a procedure to check if the key used to sign the\n       message is trusted.\n     - checkKeyOwner(owner, key, options): a procedure to check if the key's\n       owner is trusted.\n     - checkTimestamp: check signature timestamp (default: false).\n     - maxTimestampDelta: signature must be created within a window of\n       this many seconds (default: 15 minutes).\n     - documentLoader(url): the document loader.\n     - id: the ID (full URL) of the node to check the signature of, if\n       the input contains multiple signed nodes.\n    \"\"\"\n    options = copy.copy(options)\n    loader = options.get(\"documentLoader\", _security_context_loader)\n    options.setdefault(\"algorithm\", \"GraphSignature2012\")\n\n    # Here's a TODO copy-pasta'ed from jsonld-signatures.js:\n    # TODO: frame before getting signature, not just compact? 
considerations:\n # should the assumption be that the signature is on the top-level object\n # and thus framing is unnecessary?\n compacted = jsonld.compact(\n signed_document, SECURITY_CONTEXT_URL, options={\n \"documentLoader\": loader})\n\n try:\n signature = _get_values(compacted, \"signature\")[0]\n except IndexError:\n raise LdsError('[jsigs.verify] No signature found.')\n\n try:\n suite_name = _get_values(signature, \"type\")[0]\n except IndexError:\n suite_name = \"\"\n\n if not suite_name in SUITES:\n raise LdsError(\n (\"[jsigs.verify] Unsupported signature algorithm \\\"%s\\\"; \"\n \"supported algorithms are: %s\") % (suite_name,\n SUITES.keys()))\n suite = SUITES[suite_name]\n\n # TODO: Should we be framing here? According to my talks with Dave Longley\n # we probably should, though I don't know how well pyld supports framing\n # and I need to wrap my head around it better\n # @@: So here we have to extract the signature\n\n # @@: 3 before 1 and 2? Well we need it in 1 and 2 :P\n # SPEC (3): Remove any signature nodes from the default graph in\n # document and save it as signature.\n # @@: This isn't recursive, should it be? Also it just handles\n # one value for now.\n # SPEC (2): Let document be a copy of signed document. \n document = copy.deepcopy(compacted)\n signature = document.pop(\"signature\")\n\n # SPEC (1): Get the public key by dereferencing its URL identifier\n # in the signature node of the default graph of signed document.\n # @@: Rest of SPEC(1) in _get_public_key\n get_public_key = options.get(\"publicKey\", _get_public_key)\n public_key = get_public_key(signature, options)\n\n # SPEC (5): Create a value tbv that represents the data to be\n # verified, and set it to the result of running the Create Verify\n # Hash Algorithm, passing the information in signature.\n # TODO: This doesn't look like the same verification step\n # being done in the signature step as ported from jsonld-signatures.js\n # It looks like what step we do here should be farmed out depending\n # on the signature suite used.\n # @@: Maybe sig_options should be munged by the suite?\n sig_options = {}\n if \"publicKeyPem\" in public_key:\n sig_options[\"publicKeyPem\"] = _get_value(public_key, \"publicKeyPem\")\n if \"publicKeyWif\" in public_key:\n sig_options[\"publicKeyWif\"] = _get_value(public_key, \"publicKeyWif\")\n if \"nonce\" in signature:\n sig_options[\"nonce\"] = _get_value(signature, \"nonce\")\n if \"domain\" in signature:\n sig_options[\"domain\"] = _get_value(signature, \"domain\")\n # @@: Why isn't this also \"created\"?\n sig_options[\"date\"] = _get_value(signature, \"created\")\n\n tbv = suite.format_for_signature(document, sig_options, options)\n\n # SPEC (6): Pass the signatureValue, tbv, and the public key to\n # the signature algorithm (e.g. JSON Web Signature using\n # RSASSA-PKCS1-v1_5 algorithm). 
Return the resulting boolean\n # value.\n return suite.verify_formatted(signature, tbv, public_key, options)\n\n\ndef _get_public_key(signature, options):\n def _id_of(obj):\n if isinstance(obj, str):\n return obj\n return obj.get(\"@id\") or obj.get(\"id\")\n\n creator_id = _id_of(_get_value(signature, \"creator\"))\n if not creator_id:\n raise LdsError(\n '[jsigs.verify] creator not found on signature.')\n\n creator = _get_security_compacted_jsonld(creator_id, options)\n if not \"publicKey\" in creator:\n raise LdsError(\n '[jsigs.verify] publicKey not found on creator object')\n\n # @@: What if it's a fragment identifier on an embedded object?\n public_key_id = _get_value(creator, \"publicKey\")\n public_key = _get_security_compacted_jsonld(\n public_key_id, options)\n\n owners = _get_values(public_key, \"owner\")\n\n # SPEC (1): Confirm that the linked data document that describes\n # the public key specifies its owner and that its owner's URL\n # identifier can be dereferenced to reveal a bi-directional link\n # back to the key.\n if not creator_id in owners:\n raise LdsError(\n '[jsigs.verify] The public key is not owned by its declared owner.')\n\n # SPEC (1): Ensure that the key's owner is a trusted entity before\n # proceeding to the next step.\n check_key_owner = options.get(\"checkKeyOwner\")\n if check_key_owner and not check_key_owner(signature, public_key, options):\n raise LdsError(\n '[jsigs.verify] The owner of the public key is not trusted.')\n\n return public_key\n\n\ndef _security_compact(document, options):\n loader = options.get(\"documentLoader\", _security_context_loader)\n return jsonld.compact(document, SECURITY_CONTEXT_URL,\n options={\"documentLoader\": loader})\n\ndef _get_jsonld(id, options):\n if isinstance(id, dict):\n id = id.get(\"id\") or id.get(\"@id\")\n if not id:\n raise ValueError(\"Tried to fetch object with no id: %s\" % id)\n loader = options.get(\"documentLoader\", _security_context_loader)\n return loader(id)[\"document\"]\n\ndef _get_security_compacted_jsonld(id, options):\n return _security_compact(_get_jsonld(id, options), options)\n\n\n# TODO: Are we actually passing in multiple aglgorithms for message\n# canonicalization *and* message digest?\ndef create_verify_hash(document, suite, options,\n options_to_canonicalize):\n \"\"\"\n \n \"\"\"\n normalized_input = suite.normalize_jsonld(document, options)\n\n # SPEC (1): Let options be a copy of input options.\n options_to_canonicalize = copy.deepcopy(options_to_canonicalize)\n\n # SPEC (2): If type, id, or signatureValue exists in options,\n # remove the entry.\n # @@: Well since we're specifically passing these in to this procedure\n # I guess we don't need to do that...\n\n # SPEC (3): If created does not exist in options, add an entry\n # with a value that is an ISO8601 combined date and time string\n # containing the current date and time accurate to at least one\n # second, in Universal Time Code format. For example:\n # 2017-11-13T20:21:34Z.\n if not \"created\" in options_to_canonicalize:\n options_to_canonicalize[\"created\"] = _w3c_date(datetime.now(pytz.utc))\n\n # SPEC (4): Generate output by: \n # SPEC (4.1): Creating a canonicalized options document by\n # canonicalizing options according to the canonicalization\n # algorithm (e.g. the GCA2015 [RDF-DATASET-NORMALIZATION]\n # algorithm). 
\n # Well, we need to add the context first:\n options_to_canonicalize[\"@context\"] = SECURITY_CONTEXT_URL\n canonical_options = suite.normalize_jsonld(\n options_to_canonicalize, options)\n\n # SPEC (4.2): Hash canonicalized options document using the\n # message digest algorithm (e.g. SHA-256) and set output to the\n # result.\n output = suite.message_digest(canonical_options, options)\n\n # SPEC (4.3): Hash canonicalized document using the message digest\n # algorithm (e.g. SHA-256) and append it to output.\n output += suite.message_digest(normalized_input, options)\n\n # SPEC (5): Hash output using the message digest algorithm\n # (e.g. SHA-256) and replace it with the result.\n output = suite.message_digest(output, options)\n\n # SPEC (6): Return output. \n return output\n\ndef _rsa_verify_sig(sig_value, formatted, public_key_jsonld):\n \"\"\"\n - sig_value: data to be verified\n - public_key: creator of this document's public_key \n - tbv: to be verified\n \"\"\"\n # TODO: Support other formats than just PEM\n public_key = serialization.load_pem_public_key(\n _get_value(public_key_jsonld, \"publicKeyPem\").encode(\"utf-8\"),\n backend=default_backend())\n\n try:\n public_key.verify(\n base64.b64decode(sig_value.encode(\"utf-8\")), formatted,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH),\n hashes.SHA256())\n return True\n except InvalidSignature:\n return False\n\n\f\n# In the future, we'll be doing a lot more work based on what suite is\n# selected.\n\ndef signature_common_munge_verify(options):\n if not is_valid_uri(options[\"creator\"]):\n raise LdsTypeError(\n \"[jsig.sign] options.creator must be a URL string.\")\n\n if \"domain\" in options and not is_valid_uri(options[\"domain\"]):\n raise LdsTypeError(\n \"[jsig.sign] options.domain must be a string.\")\n\n if \"nonce\" in options and not is_valid_uri(options[\"nonce\"]):\n raise LdsTypeError(\n \"[jsig.sign] options.nonce must be a string.\")\n\n if not isinstance(options[\"date\"], str):\n options[\"date\"] = _w3c_date(options[\"date\"])\n\n return options\n\nclass SignatureSuite():\n name = None\n\n @classmethod\n def signature_munge_verify_options(cls, options):\n options = signature_common_munge_verify(options)\n return options\n\n @classmethod\n def normalize_jsonld(cls, document, options):\n raise NotImplementedError()\n\n @classmethod\n def format_for_signature(cls, document, sig_options, options):\n raise NotImplementedError()\n\n @classmethod\n def sign_formatted(cls, formatted, options):\n raise NotImplementedError()\n\n @classmethod\n def verify_formatted(cls, formatted, options):\n raise NotImplementedError()\n\n\ndef _format_gs_2012_ld_2015(suite, document, sig_options, options):\n normalized = suite.normalize_jsonld(document, options)\n\n if len(normalized) == 0:\n raise LdsError(\n ('[jsig.sign] '\n 'The data to sign is empty. This error may be because a '\n '\"@context\" was not supplied in the input thereby causing '\n 'any terms or prefixes to be undefined. 
'\n                'Input: %s') % (json.dumps(document)))\n    return _getDataToHash_2012_2015(normalized, sig_options, options)\n\n\nclass GraphSignature2012(SignatureSuite):\n    name = \"GraphSignature2012\"\n\n    @classmethod\n    def format_for_signature(cls, document, sig_options, options):\n        return _format_gs_2012_ld_2015(cls, document, sig_options, options)\n\n    @classmethod\n    def normalize_jsonld(cls, document, options):\n        return jsonld.normalize(\n            document,\n            {\"algorithm\": \"URGNA2012\",\n             \"format\": \"application/nquads\",\n             \"documentLoader\": options.get(\"documentLoader\",\n                                           _security_context_loader)})\n\n    @classmethod\n    def sign_formatted(cls, formatted, options):\n        return _basic_rsa_signature(formatted, options)\n\n    @classmethod\n    def verify_formatted(cls, signature, formatted, public_key_jsonld, options):\n        return _rsa_verify_sig(\n            _get_value(signature, \"signatureValue\"),\n            formatted, public_key_jsonld)\n\n\nclass LinkedDataSignature2015(SignatureSuite):\n    name = \"LinkedDataSignature2015\"\n\n    @classmethod\n    def normalize_jsonld(cls, document, options):\n        return jsonld.normalize(\n            document, {\"algorithm\": \"URDNA2015\",\n                       \"format\": \"application/nquads\"})\n\n    @classmethod\n    def format_for_signature(cls, document, sig_options, options):\n        return _format_gs_2012_ld_2015(cls, document, sig_options, options)\n\n    @classmethod\n    def sign_formatted(cls, formatted, options):\n        return _basic_rsa_signature(formatted, options)\n\n\nclass EcdsaKoblitzSignature2016(SignatureSuite):\n    name = \"EcdsaKoblitzSignature2016\"\n\n    @classmethod\n    def signature_munge_verify_options(cls, options):\n        options = signature_common_munge_verify(options)\n\n        if not isinstance(options.get(\"privateKeyWif\"), str):\n            raise LdsTypeError(\n                \"[jsig.sign] options.privateKeyWif must be a base 58 \"\n                \"formatted string.\")\n        elif not isinstance(options.get(\"privateKeyPem\"), str):\n            raise LdsTypeError(\n                \"[jsig.sign] options.privateKeyPem must be a PEM \"\n                \"formatted string.\")\n\n        return options\n\nclass LinkedDataSignature2016(SignatureSuite):\n    name = \"LinkedDataSignature2016\"\n    \n\n\n\nSUITES = {\n    s.name: s\n    for s in [GraphSignature2012,\n              LinkedDataSignature2015,\n              # EcdsaKoblitzSignature2016,\n              ]}\n\n", "repo_name": "Spec-Ops/pyld-signatures", "sub_path": "pyld_sig/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 27253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyld.jsonld.JsonLdProcessor", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pyld.jsonld", "line_number": 153, "usage_type": "name"}, {"api_name": "pyld.jsonld.JsonLdProcessor", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pyld.jsonld", "line_number": 161, "usage_type": "name"}, {"api_name": "pyld.jsonld.load_document", "line_number": 184, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 184, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 187, "usage_type": "call"}, {"api_name": "pyld.jsonld.JsonLdError", "line_number": 191, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 191, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 241, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 241, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 241, "usage_type": "attribute"}, {"api_name": "pyld.jsonld.compact", "line_number": 
274, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 274, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 281, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization.load_pem_private_key", "line_number": 295, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 295, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 298, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS", "line_number": 302, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 302, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1", "line_number": 303, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 303, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.hashes.SHA256", "line_number": 303, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.hashes", "line_number": 303, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS", "line_number": 304, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 304, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.hashes.SHA256", "line_number": 305, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.hashes", "line_number": 305, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 306, "usage_type": "call"}, {"api_name": "pytz.utc", "line_number": 335, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 336, "usage_type": "attribute"}, {"api_name": "isodate.datetime_isoformat", "line_number": 338, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 372, "usage_type": "call"}, {"api_name": "pyld.jsonld.compact", "line_number": 380, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 380, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 412, "usage_type": "call"}, {"api_name": "pyld.jsonld.compact", "line_number": 493, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 493, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 518, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 531, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 531, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 531, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.serialization.load_pem_public_key", "line_number": 566, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.serialization", "line_number": 566, "usage_type": "name"}, {"api_name": "cryptography.hazmat.backends.default_backend", "line_number": 568, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 572, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS", "line_number": 573, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 573, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.MGF1", "line_number": 574, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 574, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.hashes.SHA256", 
"line_number": 574, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.hashes", "line_number": 574, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding.PSS", "line_number": 575, "usage_type": "attribute"}, {"api_name": "cryptography.hazmat.primitives.asymmetric.padding", "line_number": 575, "usage_type": "name"}, {"api_name": "cryptography.hazmat.primitives.hashes.SHA256", "line_number": 576, "usage_type": "call"}, {"api_name": "cryptography.hazmat.primitives.hashes", "line_number": 576, "usage_type": "name"}, {"api_name": "cryptography.exceptions.InvalidSignature", "line_number": 578, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 637, "usage_type": "call"}, {"api_name": "pyld.jsonld.normalize", "line_number": 650, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 650, "usage_type": "name"}, {"api_name": "pyld.jsonld.normalize", "line_number": 673, "usage_type": "call"}, {"api_name": "pyld.jsonld", "line_number": 673, "usage_type": "name"}]} +{"seq_id": "69997854236", "text": "#!/usr/bin/env python3\n\n\"\"\"\nExecutable. \nPlot the residuals from an OpenFOAM run. \nFirst run the following OpenFOAM utilities in the OpenFOAM case directory:\n >> foamLog \nWhere is the file where the log (screen output) was saved\n(redirected, e.g. log.simpleFoam in \"simpleFoam >> log.simpleFoam\")\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n# dir = input('logs directory: ')\ndir = 'logs'\n\nfiles = [\n 'Ux_0',\n 'Uy_0',\n 'Uz_0',\n 'p_0',\n 'k_0',\n 'epsilon_0',\n 'omega_0',\n 'Uax_0',\n 'Uay_0',\n 'Uaz_0',\n 'pa_0'\n ]\n\nplt.figure()\nfor file in files:\n try:\n data = np.loadtxt(os.path.join(dir, file))\n plt.semilogy(data[:, 0], data[:, 1], label=file, alpha=0.5)\n except:\n pass\nplt.legend()\n\nplt.savefig('residuals.pdf')\nplt.savefig('residuals.png')\nplt.show()", "repo_name": "xiaoh/DAFI", "sub_path": "ensemble-learning/code/utilities/plot_residuals.py", "file_name": "plot_residuals.py", "file_ext": "py", "file_size_in_byte": 860, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 38, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.semilogy", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "29272129260", "text": "import os\nimport logging\nfrom http.server import HTTPServer\nfrom http.server import 
BaseHTTPRequestHandler\n\n\nlogging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n\nCRLF = '\\r\\n'\n\n\nhome_page_content = b\"\"\nwith open(os.path.join(\"templates\", \"index.html\"), 'rb') as f:\n home_page_content = f.read()\n\ndef map_path_to_file(path):\n return path[1:].replace('/', '\\\\')\n\n\nclass ContentLengthFrontEndServer(BaseHTTPRequestHandler):\n def _set_headers(self):\n self.send_response(200)\n self.end_headers()\n\n def do_GET(self):\n self._set_headers()\n file = map_path_to_file(self.path)\n if os.path.exists(file):\n with open(file, 'rb') as f:\n self.wfile.write(f.read())\n else:\n self.wfile.write(home_page_content)\n\n def do_POST(self):\n self._set_headers()\n\n request_body = \"\"\n chunk_size = self.read_chunk_size()\n while chunk_size != 0:\n request_body += self.read_chunk(chunk_size)\n chunk_size = self.read_chunk_size()\n terminator = self.rfile.read(len(CRLF)).decode()\n request_body += terminator\n\n self.wfile.write(b\"It works!\")\n \n def read_chunk_size(self):\n line = self.rfile.readline().strip()\n return int(line, 16)\n \n def read_chunk(self, chunk_size):\n chunk = self.rfile.read(chunk_size).decode()\n chunk += self.rfile.read(len(CRLF)).decode()\n return chunk\n\n\ndef run(address):\n logging.info(\"server started on {}\".format(':'.join(map(lambda x: str(x), address))))\n httpd = HTTPServer(address, ContentLengthFrontEndServer)\n httpd.serve_forever()\n\nif __name__ == \"__main__\":\n from sys import argv\n\n if len(argv) == 2:\n run(('', int(argv[1])))\n\n elif len(argv) == 3:\n run((argv[1], int(argv[2])))\n \n else:\n run(('', 8081))\n", "repo_name": "blabla1337/skf-labs", "sub_path": "python/HTTP-desync-CLTE-backend-server/http-desync-clte-backend-server.py", "file_name": "http-desync-clte-backend-server.py", "file_ext": "py", "file_size_in_byte": 1894, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 409, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.basicConfig", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 58, "usage_type": "call"}, {"api_name": "http.server.HTTPServer", "line_number": 59, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 66, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 68, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "6630633407", "text": "import pygame\n\nclass Wavepoint():\n\n\n def __init__(self, color, field, index):\n self.color = color\n self.window = field\n self.center = [0, 0]\n self.index = index\n\n\n def setPoint(self):\n end_loop = False\n coordinate = [0, 0]\n\n #draw circles that follow the mouse\n while not end_loop:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n\n coordinate = pygame.mouse.get_pos()\n pygame.draw.circle(self.window.return_screen(), self.color, coordinate, 5)\n self.window.background_refresh()\n\n if pygame.key.get_pressed()[13]:\n end_loop = 
True\n\n\n #update the class variables\n self.center = coordinate\n self.window.clear()\n\n\n\n def draw_point(self):\n pygame.draw.circle(self.window.return_screen(), self.color, self.center, 5)\n self.window.background_refresh()\n\n\n def return_position(self):\n return self.center\n\n\n", "repo_name": "Vergil-SLI/2022-pathing-simulation", "sub_path": "wavepoint.py", "file_name": "wavepoint.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pygame.event.get", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "3632819395", "text": "import argparse\nimport hashlib\nimport os\nimport sys\n\nimport prestodb\n\n\n# perf script -s perf-script.py -F+srcline --full-source-path --strip-cxx-templates\n\nBATCH_SIZE = 50\n\n\ndef get_random_table_name():\n return \"T\" + hashlib.sha1(os.urandom(512)).hexdigest()\n\n\nclass CLI:\n def __init__(self, args):\n self.args = args\n\n if self.args.table is None:\n self.args.table = get_random_table_name()\n\n def trace_begin(self):\n connection = prestodb.dbapi.connect(\n host=self.args.host,\n port=self.args.port,\n user=\"perf\",\n catalog=\"memory\",\n schema=\"default\",\n )\n\n self.cursor = connection.cursor()\n self.cursor.execute(\n f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.args.table} (\n event VARCHAR,\n timestamp BIGINT, -- TODO: Figure out how to get real time timestamps.\n comm VARCHAR,\n stack ARRAY(VARCHAR),\n srclines ARRAY(VARCHAR)\n )\n \"\"\"\n )\n if self.cursor.fetchone()[0]:\n print(f\"New table created: {self.args.table}\")\n else:\n print(f\"Using existing table: {self.args.table}\")\n\n self.count = 0\n self.batch = []\n\n def process_event(self, event):\n assert \"callchain\" in event\n\n stack = []\n srclines = []\n for frame in reversed(event[\"callchain\"]):\n stack.append(frame.get(\"sym\", {}).get(\"name\", \"[unknown]\"))\n srcline = \"[unknown]\"\n if \"sym_srcline\" in frame:\n # There is a colon in the path, but realpath will end up ignoring it.\n srcline = os.path.realpath(frame[\"sym_srcline\"])\n srclines.append(srcline)\n\n self.batch.append(\n f\"\"\"(\n '{event[\"ev_name\"]}',\n {event.get(\"sample\", {}).get(\"time\", 0)},\n '{event[\"comm\"]}',\n ARRAY{stack},\n ARRAY{srclines}\n )\n \"\"\"\n )\n\n self.count += 1\n\n if self.count % BATCH_SIZE == 0:\n self.__batch()\n print(f\"Processed {self.count} rows\")\n\n def trace_end(self):\n if len(self.batch) > 0:\n self.__batch()\n self.cursor.close()\n print(f\"Inserted {self.count} rows into {self.args.table}\")\n\n def __batch(self):\n self.cursor.execute(\n f\"INSERT INTO {self.args.table} VALUES {','.join(self.batch)}\"\n )\n count = self.cursor.fetchone()[0]\n assert count == 
len(self.batch)\n self.batch.clear()\n\n\nif __name__ == \"__main__\":\n if \"PERF_EXEC_PATH\" not in os.environ:\n print(\"This must be run with perf script, e.g. perf script -s perf-script.py\")\n sys.exit(1)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--host\", metavar=\"HOST\", type=str, default=\"localhost\")\n parser.add_argument(\"--port\", metavar=\"PORT\", type=int, default=8080)\n parser.add_argument(\n \"-t\",\n \"--table\",\n type=str,\n default=None,\n help=\"Insert entries into a specific table\",\n )\n\n cli = CLI(parser.parse_args())\n\n trace_begin = cli.trace_begin\n process_event = cli.process_event\n trace_end = cli.trace_end\n", "repo_name": "nicovank/microperf", "sub_path": "perf-script.py", "file_name": "perf-script.py", "file_ext": "py", "file_size_in_byte": 3310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "hashlib.sha1", "line_number": 15, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 15, "usage_type": "call"}, {"api_name": "prestodb.dbapi.connect", "line_number": 26, "usage_type": "call"}, {"api_name": "prestodb.dbapi", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 100, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 102, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "32205770826", "text": "# -*- coding : utf-8 -*-\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import precision_recall_curve\nfrom sklearn.feature_extraction import DictVectorizer\nimport matplotlib.pyplot as plt\nfrom chap_08 import ch08_78\n\"79. 
Drawing the precision-recall curve\"\n\n\ndef main():\n    X_text,y = ch08_78.make_x_y_text()\n    vec = DictVectorizer(sparse=False)\n    X_array = vec.fit_transform(X_text)\n    X_train, X_test, y_train, y_test = train_test_split(X_array, y, test_size=0.2)\n    lr = LogisticRegression()\n    lr.fit(X_train, y_train)\n    y_score = lr.decision_function(X_test)\n    average_precision = average_precision_score(y_test, y_score)\n    precision, recall, _ = precision_recall_curve(y_test, y_score)\n\n    # Plot settings\n    plt.step(recall, precision, color='b', alpha=0.2, where='post')\n    plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n\n    plt.xlabel('Recall')\n    plt.ylabel('Precision')\n    plt.ylim([0.0, 1.05])\n    plt.xlim([0.0, 1.0])\n    plt.title('2-class Precision-Recall curve: AUC={0:0.2f}'.format(average_precision))\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "shihono/100-knock", "sub_path": "chap_08/ch08_79.py", "file_name": "ch08_79.py", "file_ext": "py", "file_size_in_byte": 1254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chap_08.ch08_78.make_x_y_text", "line_number": 13, "usage_type": "call"}, {"api_name": "chap_08.ch08_78", "line_number": 13, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.metrics.average_precision_score", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.step", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "27132683488", "text": "import argparse\nimport os\n\nimport gym\nimport numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\n\nfrom network_models.policy_net import Policy_net\n\n\ndef argparser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--modeldir\", help=\"\", default=\"trained_models\")\n    parser.add_argument(\"--alg\", help=\"\", default=\"gail\")\n    parser.add_argument(\"--model\", help=\"\", default=\"\")\n    parser.add_argument(\"--logdir\", help=\"\", default=\"log/test\")\n    parser.add_argument(\"--iteration\", help=\"\", default=int(1e3))\n    parser.add_argument(\"--stochastic\", help=\"\", action=\"store_false\")\n    parser.add_argument(\"--gpu_num\", help=\"specify GPU number\", default=\"0\", type=str)\n    return parser.parse_args()\n\n\ndef main(args):\n    # prepare log dir\n    if not os.path.exists(args.logdir):\n        os.makedirs(args.logdir)\n    # create the gym environment\n    env = gym.make(\"CartPole-v0\")\n    # policy net\n    Policy = Policy_net(\"policy\", env)\n    # tensorflow saver\n    saver = tf.train.Saver()\n\n    # session config\n    config = tf.ConfigProto(\n        gpu_options=tf.GPUOptions(visible_device_list=args.gpu_num, allow_growth=True)\n    )\n    # start session\n    with tf.Session(config=config) as sess:\n        # summary writer\n        writer = tf.summary.FileWriter(args.logdir, sess.graph)\n        # initialize the session\n        sess.run(tf.global_variables_initializer())\n        # load the trained model\n        if args.model == \"\":\n            saver.restore(sess, args.modeldir + \"/\" + args.alg + \"/\" + \"model.ckpt\")\n        else:\n            # select the model number\n            saver.restore(\n                sess, args.modeldir + \"/\" + args.alg + \"/\" + \"model.ckpt-\" + args.model\n            )\n        # initialize the state\n        obs = env.reset()\n        success_num = 0\n\n        # episode loop\n        for iteration in tqdm(range(args.iteration)):\n            rewards = []\n            run_policy_steps = 0\n            # run episode\n            while True:\n                run_policy_steps += 1\n                # prepare to feed placeholder Policy.obs\n                # convert obs for the network input\n                obs = np.stack([obs]).astype(dtype=np.float32)\n                # estimate the action and the value\n                act, _ = Policy.act(obs=obs, stochastic=args.stochastic)\n\n                # convert the single-element array to a scalar\n                act = np.asscalar(act)\n\n                # update the state with the action estimated by the policy net\n                next_obs, reward, done, info = env.step(act)\n\n                # append this episode's variables\n                rewards.append(reward)\n\n                # check whether the episode has ended\n                # if it has ended, reset and start the next episode\n                if done:\n                    obs = env.reset()\n                    reward = -1\n                    break\n                else:\n                    obs = next_obs\n\n            # add summaries\n            writer.add_summary(\n                tf.Summary(\n                    value=[\n                        tf.Summary.Value(\n                            tag=\"episode_length\", simple_value=run_policy_steps\n                        )\n                    ]\n                ),\n                iteration,\n            )\n            writer.add_summary(\n                tf.Summary(\n                    value=[\n                        tf.Summary.Value(\n                            tag=\"episode_reward\", simple_value=sum(rewards)\n                        )\n                    ]\n                ),\n                iteration,\n            )\n\n            # check whether the episode succeeded\n            if sum(rewards) >= 195:\n                success_num += 1\n                # end the episode loop after 100 consecutive successes\n                if success_num >= 100:\n                    print(\"Iteration: \", iteration)\n                    print(\"Clear!!\")\n                    break\n            else:\n                success_num = 0\n\n        writer.close()\n\n\nif __name__ == \"__main__\":\n    args = argparser()\n    main(args)\n", "repo_name": "ykamikawa/tensorflow-gail", "sub_path": "test_policy.py", "file_name": "test_policy.py", "file_ext": "py", "file_size_in_byte": 4064, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 27, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 29, "usage_type": "call"}, {"api_name": "network_models.policy_net.Policy_net", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 33, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.GPUOptions", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.summary.FileWriter", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.summary", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 44, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.asscalar", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.Summary.Value", "line_number": 92, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 92, "usage_type": "attribute"}, {"api_name": "tensorflow.Summary", "line_number": 100, "usage_type": "call"}, {"api_name": "tensorflow.Summary.Value", "line_number": 102, "usage_type": "call"}, {"api_name": "tensorflow.Summary", "line_number": 102, "usage_type": "attribute"}]}
+{"seq_id": "17366493431", "text": "import requests\nimport time\nimport os\n\ndef fotoCek():\n    os.system(\"fswebcam --save a.jpg\")\n    files = {'file': open('a.jpg', 'rb')}\n    z = requests.post(\"https://455.eu/\", files=files)\n    print (z.content)\n\nwhile True:\n    sT = open(\"x.txt\", \"r\")\n    gf = sT.read()\n    sT.close()\n\n    lastFoto = requests.get(\"http://455.eu/static/t.txt\").text\n\n    if gf in lastFoto:\n        print (\"same\")\n    else:\n        print (lastFoto)\n        saveTime = open(\"x.txt\", 'w')\n        saveTime.write(lastFoto)\n        saveTime.close()\n        fotoCek()\n", "repo_name": "niyoseris/flaskcam", "sub_path": "client-side.py", "file_name": "client-side.py", "file_ext": "py", "file_size_in_byte": 554, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.system", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}]}
+{"seq_id": "41595323335", "text": "# coding:utf-8\nimport time\nimport urllib\nimport urllib2\nimport json\nimport requests\nimport StringIO\nimport gzip\nfrom poster.encode import multipart_encode\nfrom poster.streaminghttp import register_openers\nfrom protos import log_request_pb2\n\n# register_openers()\n# datagen, headers = multipart_encode()\n# print datagen, headers\n\nurl = \"http://192.168.9.41:8080/m/v1/log/\"\nlr = log_request_pb2.LogRequest()\n# data = {\"sysVersion\": \"6.0\", \"apkVersion\": \"1.2.3\",\n#         \"channelID\": \"843984932\", \"versionCode\": \"1\", \"data\": \"item0\"}\ndata = \"item2\"\narg = {\n    \"application_id\": \"com.apkpure.aegon\",\n    \"client_version\": int(883),\n    \"device_brand\": \"google\",\n    \"device_model\": \"Nexus 5\",\n    \"flavor\": \"aa\",\n    \"sdk_version\": int(23),\n    # \"supported_abis[]\": [\"armeabi-v7a\", \"armeabi\"]\n    # \"supported_abis\": [\"armeabi-v7a\", \"armeabi\", \"22\", \"33\"]\n}\n\n\ndef dataGzip(data):\n    s = StringIO.StringIO()\n    g = gzip.GzipFile(fileobj=s, mode='w')\n    g.write(data)\n    g.close()\n    gzipped_body = s.getvalue()\n    # print('len:',s.len)\n    request_body = gzipped_body\n    return request_body\n\n\ndef init():\n    global lr\n    for i in range(0, 20):\n        l = log_request_pb2.Log()\n        l.type = str(i)\n        # l.logData = json.dumps(data)\n        l.logData = str(data)\n        # l.delayTimeMs = int(time.time())\n        l.delayTimeMs = int(10)\n        # l.delayTimeMs = 128\n        # print(l.delayTimeMs)\n        lr.logs._values.append(l)\n\n\ndef test_zip(lr):\n    if not isinstance(lr, log_request_pb2.LogRequest):\n        return\n    s = lr.SerializeToString()\n    # arg[\"argument\"] = s\n    global url\n    url = url + \"?\" + urllib.urlencode(arg)\n    print(\"url:\", url)\n    req = urllib2.Request(url)\n    req.add_header('Accept-encoding', 'gzip')\n    # send the gzip-compressed data\n    print(\"slenunzip:\", len(s))\n    s = dataGzip(s)\n    print(\"s:\", s)\n    print(\"slenzip:\", len(s))\n    resp = requests.post(url, s, headers=req.headers)\n    print(resp.content)\n    print(resp)\n\n\ndef test_zip_arg(lr):\n    if not isinstance(lr, log_request_pb2.LogRequest):\n        return\n    s = lr.SerializeToString()\n    global url\n    url = url + \"?\" + urllib.urlencode(arg)\n    url = \"http://localhost:8080/m/v1/log/?\" \\\n        \"application_id=com.apkpure.aegon&device_model=Nexus%205&\" \\\n        \"supported_abis=%27armeabi-v7a%27&supported_abis=%20%27armeabi%27&supported_abis=%20%2722%27&\" \\\n        \"sdk_version=23&device_brand=google&client_version=111&flavor=aa\"\n    req = urllib2.Request(url)\n    print(\"url:\", url)\n    req.add_header('Accept-encoding', 'gzip')\n    s = dataGzip(s)\n    resp = requests.post(url, s, headers=req.headers)\n    print(resp.content)\n    print(resp)\n\n\ndef test_zip_arg_json(lr):\n    if not isinstance(lr, log_request_pb2.LogRequest):\n        return\n    # delay time\n    # for l in lr.logs._values:\n    #\n    s = lr.SerializeToString()\n    global url\n    # url = \"{}?arg={}\".format(url,json.dumps(arg))\n    req = urllib2.Request(url)\n    req.add_header('Accept-encoding', 'gzip')\n    # send the gzip-compressed data\n    s = dataGzip(s)\n    resp = requests.post(url, s, headers=req.headers)\n    print(resp.content)\n    print(resp)\n    pass\n\n\ndef main():\n    global lr\n    init()\n    # test_zip(lr)\n    test_zip_arg(lr)\n    # test_zip_arg_json(lr)\n\nif __name__ == '__main__':\n    main()\n", "repo_name": "tphyhFighting/docker", "sub_path": "protobuf/python/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "protos.log_request_pb2.LogRequest", "line_number": 18, "usage_type": "call"}, {"api_name": "protos.log_request_pb2", "line_number": 18, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 35, "usage_type": "call"}, {"api_name": "gzip.GzipFile", "line_number": 36, "usage_type": "call"}, {"api_name": "protos.log_request_pb2.Log", "line_number": 48, "usage_type": "call"}, {"api_name": "protos.log_request_pb2", "line_number": 48, "usage_type": "name"}, {"api_name": "protos.log_request_pb2.LogRequest", "line_number": 60, "usage_type": "attribute"}, {"api_name": "protos.log_request_pb2", "line_number": 60, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 67, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 74, "usage_type": "call"}, {"api_name": "protos.log_request_pb2.LogRequest", "line_number": 80, "usage_type": "attribute"}, {"api_name": "protos.log_request_pb2", "line_number": 80, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 89, "usage_type": "call"}, {"api_name": "requests.post", 
test_zip(lr):\n if not isinstance(lr, log_request_pb2.LogRequest):\n return\n s = lr.SerializeToString()\n # arg[\"argument\"] = s\n global url\n url = url + \"?\" + urllib.urlencode(arg)\n print(\"url:\", url)\n req = urllib2.Request(url)\n req.add_header('Accept-encoding', 'gzip')\n # send the compressed data\n print(\"slenunzip:\", len(s))\n s = dataGzip(s)\n print(\"s:\", s)\n print(\"slenzip:\", len(s))\n resp = requests.post(url, s, headers=req.headers)\n print(resp.content)\n print(resp)\n\n\ndef test_zip_arg(lr):\n if not isinstance(lr, log_request_pb2.LogRequest):\n return\n s = lr.SerializeToString()\n global url\n url = url + \"?\" + urllib.urlencode(arg)\n url = \"http://localhost:8080/m/v1/log/?\" \\\n \"application_id=com.apkpure.aegon&device_model=Nexus%205&\" \\\n \"supported_abis=%27armeabi-v7a%27&supported_abis=%20%27armeabi%27&supported_abis=%20%2722%27&\" \\\n \"sdk_version=23&device_brand=google&client_version=111&flavor=aa\"\n req = urllib2.Request(url)\n print(\"url:\", url)\n req.add_header('Accept-encoding', 'gzip')\n s = dataGzip(s)\n resp = requests.post(url, s, headers=req.headers)\n print(resp.content)\n print(resp)\n\n\ndef test_zip_arg_json(lr):\n if not isinstance(lr, log_request_pb2.LogRequest):\n return\n # delay time\n # for l in lr.logs._values:\n #\n s = lr.SerializeToString()\n global url\n # url = \"{}?arg={}\".format(url,json.dumps(arg))\n req = urllib2.Request(url)\n req.add_header('Accept-encoding', 'gzip')\n # send the compressed data\n s = dataGzip(s)\n resp = requests.post(url, s, headers=req.headers)\n print(resp.content)\n print(resp)\n pass\n\n\ndef main():\n global lr\n init()\n # test_zip(lr)\n test_zip_arg(lr)\n # test_zip_arg_json(lr)\n\nif __name__ == '__main__':\n main()\n", "repo_name": "tphyhFighting/docker", "sub_path": "protobuf/python/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3301, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "protos.log_request_pb2.LogRequest", "line_number": 18, "usage_type": "call"}, {"api_name": "protos.log_request_pb2", "line_number": 18, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 35, "usage_type": "call"}, {"api_name": "gzip.GzipFile", "line_number": 36, "usage_type": "call"}, {"api_name": "protos.log_request_pb2.Log", "line_number": 48, "usage_type": "call"}, {"api_name": "protos.log_request_pb2", "line_number": 48, "usage_type": "name"}, {"api_name": "protos.log_request_pb2.LogRequest", "line_number": 60, "usage_type": "attribute"}, {"api_name": "protos.log_request_pb2", "line_number": 60, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 67, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 74, "usage_type": "call"}, {"api_name": "protos.log_request_pb2.LogRequest", "line_number": 80, "usage_type": "attribute"}, {"api_name": "protos.log_request_pb2", "line_number": 80, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 89, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 93, "usage_type": "call"}, {"api_name": "protos.log_request_pb2.LogRequest", "line_number": 99, "usage_type": "attribute"}, {"api_name": "protos.log_request_pb2", "line_number": 99, "usage_type": "name"}, {"api_name": "urllib2.Request", "line_number": 107, "usage_type": "call"}, {"api_name": "requests.post", 
"line_number": 111, "usage_type": "call"}]} +{"seq_id": "21228795248", "text": "import numpy as np\r\nimport pandas as pd\r\n\r\ntime = 200\r\nd_mu = 100\r\nd_std = 25\r\nd = np.maximum(np.random.normal(d_mu, d_std, time).round(0).astype(int),0)\r\n\r\nfrom scipy.stats import norm\r\nL, R, alpha = 4, 1, 0.95\r\nz = norm.ppf(alpha)\r\nx_std = np.sqrt(L+R)*d_std\r\n\r\nSs = np.round(x_std*z).astype(int)\r\nCs = 1/2 * d_mu * R\r\nIs = d_mu * L\r\nS = Ss + 2*Cs + Is\r\n\r\nhand = np.zeros(time, dtype=int)\r\ntransit = np.zeros((time,L+1), dtype=int)\r\n\r\nstockout_period = np.full(time, False, dtype=bool)\r\nstockout_cycle = []\r\n\r\nhand[0] = S-d[0]\r\ntransit[1,-1]= d[0]\r\n\r\nfor t in range(1,time):\r\n if transit[t-1,0]>0:\r\n stockout_cycle.append(stockout_period[t-1])\r\n hand[t] = hand[t-1]- d[t] + transit[t-1,0]\r\n stockout_period[t] = hand[t] < 0\r\n transit[t,:-1] = transit[t-1,1:]\r\n if 0==t%R:\r\n net = hand[t] + transit[t].sum()\r\n transit[t,L] = S-net\r\n\r\n\r\ndf = pd.DataFrame(data= {'Demand':d, 'On−hand':hand, 'In−transit':list(transit)})\r\ndf = df.iloc[R+L:,:] #Remove initialization periods\r\nprint(df)\r\ndf['On−hand'].plot(title='Inventory Policy (%d,%d)' %(R,S), ylim=(0,S), legend=True)\r\n\r\nprint('Alpha:',alpha*100)\r\nSL_alpha = 1-sum(stockout_cycle)/len(stockout_cycle)\r\nprint('Cycle Service Level:', round(SL_alpha*100,1))\r\nSL_period = 1-sum(stockout_period)/time\r\nprint('Period Service Level:', round(SL_period*100,1))", "repo_name": "Yunbo-max/Mphil-in-ISMM-from-University-of-Cambirge", "sub_path": "Project2_Factory_optimization/inventory optimisation/safety stock.py", "file_name": "safety stock.py", "file_ext": "py", "file_size_in_byte": 1324, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.maximum", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "scipy.stats.norm.ppf", "line_number": 11, "usage_type": "call"}, {"api_name": "scipy.stats.norm", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "22789686759", "text": "#%%\nimport h5py\nimport numpy as np\n\ndef load_wavelets_and_outputs(path_in):\n '''loads wavelets and fourier freqs from input path and stores them as numpy arrays'''\n hdf5_file = h5py.File(path_in, mode='r')\n # print(hdf5_file['/inputs/'].keys())\n wavelets = np.array(hdf5_file['inputs/wavelets'])\n freq_decoder= np.array(hdf5_file['inputs/fourier_frequencies'])\n print('wavelets dimensionality:' + str(wavelets.shape))\n return(wavelets,freq_decoder)\n\n\n\npath_in='processed_R2478.h5'\nwavelets,freq_decoder=load_wavelets(path_in)\n\n\n\n\n\n\n\n# %%\n", "repo_name": "tbhowe/deepinsight_transformer", "sub_path": "h5_test.py", "file_name": "h5_test.py", "file_ext": "py", "file_size_in_byte": 559, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "h5py.File", "line_number": 7, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "43191499485", "text": "#from preprocessing_phase import preprocess\r\nimport heapq\r\nimport nltk\r\nnltk.download('stopwords')\r\nimport random\r\n\r\nfrom textblob import TextBlob\r\nfrom textblob.translate import NotTranslated\r\n#Data Augmentation Technique\r\n\r\nstop_words = nltk.corpus.stopwords.words('greek')\r\nwith open('stopwords.txt') as f:\r\n stop_words = f.read().splitlines()\r\n\r\n\r\nlexicon = [\"άντρας\",\"γυναίκα\",\"παιδί\"]\r\nemb = {'άντρας': [0.5 , 0.3, 0.2], 'γυναίκα': [0.3 , 0.5 , 0.2], 'παιδί': [0.1 , 0.3 , 0.6]}\r\n\r\ntext = ['αλλά','άντρας']\r\n\r\ndef replaceBySynonym(text):\r\n newText = []\r\n for word in text:\r\n if(word in stop_words):\r\n newText.append(word)\r\n else:\r\n synonym_index = [emb[word].index(x) for x in sorted(emb[word], reverse=True)[:2]][1]\r\n synonym = lexicon[synonym_index]\r\n newText.append(synonym)\r\n\r\n return newText\r\n\r\n\r\n\r\ndef replaceByShuffling(text):\r\n newText = text.copy()\r\n random.shuffle(newText)\r\n return newText\r\n\r\ndef replaceByTranslation(text):\r\n newText = []\r\n sr = random.SystemRandom()\r\n for word in text:\r\n wordBlob = TextBlob(word)\r\n try:\r\n wordBlob = wordBlob.translate(to=\"en\") ## Converting to random langauge for meaningful variation\r\n wordBlob = wordBlob.translate(to=\"el\")\r\n except NotTranslated:\r\n pass\r\n\r\n newText.append(str(wordBlob).lower())\r\n\r\n return newText\r\n\r\nprint(text)\r\nprint(\"converts to: \")\r\nprint(replaceBySynonym(text))\r\nprint(replaceByShuffling(text))\r\nprint(replaceByTranslation(text))", "repo_name": "vlavrent/NLP-Book-Classifier", "sub_path": "balancing_phase/balancing.py", "file_name": "balancing.py", "file_ext": "py", "file_size_in_byte": 1609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nltk.download", "line_number": 4, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 11, "usage_type": "call"}, {"api_name": "nltk.corpus", "line_number": 11, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 37, "usage_type": "call"}, {"api_name": "random.SystemRandom", "line_number": 42, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 44, "usage_type": "call"}, {"api_name": "textblob.translate.NotTranslated", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "69899231525", "text": "import pytest\n\nfrom .utils import _expected_path, SAMPLES, _serialize, _process\n\nPAIRS = [(s, _expected_path(s)) for s in SAMPLES]\n\n\n@pytest.mark.parametrize(\"sample, expected\", PAIRS)\ndef test_corpus(sample, expected):\n processed = _process(sample)\n exported = _serialize(processed)\n\n assert expected.read_bytes() == exported\n", "repo_name": "jupyter/papyri", "sub_path": "papyri/tests/test_corpus.py", "file_name": "test_corpus.py", "file_ext": "py", "file_size_in_byte": 336, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 74, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils._expected_path", "line_number": 5, "usage_type": "call"}, {"api_name": "utils.SAMPLES", "line_number": 5, "usage_type": "name"}, {"api_name": "utils._process", "line_number": 10, "usage_type": "call"}, {"api_name": "utils._serialize", "line_number": 11, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", 
"line_number": 8, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}]} +{"seq_id": "37689359949", "text": "\"\"\"kanban URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls import url\nfrom rest_framework import routers\n\nfrom apps.profile.views import IconViewSet, UserViewSet\nfrom apps.main.views import AppView\nfrom apps.kanban.views import (\n BoardViewSet,\n TagViewSet,\n LaneViewSet,\n CardViewSet,\n CommentViewSet,\n SortLaneAPIView,\n SortCardAPIView,\n)\n\nprofile_router = routers.DefaultRouter()\nprofile_router.register(r\"icons\", IconViewSet)\nprofile_router.register(r\"users\", UserViewSet)\n\nkanban_router = routers.DefaultRouter()\nkanban_router.register(r\"boards\", BoardViewSet)\nkanban_router.register(r\"tags\", TagViewSet)\nkanban_router.register(r\"lanes\", LaneViewSet)\nkanban_router.register(r\"cards\", CardViewSet)\nkanban_router.register(r\"comments\", CommentViewSet)\n\nurlpatterns = [\n # Auth urls\n path(\"api/auth/\", include(\"dj_rest_auth.urls\")),\n path(\"api/auth/register/\", include(\"dj_rest_auth.registration.urls\")),\n # Account urls\n path(\"api/profile/\", include(profile_router.urls)),\n # Kanban urls\n path(\"api/kanban/\", include(kanban_router.urls)),\n path(\"api/kanban/sort/lane/\", SortLaneAPIView.as_view(), name=\"sort-lane\"),\n path(\"api/kanban/sort/card/\", SortCardAPIView.as_view(), name=\"sort-card\"),\n # Secret backdoor url\n path(\"arcane/\", admin.site.urls),\n # Load app\n url(r\"^\", AppView.as_view(), name=\"app\"),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "repo_name": "sajalshres/kanban-on-django-marionette", "sub_path": "backend/config/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2174, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 35, "usage_type": "name"}, {"api_name": "apps.profile.views.IconViewSet", "line_number": 36, "usage_type": "argument"}, {"api_name": "apps.profile.views.UserViewSet", "line_number": 37, "usage_type": "argument"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 39, "usage_type": "call"}, {"api_name": "rest_framework.routers", "line_number": 39, "usage_type": "name"}, {"api_name": "apps.kanban.views.BoardViewSet", "line_number": 40, "usage_type": "argument"}, {"api_name": "apps.kanban.views.TagViewSet", "line_number": 41, "usage_type": "argument"}, {"api_name": "apps.kanban.views.LaneViewSet", "line_number": 42, "usage_type": "argument"}, {"api_name": "apps.kanban.views.CardViewSet", "line_number": 43, "usage_type": "argument"}, {"api_name": 
"apps.kanban.views.CommentViewSet", "line_number": 44, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 48, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 48, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 49, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 49, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 51, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 53, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 54, "usage_type": "call"}, {"api_name": "apps.kanban.views.SortLaneAPIView.as_view", "line_number": 54, "usage_type": "call"}, {"api_name": "apps.kanban.views.SortLaneAPIView", "line_number": 54, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 55, "usage_type": "call"}, {"api_name": "apps.kanban.views.SortCardAPIView.as_view", "line_number": 55, "usage_type": "call"}, {"api_name": "apps.kanban.views.SortCardAPIView", "line_number": 55, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 57, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 59, "usage_type": "call"}, {"api_name": "apps.main.views.AppView.as_view", "line_number": 59, "usage_type": "call"}, {"api_name": "apps.main.views.AppView", "line_number": 59, "usage_type": "name"}, {"api_name": "django.conf.urls.static.static", "line_number": 60, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 60, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 60, "usage_type": "attribute"}]} +{"seq_id": "6983085356", "text": "from collections import deque\ndef bfs(i, j):\n q = deque()\n q.append([i, j])\n ally = []\n ally.append([i, j])\n while q:\n r, c = q.popleft()\n for i in range(4):\n nx = r + dx[i]\n ny = c + dy[i]\n if 0 <= nx < N and 0 <= ny < N and visit[nx][ny] == 0:\n gap = abs(graph[nx][ny] - graph[r][c])\n if L <= gap <= R:\n visit[nx][ny] = 1\n q.append([nx, ny])\n ally.append([nx, ny])\n return ally\ndx = [1, -1, 0, 0]\ndy = [0, 0, -1, 1]\nN, L, R = map(int, input().split())\ngraph = [list(map(int, input().split())) for _ in range(N)]\n\ncnt = 0\nwhile True:\n visit = [[0] * N for i in range(N)]\n isFinished = True\n for i in range(N):\n for j in range(N):\n if visit[i][j] == 0: #visited를 bfs 외부에서 마킹하여 여러곳에서의 인구이동을 병렬 처리 (서로 영향 있는 부분만 bfs 내부에서 마킹됨)\n visit[i][j] = 1\n ally = bfs(i, j) #동맹국 리스트\n if len(ally) > 1:\n isFinished = False\n total_num = 0\n for r, c in ally: #인구 이동 처리\n total_num += graph[r][c]\n num = int(total_num / len(ally))\n for r, c in ally:\n graph[r][c] = num\n if isFinished:\n break\n cnt += 1\nprint(cnt)", "repo_name": "jeno8522/Coding-Test-Study", "sub_path": "2022/9월/4주차/백준/BFS/16234_인구이동.py", "file_name": "16234_인구이동.py", "file_ext": "py", "file_size_in_byte": 1482, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "38599416537", "text": 
"\"\"\"\nThis is a daemon, that uploads CV to S3\n\"\"\"\n\nimport os\nimport sys\nfrom datetime import datetime\nfrom django.core.wsgi import get_wsgi_application\n\n# Environment can use the models as if inside the Django app\nsys.path.insert(0, '/'.join(os.getcwd().split('/')[:-1]))\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'testing_webpage.settings')\napplication = get_wsgi_application()\n\nimport boto3\nimport time\nimport urllib.parse\nfrom botocore.exceptions import EndpointConnectionError\nfrom beta_invite.models import User\nfrom decouple import config\nfrom django.db.models import Q\nfrom queue import Queue\nfrom subscribe import helper as h\n\n\ndef cool_print(s):\n print(str(datetime.today()) + ': ' + s)\n\n\n# the actual upload\ndef upload_resource_to_s3(user):\n\n bucket = config('aws_s3_bucket')\n s3_key = user.curriculum_url\n\n session = boto3.session.Session(region_name='us-east-2',\n aws_access_key_id=config('aws_access_key'),\n aws_secret_access_key=config('aws_secret_access_key'))\n\n s3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4'))\n\n try:\n s3client.upload_file(get_local_path(user), bucket, s3_key)\n s3_url = get_s3_path(bucket, s3_key)\n\n cool_print(\"Uploaded: {} to: {}\".format(get_local_path(user), s3_url))\n\n return s3_url\n except FileNotFoundError:\n cool_print('FileNotFoundError: {}'.format(get_local_path(user)))\n cool_print('daemon will continue...')\n return '#'\n except EndpointConnectionError:\n cool_print('EndpointConnectionError with: {}'.format(get_local_path(user)))\n cool_print('daemon will continue...')\n return '#'\n except UnicodeEncodeError:\n cool_print('UnicodeEncodeError with CV of user_id: {}'.format(user.id))\n cool_print('daemon will continue...')\n return '#'\n\n\ndef add_new_users(queue, created_since):\n \"\"\"\n Users with\n 1. missing a s3 url\n 2. having a local resource\n 3. 
text analysis already done\n :return:\n \"\"\"\n # TODO fix cv updates\n users = User.objects.filter(~Q(curriculum_url='#') &\n Q(curriculum_s3_url='#')).all()\n cool_print('total new users, to add on S3: {}'.format(len(users)))\n [queue.put(u) for u in users]\n\n created_since = created_since if len(users) == 0 else max({u.created_at for u in users})\n\n return created_since\n\n\ndef get_s3_path(bucket, s3_key):\n return urllib.parse.urljoin(config('s3_base_url'), bucket + '/' + s3_key)\n\n\ndef get_local_path(user):\n return os.path.join('../media', user.curriculum_url)\n\n\n# each worker does this job\ndef upload_users(users_queue, wait_time_workers, debug):\n \"\"\"\n Uploads a User to s3, then waits some time, and repeats...\n :return:\n \"\"\"\n user = users_queue.get()\n if debug:\n user.curriculum_s3_url = 'LE FINI'\n else:\n user.curriculum_s3_url = upload_resource_to_s3(user)\n user.save()\n time.sleep(wait_time_workers)\n\n\ndef upload_all():\n\n wait_time_workers = 10 # seconds\n wait_time_db = 60 # 10 minutes\n debug = False\n users_queue = Queue()\n\n created_since = datetime(day=9, month=4, year=1948)\n\n while True:\n created_since = add_new_users(users_queue, created_since)\n while not users_queue.qsize() == 0:\n upload_users(users_queue, wait_time_workers, debug)\n\n time.sleep(wait_time_db)\n\n\ndef run():\n with open('s3_uploader.log', 'a') as f:\n sys.stdout = h.Unbuffered(f)\n upload_all()\n\n\nif __name__ == '__main__':\n run()\n", "repo_name": "palaciossruben/acerto", "sub_path": "testing_webpage/tasks/s3_uploader.py", "file_name": "s3_uploader.py", "file_ext": "py", "file_size_in_byte": 3618, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ.setdefault", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.core.wsgi.get_wsgi_application", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "decouple.config", "line_number": 33, "usage_type": "call"}, {"api_name": "boto3.session.Session", "line_number": 36, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 36, "usage_type": "attribute"}, {"api_name": "decouple.config", "line_number": 37, "usage_type": "call"}, {"api_name": "decouple.config", "line_number": 38, "usage_type": "call"}, {"api_name": "boto3.session.Config", "line_number": 40, "usage_type": "call"}, {"api_name": "boto3.session", "line_number": 40, "usage_type": "attribute"}, {"api_name": "botocore.exceptions.EndpointConnectionError", "line_number": 53, "usage_type": "name"}, {"api_name": "beta_invite.models.User.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "beta_invite.models.User.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "beta_invite.models.User", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 73, "usage_type": "call"}, {"api_name": "queue.put", "line_number": 75, "usage_type": "call"}, {"api_name": 
"urllib.parse.parse.urljoin", "line_number": 83, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 83, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 83, "usage_type": "name"}, {"api_name": "decouple.config", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 102, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 112, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 119, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 124, "usage_type": "attribute"}, {"api_name": "subscribe.helper.Unbuffered", "line_number": 124, "usage_type": "call"}, {"api_name": "subscribe.helper", "line_number": 124, "usage_type": "name"}]} +{"seq_id": "17606680841", "text": "import os\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom data_generators import DataGen\nfrom utils import train, test\nfrom tensorflow.keras import Sequential, layers, losses, optimizers, callbacks\nfrom metrics import accuracy, specificity, sensitivity, f1_score\n\nDATA_DIR = os.getenv('DATA_DIR')\nRESULTS_BASE_DIR = Path(os.getenv('RESULTS_BASE_DIR'))\n\nMODEL_NAME = 'LeNet-5'\nSHAPE = int(os.getenv('SHAPE', 128))\nEPOCHS = int(os.getenv('EPOCHS', 30))\nBATCH_SIZE = int(os.getenv('BATCH_SIZE', 32))\nVALIDATION_SPLIT = float(os.getenv('VALIDATION_SPLIT', '0.2'))\n\nif __name__ == \"__main__\":\n cn_log_dir = RESULTS_BASE_DIR/MODEL_NAME / \\\n f\"{datetime.now().strftime('%Y%m%d_%H%M%S')}\"\n\n lenet_model = Sequential(name=MODEL_NAME)\n lenet_model.add(layers.Conv2D(filters=64, kernel_size=(9, 9), strides=2, activation='relu', input_shape=(SHAPE,SHAPE,1)))\n lenet_model.add(layers.AveragePooling2D())\n lenet_model.add(layers.Conv2D(filters=24, kernel_size=(3, 3), activation='relu'))\n lenet_model.add(layers.AveragePooling2D())\n lenet_model.add(layers.Flatten())\n lenet_model.add(layers.Dense(units=16, activation='relu'))\n lenet_model.add(layers.Dense(units=16, activation='relu'))\n lenet_model.add(layers.Dense(units=4, activation = 'softmax'))\n lenet_model.compile(loss=losses.squared_hinge, optimizer=optimizers.Adam(lr=0.001),\n metrics=[accuracy, specificity, sensitivity, f1_score])\n lenet_model.summary()\n\n data_gen = DataGen(\n batch_size=BATCH_SIZE,\n data_dir=DATA_DIR,\n target_size=(SHAPE, SHAPE),\n validation_split=VALIDATION_SPLIT\n )\n\n print(\"Training model\")\n\n cn_callbacks = [\n callbacks.CSVLogger(f\"{cn_log_dir}/log.csv\"),\n ]\n\n train(\n model=lenet_model,\n data_gen=data_gen,\n save_dir=cn_log_dir,\n epochs=EPOCHS,\n callbacks=cn_callbacks\n )\n", "repo_name": "mrhhodd/capsnet-keras", "sub_path": "run_cnn.py", "file_name": "run_cnn.py", "file_ext": "py", "file_size_in_byte": 1915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 15, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": 
"call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 24, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.AveragePooling2D", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 25, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 26, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.AveragePooling2D", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 27, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 30, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.squared_hinge", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses", "line_number": 32, "usage_type": "name"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers", "line_number": 32, "usage_type": "name"}, {"api_name": "metrics.accuracy", "line_number": 33, "usage_type": "name"}, {"api_name": "metrics.specificity", "line_number": 33, "usage_type": "name"}, {"api_name": "metrics.sensitivity", "line_number": 33, "usage_type": "name"}, {"api_name": "metrics.f1_score", "line_number": 33, "usage_type": "name"}, {"api_name": "data_generators.DataGen", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.CSVLogger", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.train", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "7164146483", "text": "import win32com.client as wincl\nimport os, os.path\nimport pandas as pd\nfrom IPython.display import display\nfrom unittest import loader\nfrom jinja2 import FileSystemLoader, Environment\nfrom destinatarios import destinatarios\n\n\ndef lerExcel(tabela):\n dados = pd.read_excel(tabela)\n #display(dados)\n cont = 0\n dados_template = []\n for i, col in dados.iterrows():\n\n texto_var = f\"{col['VAR']:.2%}\"\n\n if col['MESA'] == 'SFF' and col['ALERTA'] > 0 and col['ALERTA'] < 3:\n dados_template.append([col['MESA'], col['ALERTA'], col['FUNDO'], texto_var])\n cont = cont + 1\n if cont > 1:\n print(\"Cheguei aqui\")\n template = carregarTemplate(dados_template)\n enviarEmail(destinatarios['SFF'], template)\n\ndef rodarMacro():\n\n excel_macro = wincl.DispatchEx(\"Excel.application\")\n excel_path = os.path.expanduser(\"C:\\Git-Projects\\\\automates\\\\teste envio de 
email\\\\rodarMacro.xlsm\")\n workbook = excel_macro.Workbooks.Open(Filename = excel_path) # ReadOnly =1\n excel_macro.Application.Run(\"rodarMacro.xlsm!Módulo1.rodarMacro\")\n #Save the results in case you have generated data\n workbook.Save()\n excel_macro.Application.Quit()\n del excel_macro\n\n\ndef carregarTemplate(dados_template):\n loader = FileSystemLoader('templates')\n env = Environment(loader=loader)\n template = env.get_template('index2.html')\n\n file = open('output/index.html', 'w')\n\n render = template.render(dados = dados_template)\n file.write(render)\n file.close()\n return render\n\ndef enviarEmail(destinatario, template):\n\n outlook = wincl.Dispatch('outlook.application')\n\n # criando e-mail a partir da ingregracao com outlook\n email = outlook.CreateItem(0)\n email.Display()\n # configurando informacoes do email\n # Para quem vai o e-mail // Assunto da mensagem\n\n email.To = destinatario + \"; jrds.contato@hotmail.com; jrds.contato@gmail.com\"\n email.Cc = \"jhonatan.loko25@gmail.com\"\n email.Subject = 'e-mail automático de teste do python'\n\n # Criando e adicionando anexo\n # anexo = \"C:\\Git-Projects\\\\automates\\\\teste envio de email\\email.xlsx\"\n # email.Attachments.Add(anexo)\n\n email.HTMLBody = template\n\n #email.Send()\n\nlerExcel(\"rodarMacro.xlsm\")", "repo_name": "jrsantos1/estudo-email-marketing-python", "sub_path": "rodarMacroComWin32.py", "file_name": "rodarMacroComWin32.py", "file_ext": "py", "file_size_in_byte": 2254, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_excel", "line_number": 11, "usage_type": "call"}, {"api_name": "destinatarios.destinatarios", "line_number": 25, "usage_type": "name"}, {"api_name": "win32com.client.DispatchEx", "line_number": 29, "usage_type": "call"}, {"api_name": "win32com.client", "line_number": 29, "usage_type": "name"}, {"api_name": "os.path.expanduser", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "unittest.loader", "line_number": 40, "usage_type": "name"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 40, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 41, "usage_type": "call"}, {"api_name": "unittest.loader", "line_number": 41, "usage_type": "name"}, {"api_name": "win32com.client.Dispatch", "line_number": 53, "usage_type": "call"}, {"api_name": "win32com.client", "line_number": 53, "usage_type": "name"}]} +{"seq_id": "21904644384", "text": "import os\n\nimport pytest\n\nfrom spsdk.crypto.keys import EccCurve, PrivateKeyEcc, PrivateKeyRsa\nfrom spsdk.crypto.signature_provider import PlainFileSP\nfrom spsdk.crypto.symmetric import Counter\nfrom spsdk.crypto.utils import get_matching_key_id\nfrom spsdk.exceptions import SPSDKError, SPSDKValueError\n\n\ndef test_counter():\n \"\"\"Test of Counter.\"\"\"\n # simple counter with nonce only\n cntr = Counter(bytes([0] * 16))\n assert cntr.value == bytes([0] * 16)\n\n # counter with nonce and counter encoded as little endian\n cntr = Counter(bytes([0] * 16), ctr_value=0x01234567, ctr_byteorder_encoding=\"little\")\n assert cntr.value == bytes([0] * 12 + [0x67, 0x45, 0x23, 0x01])\n\n # counter with nonce and counter encoded as little endian\n cntr = Counter(bytes([0] * 16), ctr_value=0x01234567)\n assert cntr.value == bytes([0] * 12 + [0x67, 0x45, 0x23, 0x01])\n\n # counter with nonce and counter encoded as big endian\n cntr = Counter(bytes([0] * 16), 
ctr_value=1, ctr_byteorder_encoding=\"big\")\n assert cntr.value == bytes([0] * 15 + [1])\n\n # increment\n cntr.increment()\n assert cntr.value == bytes([0] * 15 + [2])\n cntr.increment(2)\n assert cntr.value == bytes([0] * 15 + [4])\n cntr.increment(256)\n assert cntr.value == bytes([0] * 14 + [1, 4])\n\n\ndef test_counter_invalid():\n with pytest.raises(SPSDKError, match=\"Wrong byte order\"):\n Counter(nonce=bytes(16), ctr_byteorder_encoding=\"BIG\")\n\n\n@pytest.mark.parametrize(\"length\", [(2048), (3072), (4096)])\ndef test_matching_keys_rsa(tmpdir, length):\n signature_providers = []\n pub_keys = []\n for i in range(4):\n prv_key = PrivateKeyRsa.generate_key(key_size=length)\n prv_key.save(os.path.join(tmpdir, f\"key{i}.pem\"))\n signature_providers.append(PlainFileSP(os.path.join(tmpdir, f\"key{i}.pem\")))\n pub_keys.append(prv_key.get_public_key())\n\n for i in range(4):\n assert i == get_matching_key_id(\n public_keys=pub_keys, signature_provider=signature_providers[i]\n )\n\n\n@pytest.mark.parametrize(\"curve\", [(curve_name) for curve_name in EccCurve])\ndef test_matching_keys_ecc(tmpdir, curve):\n signature_providers = []\n pub_keys = []\n for i in range(4):\n prv_key = PrivateKeyEcc.generate_key(curve_name=curve)\n prv_key.save(os.path.join(tmpdir, f\"key{i}.pem\"))\n signature_providers.append(PlainFileSP(os.path.join(tmpdir, f\"key{i}.pem\")))\n pub_keys.append(prv_key.get_public_key())\n\n for i in range(4):\n assert i == get_matching_key_id(\n public_keys=pub_keys, signature_provider=signature_providers[i]\n )\n\n\ndef test_matching_keys_unmatch(tmpdir):\n signature_providers = []\n pub_keys = []\n for i in range(4):\n prv_key = PrivateKeyRsa.generate_key()\n prv_key.save(os.path.join(tmpdir, f\"key{i}.pem\"))\n signature_providers.append(PlainFileSP(os.path.join(tmpdir, f\"key{i}.pem\")))\n pub_keys.append(prv_key.get_public_key())\n\n prv_key = PrivateKeyRsa.generate_key()\n prv_key.save(os.path.join(tmpdir, \"diff_key.pem\"))\n with pytest.raises(SPSDKValueError):\n get_matching_key_id(\n public_keys=pub_keys,\n signature_provider=PlainFileSP(os.path.join(tmpdir, \"diff_key.pem\")),\n )\n", "repo_name": "nxp-mcuxpresso/spsdk", "sub_path": "tests/utils/crypto/test_common.py", "file_name": "test_common.py", "file_ext": "py", "file_size_in_byte": 3274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 37, "dataset": "github-code", "pt": "52", "api": [{"api_name": "spsdk.crypto.symmetric.Counter", "line_number": 15, "usage_type": "call"}, {"api_name": "spsdk.crypto.symmetric.Counter", "line_number": 19, "usage_type": "call"}, {"api_name": "spsdk.crypto.symmetric.Counter", "line_number": 23, "usage_type": "call"}, {"api_name": "spsdk.crypto.symmetric.Counter", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 40, "usage_type": "call"}, {"api_name": "spsdk.exceptions.SPSDKError", "line_number": 40, "usage_type": "argument"}, {"api_name": "spsdk.crypto.symmetric.Counter", "line_number": 41, "usage_type": "call"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa.generate_key", "line_number": 49, "usage_type": "call"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.signature_provider.PlainFileSP", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.utils.get_matching_key_id", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 44, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 44, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.keys.PrivateKeyEcc.generate_key", "line_number": 65, "usage_type": "call"}, {"api_name": "spsdk.crypto.keys.PrivateKeyEcc", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.signature_provider.PlainFileSP", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.utils.get_matching_key_id", "line_number": 71, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 60, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.keys.EccCurve", "line_number": 60, "usage_type": "name"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa.generate_key", "line_number": 80, "usage_type": "call"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa", "line_number": 80, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.signature_provider.PlainFileSP", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa.generate_key", "line_number": 85, "usage_type": "call"}, {"api_name": "spsdk.crypto.keys.PrivateKeyRsa", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 87, "usage_type": "call"}, {"api_name": "spsdk.exceptions.SPSDKValueError", "line_number": 87, "usage_type": "argument"}, {"api_name": "spsdk.crypto.utils.get_matching_key_id", "line_number": 88, "usage_type": "call"}, {"api_name": "spsdk.crypto.signature_provider.PlainFileSP", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "19198722274", "text": "from models import Validator\nfrom flask import Flask, request, jsonify, send_file\nfrom flask_cors import CORS\n\nimport pandas as pd\nimport io\n\napp = Flask(__name__)\nCORS(app, resources={r\"/upload\": {\"origins\": \"https://jetcalcship.web.app\"}})\n\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n try:\n # Check if the POST request has the file part\n if 'file' not in request.files:\n return jsonify({'error': 'No file part'}), 400\n\n file = request.files['file']\n\n # Check if the file has a valid name and extension\n if file.filename == '':\n return jsonify({'error': 'No selected file'}), 400\n\n # Ensure the file is an Excel file\n if file and file.filename.endswith('.xlsx'):\n # Read the Excel file into a DataFrame\n excel_data = pd.read_excel(file)\n\n # Validator to check the excel data \n validator = 
Validator(dataframe=excel_data)\n validator.style_dataframe()\n\n # Check data\n excel_data = validator.get_dataframe()\n\n # Save the updated DataFrame to a new Excel file\n output = io.BytesIO()\n writer = pd.ExcelWriter(output, engine='openpyxl')\n\n excel_data.to_excel(writer, index=False)\n writer.close()\n output.seek(0)\n\n # Return the updated Excel file to the user\n return send_file(\n output,\n as_attachment=True,\n download_name=\"planilha_atualizada.xlsx\"\n )\n\n else:\n return jsonify({'error': 'Invalid file format, must be .xlsx'}), 400\n\n except Exception as e:\n return jsonify({'error': str(e)}), 500\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "zoddinGC/cepValidator", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Validator", "line_number": 30, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "39969923117", "text": "import humanize\n\nfrom datetime import datetime\nfrom pytz import timezone\n\n_intervals = (\n ('w', 604800),\n ('d', 86400),\n ('h', 3600),\n ('m', 60),\n ('s', 1),\n)\n\n_barColors = {\n 'Error': \"ff0000ff\",\n 'Deleted': \"ff0000ff\",\n 'Uploading': \"007bffff\",\n 'PausedUP': \"00ff00ff\",\n 'QueuedUP': \"007bffff\",\n 'StalledUP': \"b0b0b0ff\",\n 'CheckingUP': \"007bffff\",\n 'ForcedUP': \"ff0000ff\",\n 'Allocating': \"b0b0b0ff\",\n 'Downloading': \"00ff00ff\",\n 'MetaDL': \"007bffff\",\n 'PausedDL': \"b0b0b0ff\",\n 'QueuedDL': \"b0b0b0ff\",\n 'StalledDL': \"b0b0b0ff\",\n 'CheckingDL': \"ff0000ff\",\n 'ForcedDL': \"ff0000ff\",\n 'CheckingResumeData': \"b0b0b0ff\",\n 'Moving': \"ff0000ff\",\n 'Unknown': \"ff0000ff\"\n}\n\n_show_seeders = [\n \"Allocating\",\n \"Downloading\",\n \"MetaDL\",\n \"PausedDL\",\n \"QueuedDL\",\n \"StalledDL\",\n \"CheckingDL\",\n \"ForcedDL\",\n \"CheckingResumeData\",\n \"Unknown\"\n]\n\n\ndef _display_time(seconds, granularity=2):\n result = []\n\n for name, count in _intervals:\n value = seconds // count\n if value:\n seconds -= value * count\n if value == 1:\n name = name.rstrip('s')\n result.append(\"{}{}\".format(value, name))\n return ' '.join(result[:granularity])\n\n\ndef torrent_format(tr_dict):\n rm_values = {}\n for i in range(4):\n rm_values[f'TorrentName{i}'] = {'Text': tr_dict[i]['name']}\n rm_values[f'TorrentName{i}']['ToolTipText'] = tr_dict[i]['name']\n rm_values[f'TorrentStatus{i}'] = {'Text': 
tr_dict[i]['state'][0].upper() + tr_dict[i]['state'][1:]}\n rm_values[f'TorrentDSpeed{i}'] = {'Text': \"Down speed: \" + humanize.naturalsize(tr_dict[i]['dlspeed']) + \"/s\"}\n if rm_values[f'TorrentStatus{i}']['Text'] in _show_seeders:\n rm_values[f'TorrentSeeds{i}'] = {'Text': f\"Seeds: {tr_dict[i]['num_complete']}({tr_dict[i]['num_seeds']})\"}\n else:\n rm_values[f'TorrentSeeds{i}'] = {'Text': \\\n f\"Leechs: {tr_dict[i]['num_incomplete']}({tr_dict[i]['num_leechs']})\"}\n rm_values[f'TorrentETA{i}'] = {'Text': \"ETA: \" + _display_time(tr_dict[i]['eta'])}\n rm_values[f'TorrentPercentage{i}'] = {'Text': f\"{tr_dict[i]['progress'] * 100:.1f}%\"}\n rm_values[f'TorrentProgress{i}'] = {'Text': \\\n humanize.naturalsize(tr_dict[i]['downloaded']) + \"/\" +\\\n humanize.naturalsize(tr_dict[i]['downloaded'] + tr_dict[i]['amount_left'])}\n rm_values[f'TorrentProgressBar{i}'] = {'BarColor': _barColors[rm_values[f'TorrentStatus{i}']['Text']]}\n rm_values[f'TorrentUSpeed{i}'] = {'Text': \"Up speed: \" + humanize.naturalsize(tr_dict[i]['upspeed']) + \"/s\"}\n rm_values[f'TorrentAddedOn{i}'] = {'Text': humanize.naturaltime(\n datetime.fromtimestamp(tr_dict[i]['added_on'], tz=timezone(\"US/Eastern\")).replace(tzinfo=None)\n )}\n rm_values[f'TorrentRatio{i}'] = {'Text': f\"Ratio: {tr_dict[i]['ratio']:.2f}\"}\n return rm_values\n", "repo_name": "LOAFCLAN/QBT_rainmeter_skin", "sub_path": "@Resources/Scripts/torrent_formatter.py", "file_name": "torrent_formatter.py", "file_ext": "py", "file_size_in_byte": 2989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "humanize.naturalsize", "line_number": 69, "usage_type": "call"}, {"api_name": "humanize.naturalsize", "line_number": 78, "usage_type": "call"}, {"api_name": "humanize.naturalsize", "line_number": 79, "usage_type": "call"}, {"api_name": "humanize.naturalsize", "line_number": 81, "usage_type": "call"}, {"api_name": "humanize.naturaltime", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "26142607487", "text": "import os\nimport random\nimport torch.utils.data as data\nfrom PIL import Image\nfrom utils import regular_exp\n\n\ndef pil_loader(path: str) -> Image.Image:\n with open(path, \"rb\") as f:\n img = Image.open(f)\n return img.convert(\"RGB\")\n\nclass MICLeDataset(data.Dataset):\n\n def __init__(self,\n img_dir,\n transform=None,\n augmentation = None):\n \"\"\"\n :param img_dir: Path to the folder with all subdirs, each subdir contains images of one subject.\n |--- img_dir\n | |--- subject 1\n | | |--- 1_1.jpg\n | | |--- 1_2.jpg\n | | |--- 1_3.jpg\n | |--- subject 2\n | | |--- 2_1.jpg\n | | |--- 2_2.jpg\n | | |--- 2_3.jpg\n | |--- subject 3 \n | | |--- 3_1.jpg\n | | |--- 3_2.jpg\n | | |--- 3_3.jpg\n :param transform: transform to be applied on the image\n :param augmentation: augmentation to be applied on the image\n \"\"\"\n\n super().__init__()\n\n self.img_dir = img_dir\n self.folder_list = os.listdir(img_dir)\n self.transform = transform\n\n self.augmentation = augmentation\n\n def __len__(self):\n return len(self.folder_list)\n\n def __getitem__(self, idx):\n folder_name = self.folder_list[idx]\n folder_path = os.path.join(self.img_dir, folder_name)\n fname_list = os.listdir(folder_path)\n if len(fname_list) >=2:\n 
two_random_imgs = random.sample(fname_list, 2)\n img_path1 = os.path.join(folder_path, two_random_imgs[0])\n img_path2 = os.path.join(folder_path, two_random_imgs[1])\n image1 = pil_loader(img_path1)\n image2 = pil_loader(img_path2)\n\n if self.transform:\n image1 = self.transform(image1)\n image2 = self.transform(image2)\n return image1, image2, two_random_imgs[0] + \"&\" + two_random_imgs[1]\n\n elif len(fname_list) == 1:\n img_path = os.path.join(folder_path, fname_list[0])\n image = pil_loader(img_path)\n\n if self.augmentation:\n imageA = self.augmentation(image)\n imageB = self.augmentation(image)\n else:\n imageA = None\n imageB = None\n\n return imageA, imageB, fname_list[0]", "repo_name": "rjrobben/MICLe_pytorch", "sub_path": "dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 2407, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 8, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 10, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 10, "usage_type": "name"}, {"api_name": "PIL.Image.Image", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 13, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 52, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "8565082251", "text": "import pygame\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, x, y):\n super().__init__()\n self.image = pygame.image.load('red_ball.png').convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n\n def move_left(self, pixels):\n self.rect.x -= pixels\n if self.rect.x < 0:\n self.rect.x = 0\n\n def move_right(self, pixels):\n self.rect.x += pixels\n if self.rect.x > 640 - self.rect.width:\n self.rect.x = 640 - self.rect.width\n\n def jump(self):\n self.rect.y += 2\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n self.rect.y -= 2\n\n if len(platform_hit_list) > 0 or self.rect.bottom >= self.level.height:\n self.change_y = -10\n\n def update(self):\n self.change_y += self.level.gravity\n\n self.rect.y += self.change_y\n\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for platform in platform_hit_list:\n if self.change_y > 0:\n self.rect.bottom = platform.rect.top\n elif self.change_y < 0:\n self.rect.top = platform.rect.bottom\n\n self.change_y = 0\n", "repo_name": "daniil-novel/python-proj-3sem", "sub_path": "All_practice/game/player.py", "file_name": "player.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "pygame.sprite", "line_number": 3, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "70000356004", "text": "#!/usr/bin/python\n\n\nimport wx \n# always add this line, or your code won't compile - the wx module contains the GUI code you'll use\n\n#The start of our wxPython GUI tutorial\"\"\"\n\napp = wx.App(redirect=False) # create a wx application\n\nwindow = wx.Frame(None, title = 'Sample GUI App') # create a window\nbtn = wx.Button(window) \n# create a button 'widget' on the window - note how the button receives the window. This creates a tree-like structure where the window is the parent node and anything else like buttons are child nodes.\n\nwindow.Show() # make the frame (and hence button) visible\napp.MainLoop() # keep things going\n", "repo_name": "daviddoria/Examples", "sub_path": "Python/GUI/Test.py", "file_name": "Test.py", "file_ext": "py", "file_size_in_byte": 622, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 224, "dataset": "github-code", "pt": "52", "api": [{"api_name": "wx.App", "line_number": 9, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 11, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "7123301435", "text": "import json\nfrom fastapi import APIRouter, Depends, Response, HTTPException\nfrom starlette.status import HTTP_500_INTERNAL_SERVER_ERROR, \\\n HTTP_401_UNAUTHORIZED, HTTP_201_CREATED, HTTP_200_OK\nfrom app.models.index import Person, Admin, Name\nfrom fastapi_cache.backends.redis import RedisCacheBackend\nfrom app.helper import redis_cache\nfrom fastapi_jwt_auth import AuthJWT\nfrom app.utils import Constants\n\n\nrouter = APIRouter()\n\n\n@router.post(\n \"/person\",\n name=\"persons:create-user\",\n response_model_exclude_unset=True\n)\nasync def person(person: Person, response: Response, cache: RedisCacheBackend = Depends(redis_cache),\n Authorize: AuthJWT = Depends()) -> Response:\n \"\"\"\n\n :param cache:\n :param response:\n :param person:\n :type Authorize: object\n \"\"\"\n Authorize.jwt_required()\n try:\n await cache.set(\"{0}_{1}\".format(person.first_name.lower(),\n person.last_name.lower()), json.dumps(person.__dict__))\n return Response(status_code=HTTP_201_CREATED, content=json.dumps(person.__dict__))\n except Exception as e:\n print(e)\n return Response(status_code=HTTP_500_INTERNAL_SERVER_ERROR, content=\"Person creation failed\")\n\n\n@router.delete(\n \"/person\",\n name=\"persons:delete-user\",\n response_model_exclude_unset=True\n)\nasync def person(user: Name, response: Response, cache: RedisCacheBackend = Depends(redis_cache),\n Authorize: AuthJWT = Depends()) -> Response:\n \"\"\"\n\n :param cache:\n :param response:\n :param user:\n :type Authorize: object\n \"\"\"\n Authorize.jwt_required()\n try:\n await cache.delete(\"{0}_{1}\".format(user.first_name.lower(),\n user.last_name.lower()))\n response.status_code = HTTP_200_OK\n return response\n except Exception as e:\n return Response(status_code=HTTP_500_INTERNAL_SERVER_ERROR, 
content='0')\n\n\n@router.get(\n \"/person\",\n name=\"persons:fetch-user\",\n response_model_exclude_unset=True\n)\nasync def person(first_name: str, last_name: str, response: Response, cache: RedisCacheBackend = Depends(redis_cache),\n Authorize: AuthJWT = Depends()) -> Response:\n \"\"\"\n\n :param last_name:\n :param first_name:\n :param user:\n :param cache:\n :param response:\n :type Authorize: object\n \"\"\"\n Authorize.jwt_required()\n try:\n _persons = await cache.get(\"{0}_{1}\".format(first_name.lower(),\n last_name.lower()))\n response.status_code = HTTP_200_OK\n return Response(content=_persons)\n except Exception as e:\n return Response(status_code=HTTP_500_INTERNAL_SERVER_ERROR, content='0')\n\n\n@router.put(\n \"/person\",\n name=\"persons:update-user\",\n response_model_exclude_unset=True\n)\nasync def person(person: Person, response: Response, cache: RedisCacheBackend = Depends(redis_cache),\n Authorize: AuthJWT = Depends()) -> Person:\n \"\"\"\n\n :param cache:\n :param response:\n :param person:\n :type Authorize: object\n \"\"\"\n Authorize.jwt_required()\n try:\n await cache.set(\"{0}_{1}\".format(person.first_name.lower(),\n person.last_name.lower()), json.dumps(person.__dict__))\n response.status_code = HTTP_200_OK\n return response\n except Exception as e:\n return Response(status_code=HTTP_500_INTERNAL_SERVER_ERROR, content='0')\n\n\n@router.post('/login')\ndef login(admin: Admin, Authorize: AuthJWT = Depends()):\n \"\"\"\n\n :param Authorize:\n :type admin: object\n \"\"\"\n if admin.username != Constants.username.value or admin.password != Constants.password.value:\n raise HTTPException(status_code=HTTP_401_UNAUTHORIZED, detail=\"Bad username or password\")\n\n # subject identifier for who this token is for example id or username from database\n access_token = Authorize.create_access_token(subject=admin.username)\n return Response(content=json.dumps({\"access_token\": access_token}), status_code=HTTP_200_OK)\n", "repo_name": "nithishmohan/EMBL_task", "sub_path": "app/api/routes/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 4105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 12, "usage_type": "call"}, {"api_name": "app.models.index.Person", "line_number": 20, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 20, "usage_type": "name"}, {"api_name": "fastapi_cache.backends.redis.RedisCacheBackend", "line_number": 20, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 21, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 20, "usage_type": "call"}, {"api_name": "app.helper.redis_cache", "line_number": 20, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 33, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_201_CREATED", "line_number": 33, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 36, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 36, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 21, "usage_type": "name"}, {"api_name": "app.models.index.Name", "line_number": 44, "usage_type": "name"}, {"api_name": 
"fastapi.Response", "line_number": 44, "usage_type": "name"}, {"api_name": "fastapi_cache.backends.redis.RedisCacheBackend", "line_number": 44, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 44, "usage_type": "call"}, {"api_name": "app.helper.redis_cache", "line_number": 44, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 45, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 57, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 60, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 60, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 68, "usage_type": "name"}, {"api_name": "fastapi_cache.backends.redis.RedisCacheBackend", "line_number": 68, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 69, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 68, "usage_type": "call"}, {"api_name": "app.helper.redis_cache", "line_number": 68, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 69, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 83, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 84, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 86, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 86, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 69, "usage_type": "name"}, {"api_name": "app.models.index.Person", "line_number": 94, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 94, "usage_type": "name"}, {"api_name": "fastapi_cache.backends.redis.RedisCacheBackend", "line_number": 94, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 95, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 94, "usage_type": "call"}, {"api_name": "app.helper.redis_cache", "line_number": 94, "usage_type": "argument"}, {"api_name": "fastapi.Depends", "line_number": 95, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 106, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 107, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 110, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_500_INTERNAL_SERVER_ERROR", "line_number": 110, "usage_type": "name"}, {"api_name": "app.models.index.Person", "line_number": 95, "usage_type": "name"}, {"api_name": "app.models.index.Admin", "line_number": 114, "usage_type": "name"}, {"api_name": "fastapi_jwt_auth.AuthJWT", "line_number": 114, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 114, "usage_type": "call"}, {"api_name": "app.utils.Constants.username", "line_number": 120, "usage_type": "attribute"}, {"api_name": "app.utils.Constants", "line_number": 120, "usage_type": "name"}, {"api_name": "app.utils.Constants.password", "line_number": 120, "usage_type": "attribute"}, {"api_name": "fastapi.HTTPException", "line_number": 121, "usage_type": "call"}, {"api_name": "starlette.status.HTTP_401_UNAUTHORIZED", "line_number": 121, "usage_type": "name"}, {"api_name": "fastapi.Response", "line_number": 125, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 125, 
"usage_type": "call"}, {"api_name": "starlette.status.HTTP_200_OK", "line_number": 125, "usage_type": "name"}]} +{"seq_id": "43296032708", "text": "\"\"\"\nThis is scheduler for Learning.\n\"\"\"\n\nimport datetime\nimport itertools\nimport time\n\nDAY_OF_WEEKS = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n\n\ndef events_generator(schedule: str, start_at: datetime):\n if not start_at:\n start_at = datetime.datetime.now()\n parts = schedule.split(',')\n schedule_elements = []\n for element in parts:\n element = element.strip()\n parts = element.split(' ')\n if len(parts) > 2:\n raise ValueError(\"Wrong schedule template\")\n\n day_of_week = parts[0]\n if day_of_week.startswith('+'):\n dow_index = None\n day_incrementor = int(day_of_week[1:])\n else:\n try:\n dow_index = DAY_OF_WEEKS.index(day_of_week)\n except ValueError:\n raise ValueError(f\"Unexpected day of week '{day_of_week}'. Possible values are: {DAY_OF_WEEKS}\")\n day_incrementor = None\n\n if len(parts) == 2:\n parsed = time.strptime(parts[1], \"%H:%M\")\n time_part = {\n 'hour': parsed.tm_hour,\n 'minute': parsed.tm_min,\n 'second': 0,\n 'microsecond': 0,\n }\n else:\n time_part = {\n 'hour': start_at.hour,\n 'minute': start_at.minute,\n 'second': start_at.second,\n 'microsecond': start_at.microsecond,\n }\n\n schedule_elements.append((dow_index, time_part, day_incrementor))\n\n current_day = start_at\n for schedule_element in itertools.cycle(schedule_elements):\n day_of_week, time_part, day_incrementor = schedule_element\n if day_incrementor is not None:\n current_day += datetime.timedelta(days=day_incrementor)\n current_day = current_day.replace(**time_part)\n yield current_day\n else:\n while day_of_week != current_day.weekday():\n current_day += datetime.timedelta(days=1)\n current_day = current_day.replace(**time_part)\n yield current_day\n current_day += datetime.timedelta(days=1)\n", "repo_name": "bestchanges/studyworthy", "sub_path": "djangoapps/lms/schedule.py", "file_name": "schedule.py", "file_ext": "py", "file_size_in_byte": 2135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "time.strptime", "line_number": 35, "usage_type": "call"}, {"api_name": "itertools.cycle", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "16484568743", "text": "import traceback\nimport numpy as np\nimport scipy\nimport xarray as xr\nimport argparse\nimport pandas as pd\nfrom pathlib import Path\nimport tool_fig_config\n\nfrom multiprocessing import Pool\nimport multiprocessing\nimport os.path\nimport os\n\nfrom WRFDiag import wrf_load_helper\nwrf_load_helper.engine = \"netcdf4\"\n\n\nimport MITgcmDiff.loadFunctions as lf\nimport MITgcmDiff.mixed_layer_tools as mlt\nimport MITgcmDiff.calBudget as cb\nimport MITgcmDiag.data_loading_helper as dlh\n\nM_H2O = 0.018\nM_DRY = 0.02896\n\nc_p = 1004\ng0 = 9.8\nrho_water = 1e3\nL_vap = 2.25e6\n\ndef intVert(q, p_w):\n dm = ( p_w[:-1, : , :] - p_w[1:, :, : ]) / g0\n\n print(q.shape)\n print(p_w.shape)\n\n return np.sum(c_p * dm * q, axis=0)\n\n\n\ndef saturation_vapor_pressure(T):\n T_degC = T - 273.15\n 
return 6.112 * np.exp(17.67 * T_degC / (T_degC + 243.5)) * 100.0\n\ndef getMixingRatio(T, p, RH):\n p_wv = saturation_vapor_pressure(T) * RH\n return p_wv / (p - p_wv) * M_H2O / M_DRY\n\nvec_getMixingRatio = np.vectorize(getMixingRatio)\n\ndef produceDiagQuantities(ds):\n \n\n MU = ds.MU + ds.MUB\n \n H_DIAB_TTL = - (MU * ds.H_DIABATIC * ds.DNW / g0 * c_p).sum(dim=\"bottom_top\").rename(\"H_DIAB_TTL\")\n \n PREC_ACC_DT = ds.attrs['PREC_ACC_DT'] * 60\n print(\"PREC_ACC_DT = \", PREC_ACC_DT)\n\n H_DIAB_RAIN = ( (ds.PREC_ACC_C + ds.PREC_ACC_NC) / 1e3 / PREC_ACC_DT * rho_water * L_vap ).rename(\"H_DIAB_RAIN\")\n\n SHFLX = ds.HFX.rename(\"SHFLX\")\n LHFLX = ds.LH.rename(\"LHFLX\")\n SWFLX = (ds.SWUPB - ds.SWDNB).rename(\"SWFLX\")\n LWFLX = (ds.LWUPB - ds.LWDNB).rename(\"LWFLX\")\n \n TTL_HFLX = SHFLX + LHFLX + SWFLX + LWFLX\n TTL_HFLX = TTL_HFLX.rename(\"TTL_HFLX\")\n \n SLHFLX = (ds.HFX + ds.LH).rename(\"SLHFLX\")\n\n AOTDIFF = ds.TSK - ds.T2\n AOTDIFF = AOTDIFF.rename(\"AOTDIFF\")\n\n U10 = ds.U10\n V10 = ds.V10\n\n WIND10 = (U10**2 + V10**2)**0.5\n WIND10 = WIND10.rename(\"WIND10\")\n\n\n SST = ds.TSK.to_numpy()\n PSFC = ds.PSFC.to_numpy()\n Q2 = ds.Q2.to_numpy()\n qv_sat_tmp = vec_getMixingRatio(SST, PSFC, 1.0)\n \n AOQVDIFF = ds.PSFC.copy()\n AOQVDIFF[:, :] = qv_sat_tmp - Q2\n AOQVDIFF = AOQVDIFF.rename(\"AOQVDIFF\")\n\n new_ds = xr.merge([\n H_DIAB_TTL, H_DIAB_RAIN, SLHFLX, SHFLX, LHFLX, SWFLX, LWFLX, TTL_HFLX, WIND10, AOTDIFF, AOQVDIFF,\n ])\n\n\n return new_ds\n\n\n\n\n\ndef produceDiagQuantities_ocn(data):\n\n Usfc = data[\"UVEL\"][0:5, :, :].mean(axis=0)\n Vsfc = data[\"VVEL\"][0:5, :, :].mean(axis=0)\n SSC = np.sqrt(Usfc**2 + Vsfc**2)\n\n data[\"SSC\"] = SSC\n data[\"Usfc\"] = Usfc\n data[\"Vsfc\"] = Vsfc\n\n return data\n\n\n\n\n\nparser = argparse.ArgumentParser(\n prog = 'plot_skill',\n description = 'Plot prediction skill of GFS on AR.',\n)\n\nparser.add_argument('--date-rng', type=str, nargs=2, help='Date range.', required=True)\nparser.add_argument('--skip-hrs', type=int, help='The skip in hours to do the next diag.', required=True)\nparser.add_argument('--avg-hrs', type=int, help='The length of time to do the average in hours.', default=np.nan)\n#parser.add_argument('--data-freq-hrs', type=int, help='The data frequency in hours.', required=True)\nparser.add_argument('--sim-names', type=str, nargs='*', help='Simulation names', default=[])\n\nparser.add_argument('--mitgcm-beg-date', type=str, help='The datetime of iteration zero in mitgcm.', required=True)\nparser.add_argument('--mitgcm-deltaT', type=float, help='The timestep (sec) of mitgcm (deltaT).', required=True)\nparser.add_argument('--mitgcm-dumpfreq', type=float, help='The timestep (sec) of mitgcm dump frequency.', required=True)\nparser.add_argument('--mitgcm-grid-dir', type=str, help='Grid directory of MITgcm.', default=\"\")\nparser.add_argument('--pressure-factor', type=float, help='Pressure factor', default=1.0)\n\n\nparser.add_argument('--input-dirs', type=str, nargs='+', help='Input dirs.', required=True)\nparser.add_argument('--output-dir', type=str, help='Output dir', default=\"output_figure\")\nparser.add_argument('--nproc', type=int, help='Number of processors.', default=1)\nparser.add_argument('--lat-rng', type=float, nargs=2, help='Latitudes in degree', default=[20, 52])\nparser.add_argument('--lon-rng', type=float, nargs=2, help='Longitudes in degree', default=[360-180, 360-144])\nparser.add_argument('--deg-lat-per-inch', type=float, help='Degree latitude per plot-inch.', 
default=10.0)\nparser.add_argument('--deg-lon-per-inch', type=float, help='Degree longitude per plot-inch', default=10.0)\nparser.add_argument('--pvalue-threshold', type=float, help='P value threshold.', default=0.10)\nparser.add_argument('--varnames', type=str, nargs='+', help='Plotted variable names', default=['TTL_HFLX',])\nparser.add_argument('--overwrite', action=\"store_true\")\nparser.add_argument('--no-display', action=\"store_true\")\nparser.add_argument('--is-ensemble', action=\"store_true\")\nparser.add_argument('--ensemble-members', type=int, help=\"Ensemble members. Assume equal sampling members.\", default=-1)\n\nargs = parser.parse_args()\nprint(args)\n\nif args.is_ensemble and args.ensemble_members == -1:\n raise Exception(\"The option `--is-ensemble` is set but `--ensemble-members` is not given.\")\n\nif np.isnan(args.avg_hrs):\n print(\"--avg-hrs is not set. Set it to --skip-hrs = %d\" % (args.skip_hrs,))\n args.avg_hrs = args.skip_hrs\n\nskip_hrs = pd.Timedelta(hours=args.skip_hrs)\navg_hrs = pd.Timedelta(hours=args.avg_hrs)\ndts = pd.date_range(args.date_rng[0], args.date_rng[1], freq=skip_hrs, inclusive=\"left\")\n\nargs.lon = np.array(args.lon_rng) % 360.0\n\nlat_n, lat_s = np.amax(args.lat_rng), np.amin(args.lat_rng)\nlon_w, lon_e = np.amin(args.lon_rng), np.amax(args.lon_rng)\n\n\nif len(args.sim_names) == 0:\n args.sim_names = args.input_dirs\nelif len(args.sim_names) != len(args.input_dirs):\n raise Exception(\"--sim-names is provided but the number of input does not match the --input-dirs\")\n\n# Plotting setup\nheat_flux_setting = dict( \n ctl = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-400, 400, 41),\n contourf_ticks = np.linspace(-400, 400, 9),\n ),\n\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-20, 20, 21),\n contourf_ticks = np.linspace(-20, 20, 9),\n )\n)\n\nlevs_ps = np.arange(980, 1040, 4)\nlevs_ps_diff = np.concatenate(\n (\n np.arange(-20, 0, 1),\n np.arange(1, 21, 1),\n )\n) * args.pressure_factor\n\n\nplot_infos = dict(\n\n atm = {\n\n \"PSFC\" : dict(\n factor = 100,\n label = \"$P_\\\\mathrm{sfc}$\",\n unit = \"$\\\\mathrm{hPa}$\",\n ctl = dict(\n contourf_cmap = \"bone_r\",\n contourf_levs = np.linspace(980, 1040, 16),\n contourf_ticks = np.linspace(980, 1040, 16),\n ),\n\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-4, 4, 21),\n contourf_ticks = np.linspace(-4, 4, 11),\n )\n ),\n\n\n \"WIND10\" : dict(\n factor = 1,\n label = \"$\\\\left|\\\\vec{U}_\\\\mathrm{10m}\\\\right|$\",\n unit = \"$\\\\mathrm{m}/\\\\mathrm{s}$\",\n ctl = dict(\n contourf_cmap = \"hot_r\",\n contourf_levs = np.linspace(0, 20, 21),\n contourf_ticks = np.linspace(0, 20, 11),\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-5, 5, 21),\n contourf_ticks = np.linspace(-5, 5, 11),\n )\n ),\n\n\n \"T2\" : dict(\n factor = 1,\n label = \"$T_{\\\\mathrm{2m}}$\",\n unit = \"$\\\\mathrm{K}$\",\n ctl = dict(\n contourf_cmap = \"Spectral_r\",\n# contourf_levs = np.arange(285.15, 293.15, 0.1),\n# contourf_ticks = np.arange(285.15, 293.15, 0.5),\n\n contourf_levs = np.arange(273.15, 300, .5),\n contourf_ticks = np.arange(273.15, 300, 2),\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-2, 2, 21),\n contourf_ticks = np.linspace(-2, 2, 11),\n )\n ),\n\n \"SST\" : dict(\n factor = 1,\n label = \"$\\\\mathrm{SST}$\",\n unit = \"$\\\\mathrm{K}$\",\n ctl = dict(\n contourf_cmap = \"Spectral_r\",\n contourf_levs = np.arange(273.15, 300, .5),\n contourf_ticks = 
np.arange(273.15, 300, 2),\n# contourf_levs = np.arange(285.15, 293.15, 0.1),\n# contourf_ticks = np.arange(285.15, 293.15, 0.5),\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21),\n contourf_ticks = np.linspace(-1, 1, 11),\n )\n ),\n\n \"QFX\" : dict(\n factor = 1e-4,\n label = \"QFX\",\n unit = \"$\\\\mathrm{kg}/\\\\mathrm{s}/\\\\mathrm{m}^2$\",\n ctl = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21),\n contourf_ticks = np.linspace(-1, 1, 11),\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21),\n contourf_ticks = np.linspace(-1, 1, 11),\n )\n ),\n\n\n \"AOTDIFF\" : dict(\n factor = 1,\n label = \"$\\\\mathrm{SST} - T_{\\\\mathrm{2m}}$\",\n unit = \"$\\\\mathrm{K}$\",\n ctl = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-5, 5, 21),\n contourf_ticks = np.linspace(-5, 5, 11),\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-2, 2, 21),\n contourf_ticks = np.linspace(-2, 2, 11),\n )\n ),\n\n \"H_DIAB_RAIN\" : dict(\n factor = 1000,\n label = \"H_DIAB_RAIN\",\n unit = \"$\\\\mathrm{kW}/\\\\mathrm{m}^2$\",\n ctl = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21) * 5,\n contourf_ticks = np.linspace(-1, 1, 11) * 5,\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21) * 1,\n contourf_ticks = np.linspace(-1, 1, 11) * 1,\n )\n ),\n\n\n \"H_DIAB_TTL\" : dict(\n factor = 1,\n label = \"H_DIAB_TTL\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n ctl = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21) * 1000,\n contourf_ticks = np.linspace(-1, 1, 11) * 1000,\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-1, 1, 21) * 1000,\n contourf_ticks = np.linspace(-1, 1, 11) * 1000,\n )\n ),\n\n\n \"TTL_HFLX\" : dict(\n factor = 1,\n label = \"HFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n \"SLHFLX\" : dict(\n factor = 1,\n label = \"SHFLX + LHFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n \"SHFLX\" : dict(\n factor = 1,\n label = \"SHFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n \"LHFLX\" : dict(\n factor = 1,\n label = \"LHFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n \"SWFLX\" : dict(\n factor = 1,\n label = \"SWFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n \"LWFLX\" : dict(\n factor = 1,\n label = \"LWFLX\",\n unit = \"$\\\\mathrm{W}/\\\\mathrm{m}^2$\",\n **heat_flux_setting,\n ),\n\n },\n\n ocn = {\n \"SSC\" : dict(\n factor = 1,\n label = \"$\\\\left|\\\\vec{U}_\\\\mathrm{sfc}\\\\right|$\",\n unit = \"$\\\\mathrm{m}/\\\\mathrm{s}$\",\n ctl = dict(\n contourf_cmap = \"YlOrRd\",\n contourf_levs = np.linspace(0, 1, 21) * 0.5,\n contourf_ticks = np.linspace(0, 1, 11) * 0.5,\n ),\n diff = dict(\n contourf_cmap = \"bwr\",\n contourf_levs = np.linspace(-.5, .5, 21),\n contourf_ticks = np.linspace(-.5, .5, 11),\n )\n ),\n }\n\n)\n\nprint(\"Parse variables...\")\n\nplot_variables = []\nfor i, full_varname in enumerate(args.varnames):\n\n \n category, varname = full_varname.split(\".\")\n \n if not ( (category in plot_infos) and (varname in plot_infos[category]) ):\n \n raise Exception(\"Error: Varname '%s' has no corresponding plot info.\" % (varname,))\n\n plot_variables.append((category, varname))\n\n\n\nprint(\"==================================\")\nprint(\"Date 
range: \", dts[0], \" to \", dts[-1])\nprint(\"Skip : \", skip_hrs)\nprint(\"Avg : \", avg_hrs)\nprint(\"Latitude box: %.2f %.2f\" % (lat_s, lat_n))\nprint(\"Longitude box: %.2f %.2f\" % (lon_w, lon_e))\n\nfor i, (category, varname) in enumerate(plot_variables):\n print(\"The %d-th plotted variable: %s - %s\" % (i, category, varname,))\n\nfor i, input_dir in enumerate(args.input_dirs):\n print(\"The %d-th input folder: %s\" % (i, input_dir,))\n\n\nprint(\"==================================\")\n\n\n\nncol = len(args.input_dirs)\nnrow = len(args.varnames)\n\nprint(\"Load matplotlib...\")\n\nimport matplotlib as mpl\nif args.no_display is False:\n print(\"Load TkAgg\")\n mpl.use('TkAgg')\nelse:\n print(\"Load Agg\")\n mpl.use('Agg')\n mpl.rc('font', size=15)\n \n \n# This program plots the AR event\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.patches import Rectangle\nimport matplotlib.transforms as transforms\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.ticker as mticker\nimport cartopy.crs as ccrs\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nprint(\"done\")\n\nprint(\"Create dir: %s\" % (args.output_dir,))\nPath(args.output_dir).mkdir(parents=True, exist_ok=True)\n\n\n\ndef workWrap(*args):\n\n try:\n result = plot(*args)\n except Exception:\n traceback.print_exc()\n result = \"ERROR\"\n\n return result\n\n\ndef plot(beg_dt, end_dt, output_filename):\n \n beg_dtstr = beg_dt.strftime(\"%Y-%m-%d_%H\")\n end_dtstr = end_dt.strftime(\"%Y-%m-%d_%H\")\n \n print(\"Doing date range: [%s, %s]\" % (beg_dtstr, end_dtstr))\n\n data = []\n\n ref_time = None\n\n coords = dict(\n atm = dict(\n lat = None,\n lon = None,\n ),\n\n ocn = dict(\n lat = None,\n lon = None,\n )\n )\n for i, input_dir in enumerate(args.input_dirs):\n\n _data = dict(atm=None, ocn=None)\n\n\n print(\"Load the %d-th folder: %s\" % (i, input_dir,))\n\n _ds = wrf_load_helper.loadWRFDataFromDir(input_dir, prefix=\"wrfout_d01_\", time_rng=[beg_dt, end_dt], extend_time=pd.Timedelta(days=1))\n\n if i == 0:\n ref_time = _ds.time.to_numpy()\n coords['atm']['lon'] = _ds.coords[\"XLONG\"].isel(time=0)\n coords['atm']['lat'] = _ds.coords[\"XLAT\"].isel(time=0)\n\n print(\"Loaded time: \")\n print(ref_time)\n\n\n if i > 0:\n if any(ref_time != _ds.time.to_numpy()):\n raise Exception(\"Time is not consistent between %s and %s\" % (args.input_dirs[0], input_dir,))\n\n\n _ds = _ds.mean(dim=\"time\", keep_attrs=True)\n\n if not args.is_ensemble:\n _data['atm'] = xr.merge([_ds, produceDiagQuantities(_ds)])\n else:\n _data['atm'] = _ds\n \n _ds_std = None\n if args.is_ensemble:\n try:\n _ds_std = wrf_load_helper.loadWRFDataFromDir(input_dir, prefix=\"std_wrfout_d01_\", time_rng=[beg_dt, end_dt], extend_time=pd.Timedelta(days=0))\n\n except Exception as e:\n print(e)\n traceback.print_exc()\n print(\"Error happens when trying to get standard deviation file. 
Ignore this.\")\n\n _ds_std = None\n\n if _ds_std is not None:\n _ds_std = _ds_std.mean(dim=\"time\", keep_attrs=True)\n _data['atm_std'] = _ds_std\n\n print(\"Load ocean data\")\n\n try:\n if args.mitgcm_grid_dir != \"\":\n mitgcm_grid_dir = args.mitgcm_grid_dir\n else:\n mitgcm_grid_dir = input_dir\n \n msm = dlh.MITgcmSimMetadata(args.mitgcm_beg_date, args.mitgcm_deltaT, args.mitgcm_dumpfreq, input_dir, mitgcm_grid_dir)\n coo, crop_kwargs = lf.loadCoordinateFromFolderAndWithRange(msm.grid_dir, nlev=None, lat_rng=args.lat_rng, lon_rng=args.lon_rng)\n\n coords['ocn']['lat'] = coo.grid[\"YC\"][:, 0]\n coords['ocn']['lon'] = coo.grid[\"XC\"][0, :]\n\n #ocn_z_T = coo.grid[\"RC\"].flatten()\n #ocn_z_W = coo.grid[\"RF\"].flatten()\n #ocn_mask = coo.grid[\"maskInC\"]\n\n # Load average data\n datasets = [\"diag_state\"]\n data_ave = dlh.loadAveragedDataByDateRange(beg_dt, end_dt, msm, **crop_kwargs, datasets=datasets, inclusive=\"right\") # inclusive is right because output at time=t is the average from \"before\" to t\n _data['ocn'] = produceDiagQuantities_ocn(data_ave)\n\n \n # Load standard deviation\n if args.is_ensemble:\n new_datasets = []\n for j in range(len(datasets)): \n new_datasets.append(\"%s_%s\" % (\"std\", datasets[j],))\n\n data_std = dlh.loadAveragedDataByDateRange(beg_dt, end_dt, msm, **crop_kwargs, datasets=new_datasets, inclusive=\"right\") # inclusive is right because output at time=t is the average from \"before\" to t\n _data['ocn_std'] = data_std\n\n except Exception as e:\n traceback.print_exc()\n print(\"Loading ocean data error. Still keep going...\")\n\n _data['ocn'] = None\n\n data.append(_data)\n #data_ave = dlh.loadAveragedDataByDateRange(dt, dt + avg_hrs, msm, **crop_kwargs, datasets=[\"diag_Tbdgt\", \"diag_2D\", \"diag_state\",], inclusive=\"right\") # inclusive is right because output at time=t is the average from \"before\" to t\n print(\"Data loading complete.\")\n \n cent_lon = 180.0\n\n plot_lon_l = lon_w\n plot_lon_r = lon_e\n plot_lat_b = lat_s\n plot_lat_t = lat_n\n\n proj = ccrs.PlateCarree(central_longitude=cent_lon)\n proj_norm = ccrs.PlateCarree()\n\n \n thumbnail_height = (lat_n - lat_s) / args.deg_lat_per_inch\n thumbnail_width = (lon_e - lon_w) / args.deg_lon_per_inch\n\n figsize, gridspec_kw = tool_fig_config.calFigParams(\n w = thumbnail_width,\n h = thumbnail_height,\n wspace = 2.5,\n hspace = 0.5,\n w_left = 1.0,\n w_right = 1.5,\n h_bottom = 1.0,\n h_top = 1.0,\n ncol = ncol,\n nrow = nrow,\n )\n\n fig, ax = plt.subplots(\n nrow, ncol,\n figsize=figsize,\n subplot_kw=dict(projection=proj, aspect=\"auto\"),\n gridspec_kw=gridspec_kw,\n constrained_layout=False,\n squeeze=False,\n )\n \n \n fig.suptitle(\"%s ~ %s\" % ( beg_dtstr, end_dtstr, ))\n \n \n print(\"Plot control simulation: \", args.sim_names[0])\n ref_ax = ax[:, 0]\n ref_data = data[0]\n\n\n ps_ref = ref_data[\"atm\"][\"PSFC\"]\n for j, _ax in enumerate(ref_ax): \n \n category, varname = plot_variables[j]\n plot_info = plot_infos[category][varname]\n\n if ref_data[category] is None:\n print(\"Blank data encountered. 
Skip this one.\")\n fig.delaxes(_ax)\n continue\n\n\n var_ref = ref_data[category][varname] / plot_info['factor']\n\n mappable = _ax.contourf(\n coords[category]['lon'],\n coords[category]['lat'],\n var_ref,\n levels=plot_info['ctl']['contourf_levs'],\n transform=proj_norm,\n extend=\"both\",\n cmap=plot_info['ctl']['contourf_cmap'],\n )\n\n cax = tool_fig_config.addAxesNextToAxes(fig, _ax, \"right\", thickness=0.03, spacing=0.05)\n cb = plt.colorbar(mappable, cax=cax, ticks=plot_info['ctl']['contourf_ticks'], orientation=\"vertical\", pad=0.0)\n cb.ax.set_ylabel(\" %s [ %s ]\" % (plot_info[\"label\"], plot_info[\"unit\"]))\n\n\n # Extra plot for current\n if category == \"ocn\" and varname == \"SSC\":\n\n print(\"Plot STREAMPLOT for SSC\")\n \n _ax.streamplot(\n coords['ocn']['lon'],\n coords['ocn']['lat'],\n ref_data['ocn']['Usfc'],\n ref_data['ocn']['Vsfc'],\n color='dodgerblue',\n linewidth=1,\n transform=proj_norm,\n )\n\n\n # Extra plot for current\n if category == \"atm\" and varname == \"WIND10\":\n\n print(\"Plot STREAMPLOT for SSC\")\n \n _ax.streamplot(\n coords['atm']['lon'],\n coords['atm']['lat'],\n ref_data['atm']['U10'],\n ref_data['atm']['V10'],\n color='dodgerblue',\n linewidth=1,\n transform=proj_norm,\n )\n\n\n\n\n cs = _ax.contour(\n coords['atm']['lon'],\n coords['atm']['lat'],\n ps_ref / 1e2,\n levels=levs_ps,\n transform=proj_norm,\n colors=\"black\",\n linewidths = 1.0,\n )\n\n #plt.clabel(cs, fmt= ( \"%d\" if np.all(levs_ps % 1 != 0) else \"%.1f\" ))\n\n\n ref_ax[0].set_title(args.sim_names[0])\n\n\n for i in range(1, len(args.input_dirs)):\n\n print(\"Plot diff simulation: \", args.sim_names[i])\n \n case_ax = ax[:, i]\n _data = data[i]\n \n\n \n ps_diff = _data[\"atm\"][\"PSFC\"] - ps_ref \n for j, _ax in enumerate(case_ax): \n \n category, varname = plot_variables[j]\n plot_info = plot_infos[category][varname]\n\n if _data[category] is None:\n print(\"Blank data encountered. Skip this one.\")\n fig.delaxes(_ax)\n continue\n\n var_mean1 = ref_data[category][varname]\n var_mean2 = _data[category][varname]\n\n var_diff = ( var_mean2 - var_mean1 ) / plot_info['factor']\n mappable = _ax.contourf(\n coords[category]['lon'],\n coords[category]['lat'],\n var_diff,\n levels=plot_info['diff']['contourf_levs'],\n transform=proj_norm,\n extend=\"both\",\n cmap=plot_info['diff']['contourf_cmap'],\n )\n #cax = tool_fig_config.addAxesNextToAxes(fig, _ax, \"right\", thickness=0.03, spacing=0.05)\n #cb = plt.colorbar(mappable, cax=cax, ticks=plot_info['diff']['contourf_ticks'], orientation=\"vertical\", pad=0.0)\n #cb.ax.set_ylabel(\" %s [ %s ]\" % (plot_info[\"label\"], plot_info[\"unit\"]))\n\n cs = _ax.contour(\n coords['atm']['lon'],\n coords['atm']['lat'],\n ps_diff / 1e2, \n levels=levs_ps_diff, \n transform=proj_norm, \n extend=\"both\", \n colors=\"black\",\n linewidths = 1.0,\n )\n \n #plt.clabel(cs, fmt= ( \"%d\" if np.all(levs_ps % 1 != 0) else \"%.1f\" ))\n \n if args.is_ensemble:\n stdcat = \"%s_std\" % category\n if stdcat in ref_data and varname in ref_data[stdcat]:\n\n print(\"Plotting significancy ... 
of variable %s \" % (varname,))\n var_std1 = ref_data[stdcat][varname]\n var_std2 = _data[stdcat][varname]\n\n # Doing T-test\n _tscore, _pvalues = scipy.stats.ttest_ind_from_stats(\n var_mean1, var_std1, args.ensemble_members,\n var_mean2, var_std2, args.ensemble_members,\n equal_var=True,\n alternative='two-sided',\n )\n\n cs = _ax.contourf(\n coords[category]['lon'],\n coords[category]['lat'],\n _pvalues,\n cmap=None,\n colors='none',\n levels=[-1, args.pvalue_threshold],\n hatches=[\"..\"],\n transform=proj_norm, \n )\n\n # Remove the contour lines for hatches\n for _, collection in enumerate(cs.collections):\n collection.set_edgecolor(\"black\")\n collection.set_linewidth(0.) \n \n\n \n \n case_ax[0].set_title(args.sim_names[i])\n\n\n\n \"\"\"\n _ax.quiver(coords[\"lon\"], coords[\"lat\"], _data.u10.to_numpy(), _data.v10.to_numpy(), scale=200, transform=proj_norm)\n\n cs = _ax.contourf(coords[\"lon\"], coords[\"lat\"], _data['map'], colors='none', levels=[0, 0.5, np.inf], hatches=[None, \".\"], transform=proj_norm)\n\n # Remove the contour lines for hatches \n for _, collection in enumerate(cs.collections):\n collection.set_edgecolor(\"red\")\n \"\"\"\n \n \n\n for _ax in ax.flatten():\n\n if _ax is None:\n\n # This axis has been deleted\n continue\n\n _ax.set_global()\n #__ax.gridlines()\n _ax.coastlines()#color='gray')\n _ax.set_extent([plot_lon_l, plot_lon_r, plot_lat_b, plot_lat_t], crs=proj_norm)\n\n gl = _ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=1, color='gray', alpha=0.5, linestyle='--')\n\n gl.xlabels_top = False\n gl.ylabels_right = False\n\n #gl.xlocator = mticker.FixedLocator(np.arange(-180, 181, 30))\n #gl.xlocator = mticker.FixedLocator([120, 150, 180, -150, -120])#np.arange(-180, 181, 30))\n #gl.ylocator = mticker.FixedLocator([10, 20, 30, 40, 50])\n \n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n\n\n print(\"Output file: \", output_filename)\n fig.savefig(output_filename, dpi=200)\n\n if args.no_display is False:\n plt.show()\n \n plt.close(fig)\n\n return \"DONE\" \n\nfailed_dates = []\nwith Pool(processes=args.nproc) as pool:\n\n input_args = []\n for i, beg_dt in enumerate(dts):\n \n end_dt = beg_dt + avg_hrs\n beg_dtstr = beg_dt.strftime(\"%Y-%m-%d_%H\")\n end_dtstr = end_dt.strftime(\"%Y-%m-%d_%H\")\n\n output_filename = \"%s/wrf_comparison_avg-%d_%s.png\" % (args.output_dir, args.avg_hrs, beg_dtstr)\n\n if args.overwrite is False and os.path.exists(output_filename):\n print(\"[%s] File %s already exists. 
Do not do this job.\" % (beg_dtstr, output_filename))\n\n else:\n input_args.append((beg_dt, end_dt, output_filename))\n\n \n result = pool.starmap(workWrap, input_args)\n\n", "repo_name": "meteorologytoday/MITgcm-diagnostics", "sub_path": "useful_code/plot_code/plot_atm_ocn.py", "file_name": "plot_atm_ocn.py", "file_ext": "py", "file_size_in_byte": 27491, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "WRFDiag.wrf_load_helper.engine", "line_number": 16, "usage_type": "attribute"}, {"api_name": "WRFDiag.wrf_load_helper", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 50, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 108, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 127, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 158, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 162, "usage_type": "call"}, {"api_name": "pandas.Timedelta", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 273, 
"usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 301, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 400, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 401, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 450, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.rc", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 457, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 471, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 480, "usage_type": "call"}, {"api_name": "WRFDiag.wrf_load_helper.loadWRFDataFromDir", "line_number": 515, "usage_type": "call"}, {"api_name": "WRFDiag.wrf_load_helper", "line_number": 515, "usage_type": "name"}, {"api_name": "pandas.Timedelta", "line_number": 515, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 534, "usage_type": "call"}, {"api_name": "WRFDiag.wrf_load_helper.loadWRFDataFromDir", "line_number": 541, "usage_type": "call"}, {"api_name": "WRFDiag.wrf_load_helper", "line_number": 541, "usage_type": "name"}, {"api_name": "pandas.Timedelta", "line_number": 541, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 545, "usage_type": "call"}, {"api_name": "MITgcmDiag.data_loading_helper.MITgcmSimMetadata", "line_number": 562, "usage_type": "call"}, {"api_name": "MITgcmDiag.data_loading_helper", "line_number": 562, "usage_type": "name"}, {"api_name": "MITgcmDiff.loadFunctions.loadCoordinateFromFolderAndWithRange", "line_number": 563, "usage_type": "call"}, {"api_name": "MITgcmDiff.loadFunctions", "line_number": 563, "usage_type": "name"}, {"api_name": "MITgcmDiag.data_loading_helper.loadAveragedDataByDateRange", "line_number": 574, "usage_type": "call"}, {"api_name": "MITgcmDiag.data_loading_helper", "line_number": 574, "usage_type": "name"}, {"api_name": "MITgcmDiag.data_loading_helper.loadAveragedDataByDateRange", "line_number": 584, "usage_type": "call"}, {"api_name": "MITgcmDiag.data_loading_helper", "line_number": 584, "usage_type": "name"}, {"api_name": "traceback.print_exc", "line_number": 588, "usage_type": "call"}, {"api_name": 
"cartopy.crs.PlateCarree", "line_number": 604, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 604, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 605, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 605, "usage_type": "name"}, {"api_name": "tool_fig_config.calFigParams", "line_number": 611, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 624, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 624, "usage_type": "name"}, {"api_name": "tool_fig_config.addAxesNextToAxes", "line_number": 666, "usage_type": "call"}, {"api_name": "MITgcmDiff.calBudget", "line_number": 667, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 667, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 667, "usage_type": "name"}, {"api_name": "MITgcmDiff.calBudget.ax.set_ylabel", "line_number": 668, "usage_type": "call"}, {"api_name": "MITgcmDiff.calBudget.ax", "line_number": 668, "usage_type": "attribute"}, {"api_name": "MITgcmDiff.calBudget", "line_number": 668, "usage_type": "name"}, {"api_name": "scipy.stats.ttest_ind_from_stats", "line_number": 780, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 780, "usage_type": "attribute"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 834, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 834, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LONGITUDE_FORMATTER", "line_number": 844, "usage_type": "name"}, {"api_name": "cartopy.mpl.gridliner.LATITUDE_FORMATTER", "line_number": 845, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 854, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 854, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 856, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 856, "usage_type": "name"}, {"api_name": "multiprocessing.Pool", "line_number": 861, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 872, "usage_type": "call"}, {"api_name": "os.path", "line_number": 872, "usage_type": "attribute"}]} +{"seq_id": "18075351374", "text": "import pygame\nfrom datetime import date, datetime\n\n########## LOCAL MODULES ##########\nfrom files.vars import Scene\nimport files.bucle as b\nimport files.import_imp\nfrom files.fonts import *\nfrom files.UI.Text import Text\nimport files.calendar.calendar_engine as c\n\ndef update_current_date():\n\tglobal cur_year, cur_month, cur_day\n\tcurrent_date = date.today()\n\n\t# Keep updating current dates\n\tcur_year = int(current_date.strftime(\"%Y\"))\n\tcur_month = int(current_date.strftime(\"%m\"))\n\tcur_day = int(current_date.strftime(\"%d\"))\n\nupdate_current_date()\n\n# Month/Year where the user is\ncalendar_year = cur_year\ncalendar_month = cur_month\n\ncalendar_1 = c.Calendar(year=cur_year, month=cur_month, day=cur_day)\n\ndef Draw(events):\n\tglobal calendar_year, calendar_month, calendar_day\n\t\n\tupdate_current_date()\n\n\t# Update the month/year where the user is\n\tcalendar_1.set_UI_date(\"Month\", value=calendar_month, init=True)\n\tcalendar_1.set_UI_date(\"Year\", value=calendar_year, init=True)\n\t\n\t# Update the current date\n\tcalendar_1.set_real_date(\"Month\", value=cur_month)\n\tcalendar_1.set_real_date(\"Year\", value=cur_year)\n\tcalendar_1.set_real_date(\"Day\", value=cur_day)\n\n\tcalendar_1.update(events)\n\n\t# Draw time\n\ttime = 
datetime.now()\n\ttime_string = Text(0, 640, time.strftime(\"%H:%M:%S\"), Arial_40, (255,255,255), lock=\"x\")\n\ttime_string.draw()\n\n\n\tfor event in events:\n\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\tif calendar_month == 12:\n\t\t\t\t\tcalendar_year += 1\n\t\t\t\t\tcalendar_month = 1\n\n\t\t\t\telse:\n\t\t\t\t\tcalendar_month += 1\t\t\t\t\n\t\t\t\t\t\n\t\t\telif event.key == pygame.K_LEFT:\n\t\t\t\tif calendar_month == 1:\n\t\t\t\t\tcalendar_year -= 1\n\t\t\t\t\tcalendar_month = 12\n\n\t\t\t\telse:\n\t\t\t\t\tcalendar_month -= 1\n\n\t\t\t", "repo_name": "EDUATO/calendar", "sub_path": "files/draw.py", "file_name": "draw.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.date.today", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "name"}, {"api_name": "files.calendar.calendar_engine.Calendar", "line_number": 27, "usage_type": "call"}, {"api_name": "files.calendar.calendar_engine", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "files.UI.Text.Text", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "2437103627", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 4 16:42:12 2021\r\n\r\n@author: Zsh\r\n\"\"\"\r\n\r\nimport librosa\r\nimport glob\r\nimport os\r\nimport pandas as pd\r\nimport random\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom torch.utils.data import Dataset, DataLoader, TensorDataset\r\nimport torch\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport pickle\r\nimport se_resnet\r\n\r\n# -------------------Data Preprocessing-------------------\r\n\r\ntrain=pd.read_csv(\"dataset\\\\train_list1.csv\",sep=\",\")\r\ntest=pd.read_csv(\"dataset\\\\test_list.csv\",sep=\",\")\r\n\r\n# Randomly plot an audio\r\n# i=random.choice(train.index)\r\n# audio_name=train.filename[i]\r\n# path=str(audio_name)\r\n# print(\"Label: \",train.label[i])\r\n# x, sr=librosa.load(str(audio_name))\r\n# plt.figure(figsize=(12,4))\r\n# librosa.display.waveplot(x,sr=16000)\r\n\r\n# -------------------Feature Extraction-------------------\r\n# def parser(row):\r\n# file_name=row.filename\r\n# try:\r\n# x, sr=librosa.load(file_name)\r\n# mfccs=np.mean(librosa.feature.mfcc(y=x, sr=sr, n_mfcc=90),axis=1)\r\n# stfts = np.abs(librosa.stft(x,n_fft=112))\r\n# # chromas = np.mean(librosa.feature.chroma_stft(S=stfts, sr=sr).T)\r\n# # mels = np.mean(librosa.feature.melspectrogram(x, sr=sr),axis=1)\r\n# # contrasts = np.mean(librosa.feature.spectral_contrast(S=stfts, sr=sr).T)\r\n# print(\"processing\")\r\n \r\n# except Exception as e:\r\n# print(\"error\")\r\n# return None,None\r\n# feature_mfcc=mfccs\r\n# feature_stft=np.mean(stfts,axis=1)\r\n# # feature_mel=mels\r\n# feature_both=np.concatenate((feature_mfcc,feature_stft),axis=0)\r\n# # feature_chroma=chromas\r\n# # feature_contrast=np.mean(contrasts)\r\n \r\n# label=row.label\r\n# return [feature_both,label]\r\n\r\n# temp_train=train.apply(parser,axis=1,result_type=\"expand\")\r\n# temp_test=test.apply(parser,axis=1,result_type=\"expand\")\r\n\r\n# Store temp data in plk file\r\n# with open(\"data_train_BOTH.plk\",'wb') as f:\r\n# 
pickle.dump(temp_train,f)\r\n# with open(\"data_test_BOTH.plk\",'wb') as f:\r\n# pickle.dump(temp_test,f)\r\n\r\n# # Save processed data to csv file\r\n# with open(\"data_train_THREE.plk\",'rb') as f:\r\n# temp_train=pickle.load(f)\r\n# temp_train.to_csv('test_THREE.csv', sep=',',header=False,index=False)\r\n# with open(\"data_test_BOTH.plk\",'rb') as f:\r\n# temp_test=pickle.load(f)\r\n# temp_test.to_csv('test_BOTH.csv', sep=',',header=False,index=False)\r\n\r\nclass AudioDataset(Dataset):  # renamed so it no longer shadows torch.utils.data.Dataset, which it inherits from\r\n def __init__(self,xy,use_gpu):\r\n xy=xy.values\r\n self.x=xy[:,[0]]\r\n self.y=xy[:,[1]]\r\n list_x=[]\r\n for row in self.x:\r\n list_x.append(row.tolist()[0])\r\n self.x=torch.tensor(list_x)\r\n \r\n list_y=[]\r\n for row in self.y:\r\n list_y.append(row.tolist()[0])\r\n self.y=torch.tensor(list_y)\r\n \r\n self.len=xy.shape[0]\r\n if use_gpu:\r\n self.x=self.x.to(\"cuda\")\r\n self.y=self.y.to(\"cuda\")\r\n \r\n def __getitem__(self,index):\r\n return self.x[index], self.y[index]\r\n\r\n def __len__(self):\r\n return self.len\r\n\r\nwith open(\"data_train_SC.plk\",'rb') as f:\r\n temp=pickle.load(f)\r\n# print(len(temp[0][0]))\r\ntrain_dataset=AudioDataset(temp,torch.cuda.is_available())\r\n\r\nwith open(\"data_test_SC.plk\",'rb') as f2:\r\n temp2=pickle.load(f2)\r\ntest_dataset=AudioDataset(temp2,torch.cuda.is_available())\r\n\r\ntrain_loader=DataLoader(dataset=train_dataset,batch_size=64,shuffle=True,num_workers=2)\r\ntest_loader=DataLoader(dataset=test_dataset,batch_size=64,shuffle=False,num_workers=2)\r\n\r\n# Channel Encoder (Classifier)\r\n# class Model(torch.nn.Module):\r\n# def __init__(self):\r\n# super(Model,self).__init__()\r\n# self.fc1=torch.nn.Linear(384,320)\r\n# self.fc2=torch.nn.Linear(320,256)\r\n# self.fc3=torch.nn.Linear(256,192)\r\n# self.fc4=torch.nn.Linear(192,128)\r\n# self.fc5=torch.nn.Linear(128,96)\r\n# self.fc6=torch.nn.Linear(96,80)\r\n# self.fc7=torch.nn.Linear(80,64)\r\n# self.fc8=torch.nn.Linear(64,50)\r\n \r\n# def forward(self,x):\r\n# x=x.view(-1,384)\r\n# x=F.relu(self.fc1(x))\r\n# # x=self.dp(x)\r\n# x=F.relu(self.fc2(x))\r\n# x=F.relu(self.fc3(x))\r\n# x=F.relu(self.fc4(x))\r\n# x=F.relu(self.fc5(x))\r\n# x=F.relu(self.fc6(x))\r\n# x=F.relu(self.fc7(x))\r\n# return self.fc8(x)\r\n\r\nmodel = getattr(se_resnet,\"se_resnet_50\")(num_classes = 50)\r\ncriterion=torch.nn.CrossEntropyLoss()\r\noptimizer=torch.optim.Adam(model.parameters())\r\n\r\ndef test():\r\n correct=0\r\n total=0\r\n\r\n with torch.no_grad():\r\n for data in test_loader:\r\n x,y_test=data\r\n x=x.view(-1,3,7,7)\r\n y_test_pred=model(x)\r\n _,predicted=torch.max(y_test_pred.data,dim=1)\r\n \r\n y_test_list=y_test.cpu().numpy()  # move to CPU first so .numpy() also works on GPU tensors\r\n predict_list=predicted.cpu().numpy()\r\n \r\n total+=len(predict_list)\r\n for i in range(len(predict_list)):\r\n if y_test_list[i]==predict_list[i]:\r\n correct+=1\r\n acc=1.0*correct/total\r\n print('Accuracy: %.5f' % acc)\r\n return acc\r\n\r\nloss_value=[] \r\nacc_value=[]\r\nif __name__=='__main__':\r\n if torch.cuda.is_available():\r\n model=model.cuda()\r\n criterion=criterion.cuda()\r\n for epoch in range(100):\r\n batch_loss=0\r\n for batch_idx,data in enumerate(train_loader,0):\r\n inputs,labels=data\r\n inputs=inputs.view(-1,3,7,7)\r\n optimizer.zero_grad()\r\n y_pred=model(inputs)\r\n loss=criterion(y_pred,labels)\r\n \r\n batch_loss+=loss.item()\r\n \r\n loss.backward()\r\n optimizer.step()\r\n loss_value.append(batch_loss/64.0)\r\n print(\"epoch {}, Loss: {}\".format(epoch+1,batch_loss))\r\n acc=test()\r\n acc_value.append(acc)\r\n \r\n 
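# After training, plot the per-epoch loss and test-accuracy curves.\r\n 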
plt.plot(range(len(loss_value)),loss_value)\r\n plt.show()\r\n plt.plot(range(len(acc_value)),acc_value)\r\n plt.show()\r\n", "repo_name": "hbcfzy/FRSC", "sub_path": "FRSC-part1/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 6160, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 113, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 172, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}]} +{"seq_id": "2031908929", "text": "import requests\nimport json\nfrom utils import *\nimport random\n\ndef refreshAuth():\n authurl = 'https://api.gfycat.com/v1/oauth/token'\n print(\"Refreshing OAUTH data.....\")\n with open('gifyCatData.txt') as json_file: \n data = json.load(json_file)\n \n payload = data['data'][0]\n auth = requests.post(authurl, data=str(payload))\n print(\"Status: \", auth.status_code)\n r = json.loads((auth.content).decode())\n accessToken = r['access_token']\n\n return accessToken\n\ndef makeSearchReq(searchWord):\n url = 'https://api.gfycat.com/v1/gfycats/search?search_text=' + searchWord\n data = open_file('currentGifKeys.txt')\n accessToken = data[0]\n\n headers = {\n \"Authorization\" : accessToken\n }\n\n searchReq = requests.get(url, headers=headers)\n\n while searchReq.status_code != 200:\n accessToken = refreshAuth()\n headers = {\n \"Authorization\" : accessToken\n }\n \n searchReq = requests.get(url, headers=headers)\n\n print(\"search successful...\")\n 
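# Persist the (possibly refreshed) token so later requests can reuse it.\n 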
saveCurrentKey(accessToken)\n\n searchData = json.loads(searchReq.content.decode('utf-8'))\n \n return searchData\n\n\ndef saveCurrentKey(accessTok):\n # write through a context manager so the file handle is always closed\n with open('currentGifKeys.txt', 'w') as key_file:\n key_file.write(accessTok)\n\n\ndef getGifLink(searchResults):\n gifURLList = []\n try:\n for item in searchResults['gfycats']:\n for container in item:\n if container == 'url':\n gifURLList.append(item[container])\n\n except Exception: \n return None\n\n if len(gifURLList) == 0:\n return None\n\n randNum = random.randint(0, len(gifURLList)-1)\n\n url = gifURLList[randNum]\n\n return url\n\n\ndef mainRun(searchWord):\n data = makeSearchReq(searchWord)\n url = getGifLink(data)\n\n return url", "repo_name": "tehzwen/discordBot", "sub_path": "gifs.py", "file_name": "gifs.py", "file_ext": "py", "file_size_in_byte": 1785, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "35109311011", "text": "import tripplanner\nimport unittest\nimport json\nfrom pymongo import MongoClient\nimport base64\n\n\ndef auth_header(username, password):\n credentials = '{0}:{1}'.format(username, password).encode('utf-8')\n encode_login = base64.b64encode(credentials).decode()\n return dict(Authorization=\"Basic \" + encode_login)\n\n\nclass FlaskrTestCase(unittest.TestCase):\n\n def setUp(self):\n self.app = tripplanner.app.test_client()\n # Run app in testing mode to retrieve exceptions and stack traces\n tripplanner.app.config['TESTING'] = True\n\n # Inject test database into application\n mongo = MongoClient('localhost', 27017)\n db = mongo.test_database\n tripplanner.app.db = db\n\n # Drop collection (significantly faster than dropping entire db)\n db.drop_collection('users')\n db.drop_collection('trips')\n\n def test_userdb(self):\n response = self.app.post('/users/',\n data=json.dumps(dict(username='admin',\n password='secret')),\n content_type='application/json')\n responseJSON = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n trip_data = dict(trip='africa', waypoints=['egypt', 'ethiopia', 'south africa'])\n response = 
self.app.post('/trips/',\n data=json.dumps(trip_data),\n content_type='application/json',\n headers=auth_header('admin', 'secret'))\n responseJSON = json.loads(response.data.decode())\n postedObjectID = responseJSON['_id']\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n # update existing trip with auth\n up_data = dict(trip='europe', waypoints=['brussels', 'paris', 'amsterdam'])\n response = self.app.put('trips/'+postedObjectID,\n data=json.dumps(up_data),\n content_type='application/json',\n headers=auth_header('admin', 'secret'))\n responseJSON = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n # get existing trip with auth\n response = self.app.get('trips/'+postedObjectID,\n headers=auth_header('admin', 'secret'))\n responseJSON = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n # get non existing trip\n response = self.app.get('trips/57389d84496254540367aa0d',\n headers=auth_header('admin', 'secret'))\n responseJSON = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n print(responseJSON)\n\n # delete trip with auth\n response = self.app.delete('trips/'+postedObjectID,\n headers=auth_header('admin', 'secret'))\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n # delete non existing trip\n response = self.app.delete('trips/68389d84496254540367aa0d',\n headers=auth_header('admin', 'secret'))\n self.assertEqual(response.status_code, 200)\n print(responseJSON)\n\n # get deleted trip\n response = self.app.get('trips/'+postedObjectID,\n headers=auth_header('admin', 'secret'))\n self.assertEqual(response.status_code, 404)\n print(responseJSON)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "MakeSchool-17/trip-planner-flask-backend-kvncaldwll", "sub_path": "tripuserauthtest.py", "file_name": "tripuserauthtest.py", "file_ext": "py", "file_size_in_byte": 4929, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base64.b64encode", "line_number": 10, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "tripplanner.app.test_client", "line_number": 17, "usage_type": "call"}, {"api_name": "tripplanner.app", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tripplanner.app", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 22, "usage_type": "call"}, {"api_name": "tripplanner.app", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 62, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 68, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 82, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 89, "usage_type": "call"}, {"api_name": "json.loads", 
"line_number": 96, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 120, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "40189142063", "text": "import bs4\nimport requests\nimport datetime\n\n# data = requests.get('http://kurstenge.kz/')\n#\n# # print(data.text)\n#\n# dom = bs4.BeautifulSoup(data.text)\n# print(dom.select('td')[0].getText)\n\n\nlistPeremena = [ '8:00-8:50','9:05-9:55','10:10-11:00','11:15-12:05','12:20-13:10','13:25-14:15','14:30-15:20','15:35-16:25','17:45-18:35']\n\n\n\n\n#\n# hourNew = 8\n#\n# minNew = 50\n#\n# hour = 8\n#\n# minute = 0\n#\n# time = int(time) + minute\n# if time > 60:\n# timeNew = int(time) - 60\n# hour = hour + 1\n# print('До конца осталось')\n# print(timeNew)\n#\n# print(str(hour) + ':' + str(timeNew))\n# minute = minute + time\n# minute = minNew - minute\n# print( 'До звонка осталось ' + str(minute) + ' минут')\n\nlists = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]\n\n# # nowHour = datetime.datetime.now()\n# # nowHour = nowHour.hour\n# nowHour = input('hourss')\n# Hours = int(nowHour) - 8\n# minute = lists[Hours]\n#\n# print(minute)\n#\n# now = datetime.datetime.now()\n#\n# myTime = now.minute\n#\n# newTime = myTime - 50 + int(minute)\n#\n# print('до звонка ' + str(newTime) + ' мин')\n\n\n\nlistZvonok = ['8:00','9:05' ,'10:10' ,'11:15' ,'12:20' ,'13:25' ,'14:30', '15:35', '16:40', '17:45', '18:50' ]\n\nlistMonth = ['января','февраля','марта','апреля','мая','июня','июля','августа','сентября','октября','ноября','декабря']\nnow = datetime.datetime.now()\nday = now.day\nmonth = now.month\nmonth = listMonth[month - 1]\nhour = now.hour\nminute = now.minute\nyear = now.year\nretrn = str(day) + ' ' + month.strip()+ ' ' + str(year) + ' года'+ ' ' + str(hour) + ':' + str(minute)\nprint(retrn)\n\n\n", "repo_name": "KUUBIK/Telegram", "sub_path": "PyTime.py", "file_name": "PyTime.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "39617102626", "text": "from typing import TYPE_CHECKING\n\nimport aio_pika\n\nif TYPE_CHECKING:\n from app.web.app import Application\n\n\nclass LoggerPublisher:\n def __init__(self, app: \"Application\"):\n self.app = app\n\n async def info(self, message: str):\n conn = await aio_pika.connect_robust(self.app.config.rabbit.url)\n async with conn:\n channel = await conn.channel()\n logs_exchange = await channel.declare_exchange(\"logs\")\n await logs_exchange.publish(\n message=aio_pika.Message(f'{message}'.encode()),\n routing_key=\"info\"\n )\n self.app.logger.info(\"Logger publisher send message\")", "repo_name": "Pavel418890/aiohttp_tgbot", "sub_path": "app/store/rabbitmq/publisher/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 5, "usage_type": "name"}, {"api_name": "app.web.app", "line_number": 11, "usage_type": "name"}, {"api_name": "aio_pika.connect_robust", "line_number": 14, "usage_type": "call"}, {"api_name": "aio_pika.Message", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "26341592255", "text": "import time\n\nimport pytest\n\nfrom 
ingenialink.ethernet.network import EthernetNetwork\n\n\nMONITORING_CH_DATA_SIZE = 4\nMONITORING_NUM_SAMPLES = 100\nDISTURBANCE_CH_DATA_SIZE = 4\n\n\n@pytest.mark.no_connection\ndef test_connect_to_virtual(virtual_drive, read_config):\n server = virtual_drive\n time.sleep(1)\n net = EthernetNetwork()\n protocol_contents = read_config[\"ethernet\"]\n servo = net.connect_to_slave(server.ip, protocol_contents[\"dictionary\"], server.port)\n servo.write(\"CL_AUX_FBK_SENSOR\", 4)\n servo.write(\"DIST_CFG_REG0_MAP\", 4, 0)\n\n\n@pytest.mark.parametrize(\n \"reg, value, subnode\", [(\"CL_AUX_FBK_SENSOR\", 4, 1), (\"DIST_CFG_REG0_MAP\", 4, 0)]\n)\n@pytest.mark.no_connection\ndef test_virtual_drive_write_read(virtual_drive, read_config, reg, value, subnode):\n server = virtual_drive\n\n virtual_net = EthernetNetwork()\n protocol_contents = read_config[\"ethernet\"]\n virtual_servo = virtual_net.connect_to_slave(\n server.ip, protocol_contents[\"dictionary\"], server.port\n )\n\n virtual_servo.write(reg, value, subnode)\n response = virtual_servo.read(reg, subnode)\n assert response == value\n\n\n@pytest.mark.ethernet\n@pytest.mark.parametrize(\n \"reg, value, subnode\", [(\"CL_AUX_FBK_SENSOR\", 4, 1), (\"DIST_CFG_REG0_MAP\", 4, 0)]\n)\ndef test_virtual_drive_write_read_compare_responses(\n connect_to_slave, virtual_drive, read_config, reg, value, subnode\n):\n servo, net = connect_to_slave\n server = virtual_drive\n\n virtual_net = EthernetNetwork()\n protocol_contents = read_config[\"ethernet\"]\n virtual_servo = virtual_net.connect_to_slave(\n server.ip, protocol_contents[\"dictionary\"], server.port\n )\n\n virtual_response = virtual_servo.write(reg, value, subnode)\n response = servo.write(reg, value, subnode)\n assert response == virtual_response\n\n response = servo.read(reg, subnode)\n virtual_response = virtual_servo.read(reg, subnode)\n assert response == virtual_response\n\n new_value = virtual_response + 1\n virtual_servo.write(reg, new_value, subnode)\n saved_value = virtual_servo.read(reg, subnode)\n assert saved_value == new_value\n\n\n@pytest.mark.no_connection\n@pytest.mark.parametrize(\"divisor\", [1, 2])\ndef test_virtual_monitoring(virtual_drive, read_config, divisor):\n server = virtual_drive\n\n net = EthernetNetwork()\n protocol_contents = read_config[\"ethernet\"]\n servo = net.connect_to_slave(server.ip, protocol_contents[\"dictionary\"], server.port)\n\n servo.monitoring_disable()\n registers_key = [\"CL_POS_SET_POINT_VALUE\", \"CL_VOL_Q_SET_POINT\"]\n subnode = 1\n for idx, key in enumerate(registers_key):\n reg = servo._get_reg(key, subnode=1)\n address = reg.address\n servo.monitoring_set_mapped_register(\n idx, address, subnode, reg.dtype.value, MONITORING_CH_DATA_SIZE\n )\n\n servo.write(\"MON_DIST_FREQ_DIV\", divisor, subnode=0)\n servo.write(\"MON_CFG_SOC_TYPE\", 1, subnode=0)\n servo.write(\"MON_CFG_WINDOW_SAMP\", MONITORING_NUM_SAMPLES, subnode=0)\n\n servo.monitoring_enable()\n servo.write(\"MON_CMD_FORCE_TRIGGER\", 1, subnode=0)\n time.sleep(0.1)\n servo.monitoring_disable()\n\n servo.monitoring_read_data()\n\n for idx, key in enumerate(registers_key):\n reg = servo._get_reg(key, subnode=1)\n address = reg.address\n subnode = reg.subnode\n data = servo.monitoring_channel_data(idx)\n expected_data = [\n subnode + address + i for i in range(0, MONITORING_NUM_SAMPLES * divisor, divisor)\n ]\n assert data == expected_data\n\n\n@pytest.mark.no_connection\ndef test_virtual_disturbance(virtual_drive, read_config):\n server = virtual_drive\n\n net = EthernetNetwork()\n 
protocol_contents = read_config[\"ethernet\"]\n servo = net.connect_to_slave(server.ip, protocol_contents[\"dictionary\"], server.port)\n servo.disturbance_disable()\n servo.disturbance_remove_all_mapped_registers()\n\n registers_key = [\"CL_POS_SET_POINT_VALUE\", \"CL_VOL_Q_SET_POINT\"]\n subnode = 1\n dtypes = []\n data_arr = []\n for idx, key in enumerate(registers_key):\n reg = servo._get_reg(key, subnode=1)\n address = reg.address\n servo.disturbance_set_mapped_register(idx, address, subnode, reg.dtype.value, 4)\n dtypes.append(reg.dtype)\n data_arr.append([0, -1, 2, 3])\n\n channels = list(range(len(registers_key)))\n servo.disturbance_write_data(channels, dtypes, data_arr)\n servo.disturbance_enable()\n\n for channel in range(len(registers_key)):\n assert server._VirtualDrive__disturbance.channels[channel][\"data\"] == data_arr[channel]\n", "repo_name": "ingeniamc/ingenialink-python", "sub_path": "tests/test_virtual_drive.py", "file_name": "test_virtual_drive.py", "file_ext": "py", "file_size_in_byte": 4605, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "52", "api": [{"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "ingenialink.ethernet.network.EthernetNetwork", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 13, "usage_type": "attribute"}, {"api_name": "ingenialink.ethernet.network.EthernetNetwork", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ingenialink.ethernet.network.EthernetNetwork", "line_number": 52, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 43, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 43, "usage_type": "attribute"}, {"api_name": "ingenialink.ethernet.network.EthernetNetwork", "line_number": 77, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 97, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 73, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ingenialink.ethernet.network.EthernetNetwork", "line_number": 117, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 113, "usage_type": "attribute"}]} +{"seq_id": "12505962015", "text": "import logging\nimport json\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\nfrom dashboard.common import request_handler\nfrom dashboard.common import utils\nfrom dashboard.common import xsrf\nfrom dashboard.models import table_config\n\n\nclass CreateHealthReportHandler(request_handler.RequestHandler):\n\n def get(self):\n \"\"\"Renders the UI with the form fields.\"\"\"\n self.RenderStaticHtml('create_health_report.html')\n\n def post(self):\n \"\"\"POSTS the data to the datastore.\"\"\"\n\n user = users.get_current_user()\n if not user:\n self.response.out.write(json.dumps({'error': 'User not logged in.'}))\n return\n if not utils.IsInternalUser():\n self.response.out.write(json.dumps(\n {'error':\n 'Unauthorized access, please use chromium account to login.'}))\n return\n\n get_token = self.request.get('getToken')\n 
get_table_config_list = self.request.get('getTableConfigList')\n get_table_config_details = self.request.get('getTableConfigDetails')\n if get_token == 'true':\n values = {}\n self.GetDynamicVariables(values)\n self.response.out.write(json.dumps({\n 'xsrf_token': values['xsrf_token'],\n }))\n elif get_table_config_list:\n self._GetTableConfigList()\n elif get_table_config_details:\n self._GetTableConfigDetails(get_table_config_details)\n else:\n self._CreateTableConfig()\n\n def _GetTableConfigList(self):\n query = table_config.TableConfig.query()\n table_config_list = query.fetch(keys_only=True)\n return_list = []\n for config in table_config_list:\n return_list.append(config.id())\n self.response.out.write(json.dumps({\n 'table_config_list': return_list,\n }))\n\n def _GetTableConfigDetails(self, config_name):\n config_entity = ndb.Key('TableConfig', config_name).get()\n if config_entity:\n master_bot_list = []\n for bot in config_entity.bots:\n master_bot_list.append(bot.parent().string_id() + '/' + bot.string_id())\n self.response.out.write(json.dumps({\n 'table_name': config_name,\n 'table_bots': master_bot_list,\n 'table_tests': config_entity.tests,\n 'table_layout': config_entity.table_layout\n }))\n else:\n self.response.out.write(json.dumps({\n 'error': 'Invalid config name.'\n }))\n\n def _CreateTableConfig(self):\n \"\"\"Creates a table config. Writes a valid name or an error message.\"\"\"\n self._ValidateToken()\n name = self.request.get('tableName')\n master_bot = self.request.get('tableBots').splitlines()\n tests = self.request.get('tableTests').splitlines()\n table_layout = self.request.get('tableLayout')\n override = int(self.request.get('override'))\n user = users.get_current_user()\n if not name or not master_bot or not tests or not table_layout or not user:\n self.response.out.write(json.dumps({\n 'error': 'Please fill out the form entirely.'\n }))\n return\n\n try:\n created_table = table_config.CreateTableConfig(\n name=name, bots=master_bot, tests=tests, layout=table_layout,\n username=user.email(), override=override)\n except table_config.BadRequestError as error:\n self.response.out.write(json.dumps({\n 'error': error.message,\n }))\n logging.error(error.message)\n return\n\n\n if created_table:\n self.response.out.write(json.dumps({\n 'name': name,\n }))\n else:\n self.response.out.write(json.dumps({\n 'error': 'Could not create table.',\n }))\n logging.error('Could not create table.')\n\n def _ValidateToken(self):\n user = users.get_current_user()\n token = str(self.request.get('xsrf_token'))\n if not user or not xsrf._ValidateToken(token, user):\n self.abort(403)\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/catapult/dashboard/dashboard/create_health_report.py", "file_name": "create_health_report.py", "file_ext": "py", "file_size_in_byte": 3797, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dashboard.common.request_handler.RequestHandler", "line_number": 13, "usage_type": "attribute"}, {"api_name": "dashboard.common.request_handler", "line_number": 13, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 22, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 22, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "dashboard.common.utils.IsInternalUser", "line_number": 26, "usage_type": "call"}, {"api_name": "dashboard.common.utils", 
"line_number": 26, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 27, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "dashboard.models.table_config.TableConfig.query", "line_number": 49, "usage_type": "call"}, {"api_name": "dashboard.models.table_config.TableConfig", "line_number": 49, "usage_type": "attribute"}, {"api_name": "dashboard.models.table_config", "line_number": 49, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 54, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.Key", "line_number": 59, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb", "line_number": 59, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 71, "usage_type": "call"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 83, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 83, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "dashboard.models.table_config.CreateTableConfig", "line_number": 91, "usage_type": "call"}, {"api_name": "dashboard.models.table_config", "line_number": 91, "usage_type": "name"}, {"api_name": "dashboard.models.table_config.BadRequestError", "line_number": 94, "usage_type": "attribute"}, {"api_name": "dashboard.models.table_config", "line_number": 94, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 95, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 110, "usage_type": "call"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 113, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 113, "usage_type": "name"}, {"api_name": "dashboard.common.xsrf._ValidateToken", "line_number": 115, "usage_type": "call"}, {"api_name": "dashboard.common.xsrf", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "13242801259", "text": "from __future__ import with_statement\n\nimport copy\nimport datetime\nimport getpass\nimport logging\nimport os\nimport py_compile\nimport shutil\nfrom StringIO import StringIO\nimport tempfile\nfrom testify import TestCase, assert_equal, assert_gt, assert_in, assert_not_in, assert_raises, setup, teardown, assert_not_equal\n\nfrom mrjob.conf import dump_mrjob_conf\nimport mrjob.emr\nfrom mrjob.emr import EMRJobRunner, describe_all_job_flows, parse_s3_uri\nfrom mrjob.parse import JOB_NAME_RE\nfrom mrjob.util import tar_and_gzip\nfrom tests.mockboto import MockS3Connection, MockEmrConnection, MockEmrObject, MockKey, add_mock_s3_data, DEFAULT_MAX_DAYS_AGO, DEFAULT_MAX_JOB_FLOWS_RETURNED, to_iso8601\nfrom tests.mr_two_step_job import MRTwoStepJob\nfrom tests.quiet import logger_disabled, no_handlers_for_logger\n\ntry:\n import boto\n from mrjob import botoemr\nexcept ImportError:\n boto = None\n botoemr = None\n\n\nclass MockEMRAndS3TestCase(TestCase):\n\n @setup\n def make_mrjob_conf(self):\n _, self.mrjob_conf_path = tempfile.mkstemp(prefix='mrjob.conf.')\n dump_mrjob_conf({'runners': {'emr': {\n 'check_emr_status_every': 0.01,\n 's3_scratch_uri': 's3://walrus/tmp',\n 's3_sync_wait_time': 0.01,\n }}}, open(self.mrjob_conf_path, 'w'))\n\n @setup\n def rm_mrjob_conf(self):\n 
os.unlink(self.mrjob_conf_path)\n\n @setup\n def sandbox_boto(self):\n self.mock_s3_fs = {}\n self.mock_emr_job_flows = {}\n self.mock_emr_failures = {}\n self.mock_emr_output = {}\n\n def mock_boto_connect_s3(*args, **kwargs):\n kwargs['mock_s3_fs'] = self.mock_s3_fs\n return MockS3Connection(*args, **kwargs)\n\n def mock_botoemr_EmrConnection(*args, **kwargs):\n kwargs['mock_s3_fs'] = self.mock_s3_fs\n kwargs['mock_emr_job_flows'] = self.mock_emr_job_flows\n kwargs['mock_emr_failures'] = self.mock_emr_failures\n kwargs['mock_emr_output'] = self.mock_emr_output\n return MockEmrConnection(*args, **kwargs)\n\n self._real_boto_connect_s3 = boto.connect_s3\n boto.connect_s3 = mock_boto_connect_s3\n\n self._real_botoemr_EmrConnection = botoemr.EmrConnection\n botoemr.EmrConnection = mock_botoemr_EmrConnection\n\n @teardown\n def unsandbox_boto(self):\n boto.connect_s3 = self._real_boto_connect_s3\n botoemr.EmrConnection = self._real_botoemr_EmrConnection\n\n def add_mock_s3_data(self, data):\n \"\"\"Update self.mock_s3_fs with a map from bucket name\n to key name to data.\"\"\"\n add_mock_s3_data(self.mock_s3_fs, data)\n\n\nclass EMRJobRunnerEndToEndTestCase(MockEMRAndS3TestCase):\n\n @setup\n def make_tmp_dir_and_mrjob_conf(self):\n self.tmp_dir = tempfile.mkdtemp()\n self.mrjob_conf_path = os.path.join(self.tmp_dir, 'mrjob.conf')\n dump_mrjob_conf({'runners': {'emr': {\n 'check_emr_status_every': 0.01,\n 's3_sync_wait_time': 0.01,\n 'aws_availability_zone': 'PUPPYLAND',\n 'additional_emr_info': {'key': 'value'},\n }}}, open(self.mrjob_conf_path, 'w'))\n\n @teardown\n def rm_tmp_dir(self):\n shutil.rmtree(self.tmp_dir)\n\n def test_end_to_end(self):\n # read from STDIN, a local file, and a remote file\n stdin = StringIO('foo\\nbar\\n')\n\n local_input_path = os.path.join(self.tmp_dir, 'input')\n with open(local_input_path, 'w') as local_input_file:\n local_input_file.write('bar\\nqux\\n')\n\n remote_input_path = 's3://walrus/data/foo'\n self.add_mock_s3_data({'walrus': {'data/foo': 'foo\\n'}})\n\n # setup fake output\n self.mock_emr_output = {('j-MOCKJOBFLOW0', 1): [\n '1\\t\"qux\"\\n2\\t\"bar\"\\n', '2\\t\"foo\"\\n5\\tnull\\n']}\n\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path,\n '-', local_input_path, remote_input_path,\n '--hadoop-input-format', 'FooFormat',\n '--hadoop-output-format', 'BarFormat'])\n mr_job.sandbox(stdin=stdin)\n\n local_tmp_dir = None\n results = []\n\n mock_s3_fs_snapshot = copy.deepcopy(self.mock_s3_fs)\n\n with mr_job.make_runner() as runner:\n assert isinstance(runner, EMRJobRunner)\n\n # make sure that initializing the runner doesn't affect S3\n # (Issue #50)\n assert_equal(mock_s3_fs_snapshot, self.mock_s3_fs)\n\n # make sure AdditionalInfo was JSON-ified from the config file.\n # checked now because you can't actually read it from the job flow\n # on real EMR.\n assert_equal(runner._opts['additional_emr_info'],\n '{\"key\": \"value\"}')\n\n runner.run()\n\n for line in runner.stream_output():\n key, value = mr_job.parse_output_line(line)\n results.append((key, value))\n\n local_tmp_dir = runner._get_local_tmp_dir()\n # make sure cleanup hasn't happened yet\n assert os.path.exists(local_tmp_dir)\n assert any(runner.ls(runner.get_output_dir()))\n\n emr_conn = runner.make_emr_conn()\n job_flow = emr_conn.describe_jobflow(runner.get_emr_job_flow_id())\n assert_equal(job_flow.state, 'COMPLETED')\n name_match = JOB_NAME_RE.match(job_flow.name)\n assert_equal(name_match.group(1), 'mr_two_step_job')\n assert_equal(name_match.group(2), 
getpass.getuser())\n\n # make sure our input and output formats are attached to\n # the correct steps\n assert_in('-inputformat', job_flow.steps[0].args())\n assert_not_in('-outputformat', job_flow.steps[0].args())\n assert_not_in('-inputformat', job_flow.steps[1].args())\n assert_in('-outputformat', job_flow.steps[1].args())\n\n # make sure mrjob.tar.gz is created and uploaded as\n # a bootstrap file\n assert runner._mrjob_tar_gz_path\n mrjob_tar_gz_file_dicts = [\n file_dict for file_dict in runner._files\n if file_dict['path'] == runner._mrjob_tar_gz_path]\n\n assert_equal(len(mrjob_tar_gz_file_dicts), 1)\n\n mrjob_tar_gz_file_dict = mrjob_tar_gz_file_dicts[0]\n assert mrjob_tar_gz_file_dict['name']\n assert_equal(mrjob_tar_gz_file_dict.get('bootstrap'), 'file')\n\n # shouldn't be in PYTHONPATH (we dump it directly in site-packages)\n pythonpath = runner._get_cmdenv().get('PYTHONPATH') or ''\n assert_not_in(mrjob_tar_gz_file_dict['name'],\n pythonpath.split(':'))\n\n assert_equal(sorted(results),\n [(1, 'qux'), (2, 'bar'), (2, 'foo'), (5, None)])\n\n # make sure cleanup happens\n assert not os.path.exists(local_tmp_dir)\n assert not any(runner.ls(runner.get_output_dir()))\n\n # job should get terminated\n emr_conn = runner.make_emr_conn()\n job_flow_id = runner.get_emr_job_flow_id()\n for i in range(10):\n emr_conn.simulate_progress(job_flow_id)\n\n job_flow = emr_conn.describe_jobflow(job_flow_id)\n assert_equal(job_flow.state, 'TERMINATED')\n\n def test_failed_job(self):\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path])\n mr_job.sandbox()\n\n self.add_mock_s3_data({'walrus': {}})\n self.mock_emr_failures = {('j-MOCKJOBFLOW0', 0): None}\n\n with mr_job.make_runner() as runner:\n assert isinstance(runner, EMRJobRunner)\n\n with logger_disabled('mrjob.emr'):\n assert_raises(Exception, runner.run)\n\n emr_conn = botoemr.EmrConnection()\n job_flow_id = runner.get_emr_job_flow_id()\n for i in range(10):\n emr_conn.simulate_progress(job_flow_id)\n\n job_flow = emr_conn.describe_jobflow(job_flow_id)\n assert_equal(job_flow.state, 'FAILED')\n\n # job should get terminated on cleanup\n emr_conn = runner.make_emr_conn()\n job_flow_id = runner.get_emr_job_flow_id()\n for i in range(10):\n emr_conn.simulate_progress(job_flow_id)\n\n job_flow = emr_conn.describe_jobflow(job_flow_id)\n assert_equal(job_flow.state, 'TERMINATED')\n\n def test_pick_scratch_uri(self):\n self.add_mock_s3_data({'mrjob-walrus': {}, 'zebra': {}})\n runner = EMRJobRunner(conf_path=False)\n\n assert_equal(runner._opts['s3_scratch_uri'],\n 's3://mrjob-walrus/tmp/')\n\n def test_create_scratch_uri(self):\n # \"walrus\" bucket will be ignored; it doesn't start with \"mrjob-\"\n self.add_mock_s3_data({'walrus': {}, 'zebra': {}})\n\n runner = EMRJobRunner(conf_path=False, s3_sync_wait_time=0.01)\n\n # bucket name should be mrjob- plus 16 random hex digits\n s3_scratch_uri = runner._opts['s3_scratch_uri']\n assert_equal(s3_scratch_uri[:11], 's3://mrjob-')\n assert_equal(s3_scratch_uri[27:], '/tmp/')\n\n # bucket shouldn't actually exist yet\n scratch_bucket, _ = parse_s3_uri(s3_scratch_uri)\n assert_not_in(scratch_bucket, self.mock_s3_fs.keys())\n\n # need to do something to ensure that the bucket actually gets\n # created. 
let's launch a (mock) job flow\n jfid = runner.make_persistent_job_flow()\n assert_in(scratch_bucket, self.mock_s3_fs.keys())\n runner.make_emr_conn().terminate_jobflow(jfid)\n\n # once our scratch bucket is created, we should re-use it\n runner2 = EMRJobRunner(conf_path=False)\n assert_equal(runner2._opts['s3_scratch_uri'], s3_scratch_uri)\n s3_scratch_uri = runner._opts['s3_scratch_uri']\n\n def test_bootstrap_files_only_get_uploaded_once(self):\n # just a regression test for Issue #8\n\n # use self.mrjob_conf_path because it's easier than making a new file\n bootstrap_file = self.mrjob_conf_path\n\n runner = EMRJobRunner(conf_path=False,\n bootstrap_files=[bootstrap_file])\n\n matching_file_dicts = [fd for fd in runner._files\n if fd['path'] == bootstrap_file]\n assert_equal(len(matching_file_dicts), 1)\n\n def test_attach_to_existing_job_flow(self):\n emr_conn = EMRJobRunner(conf_path=False).make_emr_conn()\n # set log_uri to None, so that when we describe the job flow, it\n # won't have the loguri attribute, to test Issue #112\n emr_job_flow_id = emr_conn.run_jobflow(\n name='Development Job Flow', log_uri=None)\n\n stdin = StringIO('foo\\nbar\\n')\n self.mock_emr_output = {(emr_job_flow_id, 1): [\n '1\\t\"bar\"\\n1\\t\"foo\"\\n2\\tnull\\n']}\n\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path,\n '--emr-job-flow-id', emr_job_flow_id])\n mr_job.sandbox(stdin=stdin)\n\n results = []\n with mr_job.make_runner() as runner:\n runner.run()\n\n # Issue 182: don't create the bootstrap script when\n # attaching to another job flow\n assert_equal(runner._master_bootstrap_script, None)\n\n for line in runner.stream_output():\n key, value = mr_job.parse_output_line(line)\n results.append((key, value))\n\n assert_equal(sorted(results),\n [(1, 'bar'), (1, 'foo'), (2, None)])\n\n def test_default_hadoop_version(self):\n stdin = StringIO('foo\\nbar\\n')\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path])\n mr_job.sandbox(stdin=stdin)\n\n with mr_job.make_runner() as runner:\n runner.run()\n\n emr_conn = runner.make_emr_conn()\n job_flow = emr_conn.describe_jobflow(runner.get_emr_job_flow_id())\n\n assert_equal(job_flow.hadoopversion, '0.18')\n\n def test_set_hadoop_version(self):\n stdin = StringIO('foo\\nbar\\n')\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path,\n '--hadoop-version', '0.20'])\n mr_job.sandbox(stdin=stdin)\n\n with mr_job.make_runner() as runner:\n runner.run()\n\n emr_conn = runner.make_emr_conn()\n job_flow = emr_conn.describe_jobflow(runner.get_emr_job_flow_id())\n\n assert_equal(job_flow.hadoopversion, '0.20')\n\n def test_availability_zone_config(self):\n # Confirm that the mrjob.conf option 'aws_availability_zone' was\n # propagated through to the job flow\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path])\n mr_job.sandbox()\n\n with mr_job.make_runner() as runner:\n runner.run()\n\n emr_conn = runner.make_emr_conn()\n job_flow_id = runner.get_emr_job_flow_id()\n\n job_flow = emr_conn.describe_jobflow(job_flow_id)\n assert_equal(job_flow.availabilityzone, 'PUPPYLAND')\n\n def test_debugging_works(self):\n mr_job = MRTwoStepJob(['-r', 'emr', '-v',\n '-c', self.mrjob_conf_path,\n '--enable-emr-debugging'])\n mr_job.sandbox()\n\n with mr_job.make_runner() as runner:\n runner.run()\n flow = runner.make_emr_conn().describe_jobflow(runner._emr_job_flow_id)\n assert_equal(flow.steps[0].name, 'Setup Hadoop Debugging')\n\n\nclass BucketRegionTestCase(MockEMRAndS3TestCase):\n\n @setup\n def 
make_dummy_data(self):\n self.add_mock_s3_data({'mrjob-1': {}})\n s3c = boto.connect_s3()\n self.bucket1 = s3c.get_bucket('mrjob-1')\n self.bucket1_uri = 's3://mrjob-1/tmp/'\n\n def test_region_nobucket_nolocation(self):\n # aws_region specified, no bucket specified, default bucket has no location\n j = EMRJobRunner(aws_region='PUPPYLAND',\n s3_endpoint='PUPPYLAND',\n conf_path=False)\n assert_not_equal(j._opts['s3_scratch_uri'], self.bucket1_uri)\n\n def test_region_nobucket_nomatchexists(self):\n # aws_region specified, no bucket specified, no buckets have matching region\n self.bucket1.set_location('PUPPYLAND')\n j = EMRJobRunner(aws_region='KITTYLAND',\n s3_endpoint='KITTYLAND',\n conf_path=False)\n assert_not_equal(j._opts['s3_scratch_uri'], self.bucket1_uri)\n\n def test_noregion_nobucket_nolocation(self):\n # aws_region not specified, no bucket specified, default bucket has no location\n j = EMRJobRunner(conf_path=False)\n assert_equal(j._opts['s3_scratch_uri'], self.bucket1_uri)\n\n def test_noregion_bucket_nolocation(self):\n # aws_region not specified, bucket specified without location\n j = EMRJobRunner(conf_path=False,\n s3_scratch_uri=self.bucket1_uri)\n assert_equal(j._opts['s3_scratch_uri'], self.bucket1_uri)\n\n def test_noregion_bucket_location(self):\n # aws_region not specified, bucket specified with location\n self.bucket1.set_location('PUPPYLAND')\n j = EMRJobRunner(conf_path=False)\n assert_equal(j._aws_region, 'PUPPYLAND')\n\n\nclass ExtraBucketRegionTestCase(MockEMRAndS3TestCase):\n\n @setup\n def make_dummy_data(self):\n self.add_mock_s3_data({'mrjob-1': {}})\n s3c = boto.connect_s3()\n self.bucket1 = s3c.get_bucket('mrjob-1')\n self.bucket1_uri = 's3://mrjob-1/tmp/'\n\n self.add_mock_s3_data({'mrjob-2': {}})\n self.bucket2 = s3c.get_bucket('mrjob-2')\n self.bucket2.set_location('KITTYLAND')\n self.bucket2_uri = 's3://mrjob-2/tmp/'\n\n def test_region_nobucket_matchexists(self):\n # aws_region specified, no bucket specified, bucket exists with matching region\n j = EMRJobRunner(aws_region='KITTYLAND',\n s3_endpoint='KITTYLAND',\n conf_path=False)\n assert_equal(j._opts['s3_scratch_uri'], self.bucket2_uri)\n\n def test_region_bucket_match(self):\n # aws_region specified, bucket specified with matching location\n j = EMRJobRunner(aws_region='PUPPYLAND',\n s3_endpoint='PUPPYLAND',\n s3_scratch_uri=self.bucket1_uri,\n conf_path=False)\n assert_equal(j._opts['s3_scratch_uri'], self.bucket1_uri)\n\n def test_region_bucket_doesnotmatch(self):\n # aws_region specified, bucket specified with incorrect location\n with no_handlers_for_logger():\n stderr = StringIO()\n log = logging.getLogger('mrjob.emr')\n log.addHandler(logging.StreamHandler(stderr))\n log.setLevel(logging.WARNING)\n\n j = EMRJobRunner(aws_region='PUPPYLAND',\n s3_endpoint='PUPPYLAND',\n s3_scratch_uri=self.bucket2_uri,\n conf_path=False)\n\n assert_in('does not match bucket region', stderr.getvalue())\n\n\nclass DescribeAllJobFlowsTestCase(MockEMRAndS3TestCase):\n\n def test_can_get_all_job_flows(self):\n now = datetime.datetime.utcnow()\n\n NUM_JOB_FLOWS = 2222\n assert_gt(NUM_JOB_FLOWS, DEFAULT_MAX_JOB_FLOWS_RETURNED)\n\n for i in range(NUM_JOB_FLOWS):\n jfid = 'j-%04d' % i\n self.mock_emr_job_flows[jfid] = MockEmrObject(\n creationdatetime=to_iso8601(now - datetime.timedelta(minutes=i)),\n jobflowid=jfid)\n\n emr_conn = EMRJobRunner(conf_path=False).make_emr_conn()\n\n # ordinary describe_jobflows() hits the limit on number of job flows\n some_jfs = emr_conn.describe_jobflows()\n 
assert_equal(len(some_jfs), DEFAULT_MAX_JOB_FLOWS_RETURNED)\n\n all_jfs = describe_all_job_flows(emr_conn)\n assert_equal(len(all_jfs), NUM_JOB_FLOWS)\n assert_equal(sorted(jf.jobflowid for jf in all_jfs),\n [('j-%04d' % i) for i in range(NUM_JOB_FLOWS)])\n\n\n### tests for error parsing ###\n\nBUCKET = 'walrus'\nBUCKET_URI = 's3://' + BUCKET + '/'\n\nLOG_DIR = 'j-JOBFLOWID/'\n\nGARBAGE = \\\n\"\"\"GarbageGarbageGarbage\n\"\"\"\n\nTRACEBACK_START = 'Traceback (most recent call last):\\n'\n\nPY_EXCEPTION = \\\n\"\"\" File \"\", line 1, in \nTypeError: 'int' object is not iterable\n\"\"\"\n\nCHILD_ERR_LINE = '2010-07-27 18:25:48,397 WARN org.apache.hadoop.mapred.TaskTracker (main): Error running child\\n'\n\nJAVA_STACK_TRACE = \"\"\"java.lang.OutOfMemoryError: Java heap space\n at org.apache.hadoop.mapred.IFile$Reader.readNextBlock(IFile.java:270)\n at org.apache.hadoop.mapred.IFile$Reader.next(IFile.java:332)\n\"\"\"\n\nHADOOP_ERR_LINE_PREFIX = '2010-07-27 19:53:35,451 ERROR org.apache.hadoop.streaming.StreamJob (main): '\n\nUSEFUL_HADOOP_ERROR = 'Error launching job , Output path already exists : Output directory s3://yourbucket/logs/2010/07/23/ already exists and is not empty'\n\nBORING_HADOOP_ERROR = 'Job not Successful!'\nTASK_ATTEMPTS_DIR = LOG_DIR + 'task-attempts/'\n\nATTEMPT_0_DIR = TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_m_000126_0/'\nATTEMPT_1_DIR = TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_m_000126_0/'\n\ndef make_input_uri_line(input_uri):\n return \"2010-07-27 17:55:29,400 INFO org.apache.hadoop.fs.s3native.NativeS3FileSystem (main): Opening '%s' for reading\\n\" % input_uri\n\n\nclass FindProbableCauseOfFailureTestCase(MockEMRAndS3TestCase):\n # We're mostly concerned here that the right log files are read in the\n # right order. 
parsing of the logs is handled by tests.parse_test\n\n @setup\n def make_runner(self):\n self.add_mock_s3_data({'walrus': {}})\n self.runner = EMRJobRunner(s3_sync_wait_time=0,\n s3_scratch_uri='s3://walrus/tmp',\n conf_path=False)\n self.runner._s3_job_log_uri = BUCKET_URI + LOG_DIR\n\n @teardown\n def cleanup_runner(self):\n self.runner.cleanup()\n\n def test_empty(self):\n self.add_mock_s3_data({'walrus': {}})\n assert_equal(self.runner._find_probable_cause_of_failure([1]), None)\n\n def test_python_exception(self):\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'stderr':\n GARBAGE + TRACEBACK_START + PY_EXCEPTION + GARBAGE,\n ATTEMPT_0_DIR + 'syslog':\n make_input_uri_line(BUCKET_URI + 'input.gz'),\n }})\n assert_equal(self.runner._find_probable_cause_of_failure([1]),\n {'lines': list(StringIO(PY_EXCEPTION)),\n 's3_log_file_uri':\n BUCKET_URI + ATTEMPT_0_DIR + 'stderr',\n 'input_uri': BUCKET_URI + 'input.gz'})\n\n def test_python_exception_without_input_uri(self):\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'stderr': (\n GARBAGE + TRACEBACK_START + PY_EXCEPTION + GARBAGE),\n }})\n assert_equal(self.runner._find_probable_cause_of_failure([1]),\n {'lines': list(StringIO(PY_EXCEPTION)),\n 's3_log_file_uri':\n BUCKET_URI + ATTEMPT_0_DIR + 'stderr',\n 'input_uri': None})\n\n def test_java_exception(self):\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'stderr': GARBAGE + GARBAGE,\n ATTEMPT_0_DIR + 'syslog':\n make_input_uri_line(BUCKET_URI + 'input.gz') +\n GARBAGE +\n CHILD_ERR_LINE +\n JAVA_STACK_TRACE +\n GARBAGE,\n }})\n assert_equal(self.runner._find_probable_cause_of_failure([1]),\n {'lines': list(StringIO(JAVA_STACK_TRACE)),\n 's3_log_file_uri':\n BUCKET_URI + ATTEMPT_0_DIR + 'syslog',\n 'input_uri': BUCKET_URI + 'input.gz'})\n\n def test_java_exception_without_input_uri(self):\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'syslog':\n CHILD_ERR_LINE +\n JAVA_STACK_TRACE +\n GARBAGE,\n }})\n assert_equal(self.runner._find_probable_cause_of_failure([1]),\n {'lines': list(StringIO(JAVA_STACK_TRACE)),\n 's3_log_file_uri':\n BUCKET_URI + ATTEMPT_0_DIR + 'syslog',\n 'input_uri': None})\n\n def test_hadoop_streaming_error(self):\n # we should look only at step 2 since the errors in the other\n # steps are boring\n #\n # we include input.gz just to test that we DON'T check for it\n self.add_mock_s3_data({'walrus': {\n LOG_DIR + 'steps/1/syslog':\n GARBAGE +\n HADOOP_ERR_LINE_PREFIX + BORING_HADOOP_ERROR + '\\n',\n LOG_DIR + 'steps/2/syslog':\n GARBAGE +\n make_input_uri_line(BUCKET_URI + 'input.gz') +\n HADOOP_ERR_LINE_PREFIX + USEFUL_HADOOP_ERROR + '\\n',\n LOG_DIR + 'steps/3/syslog':\n HADOOP_ERR_LINE_PREFIX + BORING_HADOOP_ERROR + '\\n',\n }})\n\n assert_equal(self.runner._find_probable_cause_of_failure([1, 2, 3]),\n {'lines': [USEFUL_HADOOP_ERROR + '\\n'],\n 's3_log_file_uri':\n BUCKET_URI + LOG_DIR + 'steps/2/syslog',\n 'input_uri': None})\n\n def test_later_task_attempt_steps_win(self):\n # should look at later steps first\n self.add_mock_s3_data({'walrus': {\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_r_000126_3/stderr':\n TRACEBACK_START + PY_EXCEPTION,\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0002_m_000004_0/syslog':\n CHILD_ERR_LINE + JAVA_STACK_TRACE,\n }})\n failure = self.runner._find_probable_cause_of_failure([1, 2])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + TASK_ATTEMPTS_DIR +\n 'attempt_201007271720_0002_m_000004_0/syslog')\n\n def test_later_step_logs_win(self):\n self.add_mock_s3_data({'walrus': {\n LOG_DIR + 
'steps/1/syslog':\n HADOOP_ERR_LINE_PREFIX + USEFUL_HADOOP_ERROR + '\\n',\n LOG_DIR + 'steps/2/syslog':\n HADOOP_ERR_LINE_PREFIX + USEFUL_HADOOP_ERROR + '\\n',\n }})\n failure = self.runner._find_probable_cause_of_failure([1, 2])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + LOG_DIR + 'steps/2/syslog')\n\n def test_reducer_beats_mapper(self):\n # should look at reducers over mappers\n self.add_mock_s3_data({'walrus': {\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_m_000126_3/stderr':\n TRACEBACK_START + PY_EXCEPTION,\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_r_000126_3/syslog':\n CHILD_ERR_LINE + JAVA_STACK_TRACE,\n }})\n failure = self.runner._find_probable_cause_of_failure([1])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + TASK_ATTEMPTS_DIR +\n 'attempt_201007271720_0001_r_000126_3/syslog')\n\n def test_more_attempts_win(self):\n # look at fourth attempt before looking at first attempt\n self.add_mock_s3_data({'walrus': {\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_m_000126_0/stderr':\n TRACEBACK_START + PY_EXCEPTION,\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0001_m_000004_3/syslog':\n CHILD_ERR_LINE + JAVA_STACK_TRACE,\n }})\n failure = self.runner._find_probable_cause_of_failure([1])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + TASK_ATTEMPTS_DIR +\n 'attempt_201007271720_0001_m_000004_3/syslog')\n\n def test_py_exception_beats_java_stack_trace(self):\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'stderr': TRACEBACK_START + PY_EXCEPTION,\n ATTEMPT_0_DIR + 'syslog': CHILD_ERR_LINE + JAVA_STACK_TRACE,\n }})\n failure = self.runner._find_probable_cause_of_failure([1])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + ATTEMPT_0_DIR + 'stderr')\n\n def test_exception_beats_hadoop_error(self):\n self.add_mock_s3_data({'walrus': {\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0002_m_000126_0/stderr':\n TRACEBACK_START + PY_EXCEPTION,\n LOG_DIR + 'steps/1/syslog':\n HADOOP_ERR_LINE_PREFIX + USEFUL_HADOOP_ERROR + '\\n',\n }})\n failure = self.runner._find_probable_cause_of_failure([1, 2])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + TASK_ATTEMPTS_DIR +\n 'attempt_201007271720_0002_m_000126_0/stderr')\n\n def test_step_filtering(self):\n # same as previous test, but step 2 is filtered out\n self.add_mock_s3_data({'walrus': {\n TASK_ATTEMPTS_DIR + 'attempt_201007271720_0002_m_000126_0/stderr':\n TRACEBACK_START + PY_EXCEPTION,\n LOG_DIR + 'steps/1/syslog':\n HADOOP_ERR_LINE_PREFIX + USEFUL_HADOOP_ERROR + '\\n',\n }})\n failure = self.runner._find_probable_cause_of_failure([1])\n assert_equal(failure['s3_log_file_uri'],\n BUCKET_URI + LOG_DIR + 'steps/1/syslog')\n\n def test_ignore_errors_from_steps_that_later_succeeded(self):\n # This tests the fix for Issue #31\n self.add_mock_s3_data({'walrus': {\n ATTEMPT_0_DIR + 'stderr':\n GARBAGE + TRACEBACK_START + PY_EXCEPTION + GARBAGE,\n ATTEMPT_0_DIR + 'syslog':\n make_input_uri_line(BUCKET_URI + 'input.gz'),\n ATTEMPT_1_DIR + 'stderr': '',\n ATTEMPT_1_DIR + 'syslog':\n make_input_uri_line(BUCKET_URI + 'input.gz'),\n }})\n assert_equal(self.runner._find_probable_cause_of_failure([1]), None)\n\n\nclass TestEMRandS3Endpoints(MockEMRAndS3TestCase):\n\n def test_no_region(self):\n runner = EMRJobRunner(conf_path=False)\n assert_equal(runner.make_emr_conn().endpoint,\n 'elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3.amazonaws.com')\n assert_equal(runner._aws_region, '')\n\n def test_none_region(self):\n # blank region should be treated the 
same as no region\n runner = EMRJobRunner(conf_path=False, aws_region=None)\n assert_equal(runner.make_emr_conn().endpoint,\n 'elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3.amazonaws.com')\n assert_equal(runner._aws_region, '')\n\n def test_blank_region(self):\n # blank region should be treated the same as no region\n runner = EMRJobRunner(conf_path=False, aws_region='')\n assert_equal(runner.make_emr_conn().endpoint,\n 'elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3.amazonaws.com')\n assert_equal(runner._aws_region, '')\n\n def test_eu(self):\n runner = EMRJobRunner(conf_path=False, aws_region='EU')\n assert_equal(runner.make_emr_conn().endpoint,\n 'eu-west-1.elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3-eu-west-1.amazonaws.com')\n\n def test_us_east_1(self):\n runner = EMRJobRunner(conf_path=False, aws_region='us-east-1')\n assert_equal(runner.make_emr_conn().endpoint,\n 'us-east-1.elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3.amazonaws.com')\n\n def test_us_west_1(self):\n runner = EMRJobRunner(conf_path=False, aws_region='us-west-1')\n assert_equal(runner.make_emr_conn().endpoint,\n 'us-west-1.elasticmapreduce.amazonaws.com')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3-us-west-1.amazonaws.com')\n\n def test_ap_southeast_1(self):\n runner = EMRJobRunner(conf_path=False, aws_region='ap-southeast-1')\n assert_equal(runner.make_s3_conn().endpoint,\n 's3-ap-southeast-1.amazonaws.com')\n assert_raises(Exception, runner.make_emr_conn)\n\n def test_bad_region(self):\n # should fail in the constructor because the constructor connects to S3\n assert_raises(Exception, EMRJobRunner,\n conf_path=False, aws_region='the-moooooooon-1')\n\n def test_case_sensitive(self):\n assert_raises(Exception, EMRJobRunner,\n conf_path=False, aws_region='eu')\n assert_raises(Exception, EMRJobRunner,\n conf_path=False, aws_region='US-WEST-1')\n\n def test_explicit_endpoints(self):\n runner = EMRJobRunner(conf_path=False, aws_region='EU',\n s3_endpoint='s3-proxy', emr_endpoint='emr-proxy')\n assert_equal(runner.make_emr_conn().endpoint, 'emr-proxy')\n assert_equal(runner.make_s3_conn().endpoint, 's3-proxy')\n\n\nclass TestLs(MockEMRAndS3TestCase):\n\n def test_s3_ls(self):\n self.add_mock_s3_data({'walrus': {'one': '', 'two': '', 'three': ''}})\n\n runner = EMRJobRunner(s3_scratch_uri='s3://walrus/tmp',\n conf_path=False)\n\n assert_equal(set(runner._s3_ls('s3://walrus/')),\n set(['s3://walrus/one',\n 's3://walrus/two',\n 's3://walrus/three',]))\n\n assert_equal(set(runner._s3_ls('s3://walrus/t')),\n set(['s3://walrus/two',\n 's3://walrus/three',]))\n\n assert_equal(set(runner._s3_ls('s3://walrus/t/')),\n set([]))\n\n # if we ask for a nonexistent bucket, we should get some sort\n # of exception (in practice, buckets with random names will\n # probably be owned by other people, and we'll get some sort\n # of permissions error)\n assert_raises(Exception, set, runner._s3_ls('s3://lolcat/'))\n\nclass TestNoBoto(TestCase):\n\n @setup\n def blank_out_boto(self):\n self._real_boto = mrjob.emr.boto\n mrjob.emr.boto = None\n self._real_botoemr = mrjob.emr.botoemr\n mrjob.emr.botoemr = None\n\n @teardown\n def restore_boto(self):\n mrjob.emr.boto = self._real_boto\n mrjob.emr.botoemr = self._real_botoemr\n\n def test_init(self):\n # merely creating an EMRJobRunner should raise an exception\n # because it'll need to connect to S3 to set s3_scratch_uri\n 
assert_raises(ImportError, EMRJobRunner, conf_path=False)\n\n def test_init_with_s3_scratch_uri(self):\n # this also raises an exception because we have to check\n # the bucket location\n assert_raises(ImportError, EMRJobRunner,\n conf_path=False, s3_scratch_uri='s3://foo/tmp')\n\n\nclass TestBootstrapScripts(MockEMRAndS3TestCase):\n\n @setup\n def make_tmp_dir(self):\n self.tmp_dir = tempfile.mkdtemp()\n\n @teardown\n def rm_tmp_dir(self):\n shutil.rmtree(self.tmp_dir)\n\n def test_master_bootstrap_script_is_valid_python(self):\n # create a fake src tarball\n with open(os.path.join(self.tmp_dir, 'foo.py'), 'w'): pass\n yelpy_tar_gz_path = os.path.join(self.tmp_dir, 'yelpy.tar.gz')\n tar_and_gzip(self.tmp_dir, yelpy_tar_gz_path, prefix='yelpy')\n\n # do everything\n runner = EMRJobRunner(conf_path=False,\n bootstrap_cmds=['echo \"Hi!\"', 'true', 'ls'],\n bootstrap_files=['/tmp/quz'],\n bootstrap_mrjob=True,\n bootstrap_python_packages=[yelpy_tar_gz_path],\n bootstrap_scripts=['speedups.sh', '/tmp/s.sh'])\n script_path = os.path.join(self.tmp_dir, 'b.py')\n runner._create_master_bootstrap_script(dest=script_path)\n\n assert os.path.exists(script_path)\n py_compile.compile(script_path)\n\n def test_no_bootstrap_script_if_not_needed(self):\n script_path = os.path.join(self.tmp_dir, 'b.py')\n\n runner = EMRJobRunner(conf_path=False,\n bootstrap_mrjob=False)\n runner._create_master_bootstrap_script(dest=script_path)\n assert not os.path.exists(script_path)\n\n # bootstrap actions don't figure into the master bootstrap script\n runner = EMRJobRunner(conf_path=False,\n bootstrap_mrjob=False,\n bootstrap_actions=['foo', 'bar baz'])\n runner._create_master_bootstrap_script(dest=script_path)\n assert not os.path.exists(script_path)\n\n def test_bootstrap_actions_get_added(self):\n bootstrap_actions = [\n 's3://elasticmapreduce/bootstrap-actions/configure-hadoop -m,mapred.tasktracker.map.tasks.maximum=1',\n 's3://foo/bar#xyzzy', # use alternate name for script\n ]\n\n runner = EMRJobRunner(conf_path=False,\n bootstrap_actions=bootstrap_actions,\n s3_sync_wait_time=0.01)\n\n job_flow_id = runner.make_persistent_job_flow()\n\n emr_conn = runner.make_emr_conn()\n job_flow = emr_conn.describe_jobflow(job_flow_id)\n actions = job_flow.bootstrapactions\n\n assert_equal(len(actions), 3)\n\n assert_equal(\n actions[0].path,\n 's3://elasticmapreduce/bootstrap-actions/configure-hadoop')\n assert_equal(\n actions[0].args,\n ['-m,mapred.tasktracker.map.tasks.maximum=1'])\n assert_equal(actions[0].name, 'configure-hadoop')\n\n assert_equal(actions[1].path, 's3://foo/bar')\n assert_equal(actions[1].args, [])\n assert_equal(actions[1].name, 'xyzzy')\n\n # check for master bootstrap script\n assert actions[2].path.startswith('s3://mrjob-')\n assert actions[2].path.endswith('b.py')\n assert_equal(actions[2].args, [])\n assert_equal(actions[2].name, 'master')\n\n # make sure master bootstrap script is on S3\n assert runner.path_exists(actions[2].path)\n\n def test_local_bootstrap_action(self):\n # make sure that local bootstrap action scripts get uploaded to S3\n action_path = os.path.join(self.tmp_dir, 'apt-install.sh')\n with open(action_path, 'w') as f:\n f.write('for $pkg in $@; do sudo apt-get install $pkg; done\\n')\n\n bootstrap_actions = [\n action_path + ' python-scipy mysql-server']\n\n runner = EMRJobRunner(conf_path=False,\n bootstrap_actions=bootstrap_actions,\n s3_sync_wait_time=0.01)\n\n job_flow_id = runner.make_persistent_job_flow()\n\n emr_conn = runner.make_emr_conn()\n job_flow = 
emr_conn.describe_jobflow(job_flow_id)\n actions = job_flow.bootstrapactions\n\n assert_equal(len(actions), 2)\n\n assert actions[0].path.startswith('s3://mrjob-')\n assert actions[0].path.endswith('/apt-install.sh')\n assert_equal(actions[0].name, 'apt-install.sh')\n assert_equal(actions[0].args, ['python-scipy', 'mysql-server'])\n\n # check for master bootstrap script\n assert actions[1].path.startswith('s3://mrjob-')\n assert actions[1].path.endswith('b.py')\n assert_equal(actions[1].args, [])\n assert_equal(actions[1].name, 'master')\n\n # make sure master bootstrap script is on S3\n assert runner.path_exists(actions[1].path)\n", "repo_name": "Jyrsa/mrjob", "sub_path": "tests/emr_test.py", "file_name": "emr_test.py", "file_ext": "py", "file_size_in_byte": 37958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "mrjob.botoemr", "line_number": 28, "usage_type": "name"}, {"api_name": "testify.TestCase", "line_number": 31, "usage_type": "name"}, {"api_name": "tempfile.mkstemp", "line_number": 35, "usage_type": "call"}, {"api_name": "mrjob.conf.dump_mrjob_conf", "line_number": 36, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 33, "usage_type": "name"}, {"api_name": "os.unlink", "line_number": 44, "usage_type": "call"}, {"api_name": "testify.teardown", "line_number": 42, "usage_type": "name"}, {"api_name": "tests.mockboto.MockS3Connection", "line_number": 55, "usage_type": "call"}, {"api_name": "tests.mockboto.MockEmrConnection", "line_number": 62, "usage_type": "call"}, {"api_name": "boto.connect_s3", "line_number": 64, "usage_type": "attribute"}, {"api_name": "boto.connect_s3", "line_number": 65, "usage_type": "attribute"}, {"api_name": "mrjob.botoemr.EmrConnection", "line_number": 67, "usage_type": "attribute"}, {"api_name": "mrjob.botoemr", "line_number": 67, "usage_type": "name"}, {"api_name": "mrjob.botoemr.EmrConnection", "line_number": 68, "usage_type": "attribute"}, {"api_name": "mrjob.botoemr", "line_number": 68, "usage_type": "name"}, {"api_name": "testify.setup", "line_number": 46, "usage_type": "name"}, {"api_name": "boto.connect_s3", "line_number": 72, "usage_type": "attribute"}, {"api_name": "mrjob.botoemr.EmrConnection", "line_number": 73, "usage_type": "attribute"}, {"api_name": "mrjob.botoemr", "line_number": 73, "usage_type": "name"}, {"api_name": "testify.teardown", "line_number": 70, "usage_type": "name"}, {"api_name": "tests.mockboto.add_mock_s3_data", "line_number": 78, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "mrjob.conf.dump_mrjob_conf", "line_number": 87, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 83, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 96, "usage_type": "call"}, {"api_name": "testify.teardown", "line_number": 94, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 113, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 123, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 126, "usage_type": "argument"}, {"api_name": 
"testify.assert_equal", "line_number": 130, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 146, "usage_type": "call"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "testify.assert_equal", "line_number": 151, "usage_type": "call"}, {"api_name": "mrjob.parse.JOB_NAME_RE.match", "line_number": 152, "usage_type": "call"}, {"api_name": "mrjob.parse.JOB_NAME_RE", "line_number": 152, "usage_type": "name"}, {"api_name": "testify.assert_equal", "line_number": 153, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 154, "usage_type": "call"}, {"api_name": "getpass.getuser", "line_number": 154, "usage_type": "call"}, {"api_name": "testify.assert_in", "line_number": 158, "usage_type": "call"}, {"api_name": "testify.assert_not_in", "line_number": 159, "usage_type": "call"}, {"api_name": "testify.assert_not_in", "line_number": 160, "usage_type": "call"}, {"api_name": "testify.assert_in", "line_number": 161, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 170, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 174, "usage_type": "call"}, {"api_name": "testify.assert_not_in", "line_number": 178, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "testify.assert_equal", "line_number": 195, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 198, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 206, "usage_type": "argument"}, {"api_name": "tests.quiet.logger_disabled", "line_number": 208, "usage_type": "call"}, {"api_name": "testify.assert_raises", "line_number": 209, "usage_type": "call"}, {"api_name": "mrjob.botoemr.EmrConnection", "line_number": 211, "usage_type": "call"}, {"api_name": "mrjob.botoemr", "line_number": 211, "usage_type": "name"}, {"api_name": "testify.assert_equal", "line_number": 217, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 226, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 230, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 232, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 239, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 243, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 244, "usage_type": "call"}, {"api_name": "mrjob.emr.parse_s3_uri", "line_number": 247, "usage_type": "call"}, {"api_name": "testify.assert_not_in", "line_number": 248, "usage_type": "call"}, {"api_name": "testify.assert_in", "line_number": 253, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 257, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 258, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 267, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 272, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 275, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 281, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 285, "usage_type": "call"}, {"api_name": 
"testify.assert_equal", "line_number": 296, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 302, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 306, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 307, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 317, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 320, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 321, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 332, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 337, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 348, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 351, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 359, "usage_type": "call"}, {"api_name": "boto.connect_s3", "line_number": 367, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 364, "usage_type": "name"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 373, "usage_type": "call"}, {"api_name": "testify.assert_not_equal", "line_number": 376, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 381, "usage_type": "call"}, {"api_name": "testify.assert_not_equal", "line_number": 384, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 388, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 389, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 393, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 395, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 400, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 401, "usage_type": "call"}, {"api_name": "boto.connect_s3", "line_number": 409, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 406, "usage_type": "name"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 420, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 423, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 427, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 431, "usage_type": "call"}, {"api_name": "tests.quiet.no_handlers_for_logger", "line_number": 435, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 436, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 437, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 438, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 439, "usage_type": "attribute"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 441, "usage_type": "call"}, {"api_name": "testify.assert_in", "line_number": 446, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 452, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 452, "usage_type": "attribute"}, {"api_name": "testify.assert_gt", "line_number": 455, "usage_type": "call"}, {"api_name": "tests.mockboto.DEFAULT_MAX_JOB_FLOWS_RETURNED", "line_number": 455, "usage_type": "argument"}, {"api_name": "tests.mockboto.MockEmrObject", "line_number": 459, "usage_type": "call"}, {"api_name": "tests.mockboto.to_iso8601", "line_number": 460, "usage_type": "call"}, {"api_name": 
"datetime.timedelta", "line_number": 460, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 463, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 467, "usage_type": "call"}, {"api_name": "tests.mockboto.DEFAULT_MAX_JOB_FLOWS_RETURNED", "line_number": 467, "usage_type": "argument"}, {"api_name": "mrjob.emr.describe_all_job_flows", "line_number": 469, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 470, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 471, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 521, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 518, "usage_type": "name"}, {"api_name": "testify.teardown", "line_number": 526, "usage_type": "name"}, {"api_name": "testify.assert_equal", "line_number": 532, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 541, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 542, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 552, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 553, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 568, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 569, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 581, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 582, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 604, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 619, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 631, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 643, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 656, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 666, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 677, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 690, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 704, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 710, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 711, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 713, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 715, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 719, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 720, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 722, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 724, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 728, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 729, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 731, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 733, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 736, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 737, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 739, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 743, "usage_type": "call"}, {"api_name": 
"testify.assert_equal", "line_number": 744, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 746, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 750, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 751, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 753, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 757, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 758, "usage_type": "call"}, {"api_name": "testify.assert_raises", "line_number": 760, "usage_type": "call"}, {"api_name": "testify.assert_raises", "line_number": 764, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 764, "usage_type": "argument"}, {"api_name": "testify.assert_raises", "line_number": 768, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 768, "usage_type": "argument"}, {"api_name": "testify.assert_raises", "line_number": 770, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 770, "usage_type": "argument"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 774, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 776, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 777, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 785, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 788, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 793, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 797, "usage_type": "call"}, {"api_name": "testify.assert_raises", "line_number": 804, "usage_type": "call"}, {"api_name": "testify.TestCase", "line_number": 806, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 810, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 810, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 811, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 811, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 812, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 812, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 813, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 813, "usage_type": "name"}, {"api_name": "testify.setup", "line_number": 808, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 817, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 817, "usage_type": "name"}, {"api_name": "mrjob.conf.emr", "line_number": 818, "usage_type": "attribute"}, {"api_name": "mrjob.conf", "line_number": 818, "usage_type": "name"}, {"api_name": "testify.teardown", "line_number": 815, "usage_type": "name"}, {"api_name": "testify.assert_raises", "line_number": 823, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 823, "usage_type": "argument"}, {"api_name": "testify.assert_raises", "line_number": 828, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 828, "usage_type": "argument"}, {"api_name": "tempfile.mkdtemp", "line_number": 836, "usage_type": "call"}, {"api_name": "testify.setup", "line_number": 834, "usage_type": "name"}, {"api_name": "shutil.rmtree", "line_number": 840, "usage_type": "call"}, {"api_name": "testify.teardown", "line_number": 838, "usage_type": "name"}, {"api_name": 
"os.path.join", "line_number": 844, "usage_type": "call"}, {"api_name": "os.path", "line_number": 844, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 845, "usage_type": "call"}, {"api_name": "os.path", "line_number": 845, "usage_type": "attribute"}, {"api_name": "mrjob.util.tar_and_gzip", "line_number": 846, "usage_type": "call"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 849, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 855, "usage_type": "call"}, {"api_name": "os.path", "line_number": 855, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 858, "usage_type": "call"}, {"api_name": "os.path", "line_number": 858, "usage_type": "attribute"}, {"api_name": "py_compile.compile", "line_number": 859, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 862, "usage_type": "call"}, {"api_name": "os.path", "line_number": 862, "usage_type": "attribute"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 864, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 867, "usage_type": "call"}, {"api_name": "os.path", "line_number": 867, "usage_type": "attribute"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 870, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 874, "usage_type": "call"}, {"api_name": "os.path", "line_number": 874, "usage_type": "attribute"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 882, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 892, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 894, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 897, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 900, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 902, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 903, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 904, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 909, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 910, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 917, "usage_type": "call"}, {"api_name": "os.path", "line_number": 917, "usage_type": "attribute"}, {"api_name": "mrjob.emr.EMRJobRunner", "line_number": 924, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 934, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 938, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 939, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 944, "usage_type": "call"}, {"api_name": "testify.assert_equal", "line_number": 945, "usage_type": "call"}]} +{"seq_id": "73946633445", "text": "from flask import Flask, render_template, jsonify, request, redirect\nimport pandas as pd\nimport pymongo\nimport re\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n # \"\"\"Return the homepage.\"\"\"\n \n return render_template(\"index.html\")\n\n\n# Add any other routes here\n@app.route(\"/budget\")\ndef budgetpage():\n # \"\"\"Return the homepage.\"\"\"\n return render_template(\"budget.html\")\n \n@app.route(\"/price/\")\ndef winebudget(selected_budget):\n #create mongodb connection\n conn = 'mongodb://test:password1@ds123444.mlab.com:23444/heroku_3t530jfl'\n client = pymongo.MongoClient(conn)\n\n #extract to DB\n db = client.heroku_3t530jfl\n 
wine_collections = db.wine_db.find({}, {'_id': 0})\n df = pd.DataFrame(list(wine_collections))\n\n #sorted by review\n sorted_wine = df.sort_values(by=\"review\", ascending=False)\n sorted_wine.reset_index(inplace=True, drop=True)\n sorted_wine.head()\n\n #take only top 100 wines\n filtered_data = sorted_wine.loc[sorted_wine[\"price\"]<=int(selected_budget)]\n top_100 = filtered_data[:100]\n\n\n #return new data for graphing\n wine_name = list(top_100[\"brand_name\"])\n grape_type = list(top_100[\"grape_type\"])\n price = list(top_100[\"price\"])\n review = list(top_100[\"review\"])\n\n data = []\n i=0\n\n while i < len(wine_name):\n wine_info = {\n \"wine_name\": wine_name[i],\n \"grape_type\": grape_type[i],\n \"price\": int(price[i]),\n \"review\": int(review[i])\n }\n data.append(wine_info)\n i +=1\n return jsonify(data)\n\n@app.route(\"/taste\")\ndef tastepage():\n # \"\"\"Return the homepage.\"\"\"\n return render_template(\"taste.html\")\n\n@app.route(\"/alcohol/\")\ndef drinkbudget(selected_percent):\n #create mongodb connection\n conn = 'mongodb://test:password1@ds123444.mlab.com:23444/heroku_3t530jfl'\n client = pymongo.MongoClient(conn)\n\n #extract to DB\n db = client.heroku_3t530jfl\n wine_collections = db.wine_db.find()\n df = pd.DataFrame(list(wine_collections))\n\n sorted_wine = df.sort_values(by=\"review\", ascending=False)\n sorted_wine.reset_index(inplace=True, drop=True)\n\n filtered_data = sorted_wine.loc[sorted_wine[\"lower_alcohol\"]<=int(selected_percent)]\n top_10 = filtered_data[:10]\n\n wine_name = list(top_10[\"brand_name\"])\n grape_type = list(top_10[\"grape_type\"])\n alcohol = list(top_10[\"lower_alcohol\"])\n\n data = []\n i=0\n\n while i < len(wine_name):\n wine_info = {\n \"wine_name\": wine_name[i],\n \"grape_type\": grape_type[i],\n \"alcohol\": alcohol[i]\n }\n data.append(wine_info)\n i +=1\n return jsonify(data)\n\n\n@app.route(\"/food\")\ndef foodpage():\n conn = 'mongodb://test:password1@ds123444.mlab.com:23444/heroku_3t530jfl'\n client = pymongo.MongoClient(conn)\n\n db = client.heroku_3t530jfl\n food_items = db.wine_db.find({}, {'_id': False, 'food_pairing': True})\n food_list = pd.DataFrame(list(food_items))\n\n\n # \"\"\"Return the homepage.\"\"\"\n return render_template(\"food.html\", food_list=food_list['food_pairing'].unique())\n\n@app.route(\"/yummy/\")\ndef foodchart(selected_food):\n\n conn = 'mongodb://test:password1@ds123444.mlab.com:23444/heroku_3t530jfl'\n client = pymongo.MongoClient(conn)\n\n db = client.heroku_3t530jfl\n food_choice = db.wine_db.find({'food_pairing': re.compile('^' + re.escape(selected_food) + '$', re.IGNORECASE)}, {'_id': False})\n food_list = list(food_choice)\n\n df = pd.DataFrame(food_list)\n\n grape_type=df.groupby(\"grape_type\").nunique()\n new_df=pd.DataFrame(grape_type)\n \n completed_df = new_df[[\"brand_name\"]]\n new_df=completed_df.reset_index()\n\n grape_names = list(new_df[\"grape_type\"])\n wine_count = list(new_df[\"brand_name\"])\n\n data = []\n i=0\n while i < len(grape_names):\n dict = {\n \"grape_type\":grape_names[i],\n \"wine_count\":wine_count[i]\n }\n data.append(dict)\n i+=1\n\n \n return jsonify(data)\n\n \n\n@app.route(\"/about\")\ndef aboutpage():\n # \"\"\"Return the homepage.\"\"\"\n return render_template(\"about.html\")\n \n\nif __name__ == \"__main__\":\n app.run()\n\n\n", "repo_name": "Donthave1/DrinkDrankDrunk", "sub_path": "Archive/backup.py", "file_name": "backup.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", 
"stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 100, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 114, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 120, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 123, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 123, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 129, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "70942432795", "text": "\"\"\"\nModule to make the image stacking / production occur.\n\"\"\"\n\n\nimport os\nfrom glob import glob\n\nimport astropy.io.fits as pyfits\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n\nfrom . import plotting as pl\nfrom . import registration as reg\nfrom . import utils as u\nfrom . import contrast as contrast\n\n\nclass FlatOpeningError(ValueError):\n pass\n\n\ndef open_flats(flatfile):\n \"\"\"\n Opens flats files. Essentially a wrapper around pyfits.getdata that\n also includes a descriptive exception if the file doesn't exist.\n\n Inputs:\n :flatfile: (str) path to dark to be opened.\n\n Outputs:\n :dark: (array) data from darks FITS file.\n \"\"\"\n if flatfile[-4:] != \"fits\":\n raise FlatOpeningError(\n \"\"\"Currently, SImMER only supports flats in FITS files.\"\"\"\n )\n if not os.path.exists(flatfile):\n raise FlatOpeningError(\n \"\"\"The requested flat file can't be found. 
Please check that you have a flat\n file corresponding to every filter used in your observations.\"\"\"\n )\n else:\n flat = pyfits.getdata(flatfile, 0)\n return flat\n\n\ndef image_driver(raw_dir, reddir, config, inst, sep_skies=False, plotting_yml=None, verbose=False):\n \"\"\"Do flat division, sky subtraction, and initial alignment via coords in header.\n Returns Python list of each registration method used per star.\n\n Inputs:\n :raw_dir: (string) directory for the raw data\n :reddir: (string) directory for the reduced data\n :config: (pandas DataFrame) dataframe corresponding to config sheet for data.\n :inst: (Instrument object) instrument for which data is being reduced.\n :plotting_yml: (string) path to the plotting configuration file.\n\n \"\"\"\n # Save these images to the appropriate folder.\n\n if plotting_yml:\n pl.initialize_plotting(plotting_yml)\n\n if inst.take_skies:\n skies = config[config.Comments == \"sky\"]\n else:\n skies = config[\n (config.Object != \"flat\")\n & (config.Object != \"dark\")\n & (config.Object != \"setup\")\n ]\n stars = skies.Object.unique()\n sdirs = glob(reddir + \"*/\")\n\n #Make sure list of stars doesn't include sky frames taken by nodding\n if sep_skies == True:\n keep = np.zeros(len(stars)) + 1\n for kk in np.arange(len(keep)):\n if(\"sky\" in stars[kk]):\n keep[kk] = 0\n wstar = np.where(keep == 1)\n stars = stars[wstar]\n\n else:\n stars = stars\n\n methods = []\n\n for star in tqdm(\n np.unique(stars), desc=\"Running image driver\", position=0, leave=True\n ):\n s_dir = reddir + star + \"/\"\n if (\n s_dir not in sdirs\n ): # make sure there's a subdirectory for each star\n os.mkdir(s_dir)\n\n filts = skies[\n skies.Object == star\n ].Filter.values # array of filters as strings\n for n, filter_name in enumerate(filts):\n obj = config[config.Object == star]\n imlist = eval(\n obj[obj.Comments != \"sky\"].Filenums.values[n]\n ) # pylint: disable=eval-used # liter_eval issues\n # cast obj_methods as list so that elementwise comparison isn't performed\n obj_methods = config[config.Object == star].Method.values\n\n # use pd.isnull because it can check against strings\n if np.all(pd.isnull(obj_methods)):\n methods.append(\"quick_look\")\n else:\n obj_method = obj_methods[~pd.isnull(obj_methods)][0].lower()\n if \"saturated\" and \"separated\" in obj_method:\n methods.append(\"saturated separated\")\n elif \"saturated\" in obj_method and \"separated\" not in obj_method:\n methods.append(\"saturated\")\n elif \"saturated\" not in obj_method and \"separated\" in obj_method:\n methods.append(\"separated\")\n create_imstack(\n raw_dir, reddir, s_dir, imlist, inst, filter_name=filter_name\n )\n return methods\n\n\ndef create_imstack(\n raw_dir, reddir, s_dir, imlist, inst, plotting_yml=None, filter_name=None\n):\n \"\"\"Create the stack of images by performing flat division, sky subtraction.\n\n Inputs:\n :raw_dir: (string) path to directory containing raw data\n :reddir: (string) path to directory containing reduced data\n :s_dir: (string) path to directory corresponding to a specific star.\n :imlist: (list) list of strings of paths pointing to image files.\n :inst: (Instrument object) instrument for which data is being reduced.\n :plot: (bool) determines whether or not intermediate plots should be produced.\n :filter_name: (string) name of the filter used for the images in question.\n\n Outputs:\n :im_array: (3d array) array of 2d images.\n :shifts_all: recording of all the x-y shifts made\n \"\"\"\n if plotting_yml:\n 
pl.initialize_plotting(plotting_yml)\n\n nims = len(imlist)\n imfiles = u.make_filelist(raw_dir, imlist, inst)\n\n #Keep track of original filenames so that we can annotate the shift1_cube\n #image arrays and easily decide which images to exclude\n original_fnames=imfiles.copy()\n for jj in np.arange(len(imfiles)):\n original_fnames[jj] = os.path.basename(imfiles[jj]).split('.')[0]\n\n im_array = u.read_imcube(imfiles)\n\n im_array = inst.adjust_array(im_array, nims)\n\n head = inst.head(imfiles[0])\n filt = inst.filt(nims, head, filter_name)\n\n # if necessary, make directory for filter. Also grab correct flat file\n fdirs = glob(s_dir + \"*/\")\n\n sf_dir = s_dir + filt + \"/\"\n if sf_dir not in fdirs: # make a directory for each filt\n os.mkdir(sf_dir)\n\n flatfile = reddir + f\"flat_{filt}.fits\"\n if (\n inst.name == \"PHARO\" and filt == \"Br-gamma\"\n ): # not sure whether this is generalizable\n flatfile = reddir + \"flat_K_short.fits\"\n\n #For ShARCS, use Ks flat instead of BrG-2.16 if necessary\n if (inst.name == \"ShARCS\" and filt == \"BrG-2.16\"):\n if os.path.exists(flatfile) == False:\n flatfile = reddir + 'flat_Ks.fits'\n\n #For ShARCS, use J flat instead of J+Ch4-1.2 if necessary\n if (inst.name == \"ShARCS\" and filt == \"J+Ch4-1.2\"):\n if os.path.exists(flatfile) == False:\n flatfile = reddir + 'flat_J.fits'\n\n flat = open_flats(flatfile)\n\n skyfile = sf_dir + \"sky.fits\"\n sky = pyfits.getdata(skyfile, 0)\n sky[np.isnan(sky)] = 0.0 # set nans from flat=0 pixels to 0 in sky\n\n shifts_all = []\n for i in range(nims):\n # flat division and sky subtraction\n current_im = im_array[i, :, :]\n flat[flat == 0] = np.nan\n current_im = (\n current_im / flat\n ) - sky # where flat = 0, this will be nan\n current_head = pyfits.getheader(imfiles[i])\n\n # bad pixel correction\n current_im = inst.bad_pix(current_im)\n\n # now deal with headers and shifts\n shifted_im, shifts = reg.shift_bruteforce(\n current_im\n ) # put it at the center\n\n shifts_all.append(shifts)\n\n im_array[i, :, :] = shifted_im\n hdu = pyfits.PrimaryHDU(shifted_im, header=current_head)\n hdu.writeto(\n sf_dir + \"sh{:02d}.fits\".format(i),\n overwrite=True,\n output_verify=\"ignore\",\n )\n\n pl.plot_array(\n \"intermediate\", im_array, -10.0, 10000.0, sf_dir, \"shift1_cube.png\",snames=original_fnames\n )\n\n # write shifts to file\n textfile = open(sf_dir + \"shifts.txt\", \"w\")\n textfile.write(\"im, d_row, d_col\\n\")\n for i, shift in enumerate(shifts_all):\n textfile.write(\"{},{},{}\\n\".format(i, *shift))\n textfile.close()\n return im_array, shifts_all\n\n\ndef create_im(s_dir, ssize1, plotting_yml=None, fdirs=None, method=\"quick_look\", verbose=False):\n \"\"\"Take the shifted, cut down images from before, then perform registration\n and combine. 
Tests should happen before this, as this is a per-star basis.\n\n Inputs:\n :s_dir: (str) directory for the raw data\n :ssize1: (int) initial pixel search size of box.\n :plotting_yml: (str) path to the plotting configuration file.\n :fdirs: (list of str) file directories.\n :method: (str) image registration method.\n \"\"\"\n if plotting_yml:\n pl.initialize_plotting(plotting_yml)\n\n if not fdirs:\n fdirs = glob(s_dir + \"*/\")\n\n for sf_dir in fdirs: # each filter for each star\n #Only register star images, not sky images\n dirparts = sf_dir.split('/')\n if 'sky' in dirparts[len(dirparts)-3]:\n if verbose == True:\n print('this is a sky directory: ', sf_dir)\n continue\n\n if verbose == True:\n print('working on sf_dir ', sf_dir)\n\n files = glob(\n sf_dir + f\"sh*.fits\"\n ) # might need to change to file_prefix\n nims = len(files)\n\n frames = u.read_imcube(files)\n frames = frames.astype(float)\n\n arrsize1 = ssize1 * 2 + 1\n rots = np.zeros((nims, arrsize1, arrsize1))\n newshifts1 = []\n\n # if we're doing PSF-fitting, we do it across all the images at once\n if method == 'psf':\n frames = reg.register_psf_fit(frames)\n\n for i in range(nims): # each image\n image = frames[i, :, :]\n\n #Interpolate over NaNs so that scipy can shift images\n #without producing arrays that are completely NaN\n #Following this tutorial: https://docs.astropy.org/en/stable/convolution/index.html\n\n # Generate Gaussian kernel with x_stddev=1 (and y_stddev=1)\n # It is a 9x9 array\n kernel = Gaussian2DKernel(x_stddev=1)\n\n # Replace NaNs with interpolated values\n image = interpolate_replace_nans(image, kernel)\n\n if method == \"saturated\":\n image_centered, rot, newshifts1 = reg.register_saturated(\n image, ssize1, newshifts1\n )\n rots[i, :, :] = rot\n elif method == \"quick_look\":\n image[image < 0.0] = 0.0\n image_centered = reg.register_bruteforce(image)\n if len(image_centered) == 0:\n print(\"Resorting to saturated mode.\")\n image_centered, rot, newshifts1 = reg.register_saturated(\n image, ssize1, newshifts1\n )\n rots[i, :, :] = rot\n elif method == \"saturated separated\":\n rough_center = reg.find_wide_binary(image)\n image_centered, rot, newshifts1 = reg.register_saturated(\n image, ssize1, newshifts1, rough_center=rough_center\n )\n rots[i, :, :] = rot\n elif method == \"separated\":\n rough_center = reg.find_wide_binary(image)\n image_centered = reg.register_bruteforce(\n image, rough_center=rough_center\n )\n frames[i, :, :] = image_centered # newimage\n\n final_im = np.nanmedian(frames, axis=0)\n #Trim down to smaller final size\n final_im = final_im[100:700,100:700] #extract central 600x600 pixel region\n\n #Trim down to smaller final size\n cutsize = 600 #desired axis length of final cutout image\n astart = int(round((final_im.shape[0]-cutsize)/2.))\n bstart = int(round((final_im.shape[1]-cutsize)/2.))\n aend = astart+cutsize\n bend = bstart+cutsize\n if np.logical_or(aend > final_im.shape[0],bend > final_im.shape[1]):\n print('ERROR: Requested cutout is too large. 
Using full image instead.')\n print('Current image dimensions: ', final_im.shape)\n print('Desired cuts: ', astart, aend, bstart, bend)\n else:\n final_im = final_im[astart:astart+cutsize,bstart:bstart+cutsize] #extract central cutsize x cutsize pixel region from larger image\n\n head = pyfits.getheader(files[0])\n hdu = pyfits.PrimaryHDU(final_im, header=head)\n hdu.writeto(\n sf_dir + \"final_im.fits\", overwrite=True, output_verify=\"ignore\"\n )\n\n textfile1 = open(sf_dir + \"shifts2.txt\", \"w\")\n textfile1.write(\"im, d_row, d_col\\n\")\n for i, item in enumerate(newshifts1):\n textfile1.write(\"{},{},{}\\n\".format(i, *item))\n textfile1.close()\n pl.plot_array(\n \"rots\",\n rots,\n 0.0,\n 1.0,\n sf_dir,\n \"rots.png\",\n extent=[-ssize1, ssize1, -ssize1, ssize1],\n )\n\n final_vmin, final_vmax = np.percentile(final_im, [1,99])\n pl.plot_array(\n \"final_im\", final_im, final_vmin, final_vmax, sf_dir, \"final_image.png\"\n )\n frames_vmin, frames_vmax = np.percentile(frames, [1,99])\n pl.plot_array(\n \"intermediate\", frames, frames_vmin, frames_vmax, sf_dir, \"centers.png\"\n )\n", "repo_name": "arjunsavel/SImMER", "sub_path": "src/simmer/image.py", "file_name": "image.py", "file_ext": "py", "file_size_in_byte": 13162, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.exists", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getdata", "line_number": 46, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 46, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 84, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 93, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 167, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path", "line_number": 181, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getdata", "line_number": 192, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 192, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 199, "usage_type": "attribute"}, {"api_name": "astropy.io.fits.getheader", "line_number": 203, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 203, "usage_type": "name"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 216, 
"usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 216, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 251, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 273, "usage_type": "call"}, {"api_name": "astropy.convolution.Gaussian2DKernel", "line_number": 289, "usage_type": "call"}, {"api_name": "astropy.convolution.interpolate_replace_nans", "line_number": 292, "usage_type": "call"}, {"api_name": "numpy.nanmedian", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 331, "usage_type": "call"}, {"api_name": "astropy.io.fits.getheader", "line_number": 338, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 338, "usage_type": "name"}, {"api_name": "astropy.io.fits.PrimaryHDU", "line_number": 339, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 339, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 359, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 363, "usage_type": "call"}]} +{"seq_id": "26477915986", "text": "# read file\nwith open(\"/Users/thanhminn/data for data analyst/data_project/project thi học sinh giỏi cấp tphcm 2023/Main Data/final_data.csv\") as file:\n data = file.read().split(\"\\n\")\n\n#set data for code\nheader = data[0]\nstudents = data[1:]\ntotal_student = len(students)\n\n# split header\nheader = header.split(\",\")\narr = header[4:]\n\n# turn each student into a list\nfor i in range(len(students)):\n\tstudents[i] = students[i].split(\",\")\n\n# number of students who took 0,1,2,3,...12 \nnum_of_subject_taken = [0]*12\n\nfor student in students:\n\tcount = 0\n\tfor i in range(12):\n\t\tif student[i+4] != \"-1\":\n\t\t\tcount += 1\n\tnum_of_subject_taken[count] += 1\n \n#Drawing bar chart plot\nimport matplotlib.pyplot as plt\nimport numpy \n\n#create y axis label using arr list\narr = [\"0 Subject\",\"1 Subject\",\"2 Subject\",\"3 Subject\",\"4 Subject\",\"5 Subject\",\"6 Subject\",\"7 Subject\",\"8 Subject\",\"9 Subject\",\"10 Subject\",\"11 Subject\"]\nfig, ax = plt.subplots()\n\n# list form y label\ny_pos = numpy.arange(len(arr))\n\n# plot the bar chart using 2 list\nplt.bar(y_pos, num_of_subject_taken,color=(0.1, 0.1, 0.1, 0.1), edgecolor='black')\n\n# change horizontal category name\nplt.xticks(y_pos, arr,)\n\n# label and title\nplt.ylabel('Numbers of Student',fontdict={'fontname':'Comic Sans MS',})\nplt.title('Student Take 1,2,..... 
Subject ',fontdict={'fontname':'Comic Sans MS',})\n\n# draw number of student \nrects = ax.patches\nfor rect, label in zip(rects, num_of_subject_taken):\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2, height + 0.75, label, ha='center', va='bottom')\n \n# show the plot\nplt.show()\n", "repo_name": "minnnhoclamdataaa/Data_Project", "sub_path": "student take 0,1,2,3,...,12 subject.py", "file_name": "student take 0,1,2,3,...,12 subject.py", "file_ext": "py", "file_size_in_byte": 1606, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "29931956694", "text": "import xml.etree.ElementTree as ET\nimport json\nimport string\nfrom pinecone_db import PineconeDB\n\n\n#tree = ET.parse(\"fact_checks_20190605.txt\")\n#root = tree.getroot()\n\npcdb = PineconeDB()\nwith open(\"../fact_checks_20190605.txt\") as f:\n lines = f.readlines()\n\n num_errors = 0\n num_success = 0\n for line in lines:\n try:\n xml_root = ET.fromstring(line)\n\n json_data = json.loads(xml_root.text)\n\n # print(json_data[\"url\"])\n # print(json_data[\"claimReviewed\"])\n claim = json_data[\"claimReviewed\"].translate(str.maketrans('', '', string.punctuation))\n # print(claim)\n\n pcdb.add_strings([claim], json_data[\"url\"])\n\n num_success += 1\n except Exception as e:\n print(e)\n num_errors += 1\n\n\nprint(\"{} entries successfully uploaded and {} errors\".format(num_success, num_errors))\n\n", "repo_name": "dtkettler/factchecker", "sub_path": "crawlers/upload_fact_data.py", "file_name": "upload_fact_data.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pinecone_db.PineconeDB", "line_number": 10, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.fromstring", "line_number": 18, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 18, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 20, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 24, "usage_type": "attribute"}]} +{"seq_id": "40839417200", "text": "#!/usr/bin/env python\nfrom operator import itemgetter\nfrom pathlib import Path\nfrom typing import Tuple, List\n\nfrom isotyper.statistics._settings import *\nfrom isotyper.utilities import (\n write_out,\n create_file,\n Tree,\n READ_NUMBER_DIVISION,\n)\n\n\ndef renyi_entropy(\n cpoints: List,\n cvdf: List,\n vpoints: 
List,\n vvdf: List,\n totalv: int,\n totalreads: int,\n) -> Tuple[float, float]:\n \"\"\"Renyi entropy\n\n Parameters\n ----------\n cpoints : List\n cluster sizes\n cvdf : List\n cluster size distribution\n vpoints : List\n vertex sizes\n vvdf : List\n vertex size distribution\n totalv : int\n total vertices\n totalreads : int\n total read counts\n\n Returns\n -------\n Tuple[float, float]\n vertex and cluster Renyi entropies.\n \"\"\"\n vrenyi = 0\n crenyi = 0\n tv = totalreads * totalreads * 1.0 # KT: what is this * 1.0 for?\n tc = totalv * totalv * 1.0\n for i in range(0, len(vpoints)):\n vrenyi += vvdf[i] * (vpoints[i] * vpoints[i] / tv)\n for i in range(0, len(cpoints)):\n crenyi += cvdf[i] * (cpoints[i] * cpoints[i] / tc)\n return (vrenyi, crenyi)\n\n\ndef vdf(n: List) -> Tuple[List, List]:\n \"\"\"distribution function.\n\n Parameters\n ----------\n n : List\n list of numbers\n\n Returns\n -------\n Tuple[List, List]\n size vector and cumulative distribution function\n \"\"\"\n points = sorted(list(set(n)))\n vdf = []\n for i in range(0, len(points)):\n vdf.append(n.count(points[i]))\n return (points, vdf)\n\n\ndef gini_index(\n cpoints: List, cvdf: List, vpoints: List, vvdf: List\n) -> Tuple[float, float]:\n \"\"\"Gini index calculator for vertex and cluster size.\n\n Parameters\n ----------\n cpoints : List\n cluster sizes\n cvdf : List\n cluster size distribution\n vpoints : List\n vertex sizes\n vvdf : List\n vertex size distribution\n\n Returns\n -------\n Tuple[float, float]\n vertex and cluster gini indices.\n \"\"\"\n (vgini) = get_gini(n=vpoints, v=vvdf)\n (cgini) = get_gini(n=cpoints, v=cvdf)\n return (vgini, cgini)\n\n\ndef get_gini(n: List, v: List) -> float:\n \"\"\"get gini.\n\n Parameters\n ----------\n n : List\n list of numbers 1\n v : List\n size distribution\n\n Returns\n -------\n float\n gini index value\n \"\"\"\n values = []\n for i in range(0, len(n)):\n for j in range(0, v[i]):\n values.append(n[i])\n n = len(values)\n assert n > 0, \"Empty list of values\"\n # Sort smallest to largest\n sortedValues = sorted(values)\n cumm = [0]\n for i in range(n):\n cumm.append(sum(sortedValues[0 : (i + 1)]))\n LorenzPoints = [[], []]\n # Sum of all y values\n sumYs = 0\n # Robin Hood index max(x_i, y_i)\n robinHoodIdx = -1\n for i in range(1, n + 2):\n x = 100.0 * (i - 1) / n\n y = 100.0 * (cumm[i - 1] / float(cumm[n]))\n LorenzPoints[0].append(x)\n LorenzPoints[1].append(y)\n sumYs += y\n maxX_Y = x - y\n if maxX_Y > robinHoodIdx:\n robinHoodIdx = maxX_Y\n # Gini index\n giniIdx = 100 + (100 - 2 * sumYs) / n\n return giniIdx / 100\n\n\ndef get_network_statistics_per_chain(\n cluster_file: Path,\n sample_id: str,\n per_chain_repertoire_statistics_file: Path,\n):\n \"\"\"Summary\n\n Parameters\n ----------\n cluster_file : Path\n path to clustered file.\n sample_id : str\n name of sample.\n per_chain_repertoire_statistics_file : Path\n path to output statistics file.\n \"\"\"\n create_file(per_chain_repertoire_statistics_file)\n fh = open(cluster_file, \"r\")\n cluster = Tree()\n index, sizesv, c_sizes = 0, [], {}\n total_v, total_reads = [], []\n sizesv, c_sizes = {}, {}\n chains_short = []\n t1 = 0\n n = 0\n for l in fh:\n if l[0] != \"#\":\n l = l.strip().split()\n seq_id = l[2]\n chains, freq, id_short = (\n seq_id.split(\"|\")[1].split(\"_\"),\n list(\n map(\n int,\n seq_id.split(READ_NUMBER_DIVISION)[1]\n .split(\"|\")[0]\n .split(\"_\"),\n )\n ),\n seq_id.split(READ_NUMBER_DIVISION)[0],\n )\n t1 = t1 + sum(freq)\n n = n + 1\n if len(chains_short) == 0:\n for i in 
range(0, len(chains)):\n c = chains[i].split(\"*\")[0]\n if c not in chains_short:\n chains_short.append(c)\n non_zero = [i for i in range(len(freq)) if freq[i] != 0]\n if len(total_v) == 0:\n total_v, total_reads = [0] * len(chains_short), [0] * len(\n chains_short\n )\n for c in chains_short:\n sizesv[c], c_sizes[c] = [], []\n for i in non_zero:\n c = chains[i].split(\"*\")[0]\n cluster[c][l[1]][freq[i]][id_short].value = 1\n index = chains_short.index(c)\n total_v[index] = total_v[index] + 1\n sizesv[c] = sizesv[c] + [freq[i]]\n total_reads[index] = total_reads[index] + freq[i]\n fh.close()\n print(total_reads, t1, n)\n if t1 != sum(total_reads):\n print(\"ERROR IN COUNTING!!\")\n out = (\n \"#Id\\tIsotype\\tN reads\\tN vertices\\tVertex Gini Index\\t\"\n + \"Cluster Gini Index\\tLargest Cluster (%)\\t2nd Largest Cluster (%)\\n\"\n )\n for c1 in chains_short:\n cluster_sizes_sub = []\n for clus in cluster[c1]:\n f = 0\n for f1 in cluster[c1][clus]:\n f = f + (f1 * len(cluster[c1][clus][f1]))\n cluster_sizes_sub = cluster_sizes_sub + [f]\n if len(cluster_sizes_sub) > 0:\n (vpoints, vvdf) = vdf(n=sizesv[c1])\n (cpoints, cvdf) = vdf(n=cluster_sizes_sub)\n vgini, cgini = gini_index(\n cpoints=cpoints,\n cvdf=cvdf,\n vpoints=vpoints,\n vvdf=vvdf,\n )\n max_pop, max_1_pop = cpoints[len(cpoints) - 1] * 100.0 / sum(\n sizesv[c1]\n ), cpoints[len(cpoints) - 2] * 100.0 / sum(sizesv[c1])\n out = (\n out\n + str(sample_id)\n + \"\\t\"\n + c1\n + \"\\t\"\n + str(sum(sizesv[c1]))\n + \"\\t\"\n + str(len(sizesv[c1]))\n + \"\\t\"\n + str(vgini)\n + \"\\t\"\n + str(cgini)\n + \"\\t\"\n + str(max_pop)\n + \"\\t\"\n + str(max_1_pop)\n + \"\\n\"\n )\n write_out(out, per_chain_repertoire_statistics_file)\n\n\ndef get_cluster_vertex_distributions(cluster_file: Path) -> Tuple:\n \"\"\"Read clustered file and obtain stats\n\n Parameters\n ----------\n cluster_file : Path\n path to cluster file.\n\n Returns\n -------\n Tuple\n general statistics about cluster vertices,\n \"\"\"\n fh = open(cluster_file, \"r\")\n cluster = Tree()\n (\n index,\n totalc,\n totalv,\n totalreads,\n sizesv,\n c_sizes,\n vertices_in_max_cluster,\n ) = (0, 0, 0, 0, [], {}, 0)\n for l in fh:\n index = index + 1\n if index > 1:\n l = l.strip()\n l = l.split()\n cluster[l[1]][l[2]].value = 1\n size = int(l[3])\n sizesv.append(size)\n totalv = totalv + 1\n totalreads = totalreads + size\n if int(l[1]) == 1:\n vertices_in_max_cluster = vertices_in_max_cluster + 1\n if l[1] in c_sizes:\n c_sizes[l[1]] = c_sizes[l[1]] + size\n else:\n c_sizes[l[1]] = size\n fh.close()\n sizes = []\n totalc = len(cluster)\n for c in cluster:\n sizes.append(len(cluster[c]))\n (cpoints, cvdf) = vdf(sizes)\n (vpoints, vvdf) = vdf(sizesv)\n return (\n cpoints,\n cvdf,\n vpoints,\n vvdf,\n totalc,\n totalv,\n totalreads,\n c_sizes,\n vertices_in_max_cluster,\n )\n\n\ndef proportional_measures(\n c_sizes: List, totalreads: int\n) -> Tuple[float, float]:\n \"\"\"Summary\n\n Parameters\n ----------\n c_sizes : List\n list of sizes\n totalreads : int\n total read count\n\n Returns\n -------\n Tuple[float, float]\n proportions\n \"\"\"\n sizes = []\n for c in c_sizes:\n sizes.append((c, c_sizes[c]))\n s = sorted(sizes, key=itemgetter(1), reverse=True)\n (\n max_pop, max_1_pop) = (\n s[0][1] * 100.0 / totalreads,\n s[1][1] * 100.0 / totalreads,\n )\n return (max_pop, max_1_pop)\n\n\ndef print_distributions(points: List, cdf: List, file_out: Path):\n \"\"\"Print distributions.\n\n Parameters\n ----------\n points : List\n list of sizes\n cdf : List\n cumulative
distribution\n file_out : Path\n path to output\n \"\"\"\n create_file(file_out)\n out = \"#size\\tfrequency of size\\n\"\n for i in range(0, len(points)):\n out = out + str(points[i]) + \"\\t\" + str(cdf[i]) + \"\\n\"\n write_out(out, file_out)\n\n\ndef get_network_statistics(\n cluster_file: Path,\n sample_id: str,\n network_statistics: Path,\n species: str,\n cluster_size_distribution: Path,\n vertex_size_distribution: Path,\n):\n \"\"\"Get network statistics\n\n Parameters\n ----------\n cluster_file : Path\n path to clustered file.\n sample_id : str\n name of sample.\n network_statistics : Path\n path to output statistics file.\n species : str\n organism type.\n cluster_size_distribution : Path\n path to output statistics file (cluster size distribution).\n vertex_size_distribution : Path\n path to output statistics file (vertex size distribution).\n \"\"\"\n create_file(network_statistics)\n (\n cpoints,\n cvdf,\n vpoints,\n vvdf,\n totalc,\n totalv,\n totalreads,\n c_sizes,\n vertices_in_max_cluster,\n ) = get_cluster_vertex_distributions(cluster_file=cluster_file)\n print_distributions(\n points=vpoints, cdf=vvdf, file_out=vertex_size_distribution\n )\n print_distributions(\n points=cpoints, cdf=cvdf, file_out=cluster_size_distribution\n )\n vrenyi, crenyi = renyi_entropy(\n cpoints=cpoints,\n cvdf=cvdf,\n vpoints=vpoints,\n vvdf=vvdf,\n totalv=totalv,\n totalreads=totalreads,\n )\n vgini, cgini = gini_index(\n cpoints=cpoints,\n cvdf=cvdf,\n vpoints=vpoints,\n vvdf=vvdf,\n )\n max_pop, max_1_pop = proportional_measures(\n c_sizes=c_sizes, totalreads=totalreads\n )\n out = (\n \"#Id\\tAnalysis\\tN reads\\tN vertices\\tVertex Gini Index\\tCluster Gini Index\\tLargest Cluster (%)\\t\"\n + \"2nd Largest Cluster (%)\\t% Vertices in largest cluster\\tVertex Renyi\\tCluster Renyi\\tGene\\tSpecies\\n\"\n )\n out = (\n out\n + str(sample_id)\n + \"\\tOVERALL\\t\"\n + str(totalreads)\n + \"\\t\"\n + str(totalv)\n + \"\\t\"\n + str(vgini)\n + \"\\t\"\n + str(cgini)\n + \"\\t\"\n + str(max_pop)\n + \"\\t\"\n + str(max_1_pop)\n + \"\\t\"\n + str(vertices_in_max_cluster * 100.0 / totalv)\n + \"\\t\"\n + str(vrenyi)\n + \"\\t\"\n + str(crenyi)\n + \"\\t\"\n + \"IGH\" # always IGH in Clatworthy lab\n + \"\\t\"\n + species\n + \"\\n\"\n )\n write_out(out, network_statistics)\n\n\ndef main():\n \"\"\"main function for step 4.\"\"\"\n OUT_STAT.mkdir(exist_ok=True, parents=True)\n get_network_statistics(\n cluster_file=CLUST_ID_FILE,\n sample_id=SAMPLE_ID,\n network_statistics=NETSTATS,\n species=ORG,\n cluster_size_distribution=CLUSTER_SIZE_DIST,\n vertex_size_distribution=VERTEX_SIZE_DIST,\n )\n get_network_statistics_per_chain(\n cluster_file=CLUST_ID_FILE,\n sample_id=SAMPLE_ID,\n per_chain_repertoire_statistics_file=NETSTATS_PER_CHAIN,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "clatworthylab/bulkBCRseq", "sub_path": "isotyper/statistics/statistics.py", "file_name": "statistics.py", "file_ext": "py", "file_size_in_byte": 12311, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 56, "usage_type": "name"}, {"api_name": 
"typing.Tuple", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 102, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 148, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 150, "usage_type": "name"}, {"api_name": "isotyper.utilities.create_file", "line_number": 163, "usage_type": "call"}, {"api_name": "isotyper.utilities.Tree", "line_number": 165, "usage_type": "call"}, {"api_name": "isotyper.utilities.READ_NUMBER_DIVISION", "line_number": 181, "usage_type": "argument"}, {"api_name": "isotyper.utilities.READ_NUMBER_DIVISION", "line_number": 186, "usage_type": "argument"}, {"api_name": "isotyper.utilities.write_out", "line_number": 255, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 258, "usage_type": "name"}, {"api_name": "isotyper.utilities.Tree", "line_number": 272, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 258, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 319, "usage_type": "name"}, {"api_name": "operator.itemgetter", "line_number": 338, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 320, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 346, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 346, "usage_type": "name"}, {"api_name": "isotyper.utilities.create_file", "line_number": 358, "usage_type": "call"}, {"api_name": "isotyper.utilities.write_out", "line_number": 362, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 366, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 368, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 370, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 371, "usage_type": "name"}, {"api_name": "isotyper.utilities.create_file", "line_number": 390, "usage_type": "call"}, {"api_name": "isotyper.utilities.write_out", "line_number": 456, "usage_type": "call"}]} +{"seq_id": "7464815936", "text": "import torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom os.path import join\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\nfrom transformers import AutoModelWithLMHead, AutoTokenizer, top_k_top_p_filtering\n\nimport re\nimport datetime\n\nfrom transformers import AutoTokenizer\n\nfrom models import get_embedding_layer, create_model, _create_model\nfrom prompt_encoder import PromptEncoder\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nSMALL_CONST = 1e-10\nBIG_CONST = -1e15\n\nclass Distill_Tuning(torch.nn.Module):\n\n def __init__(self, args, template, label_token = None):\n super().__init__()\n self.args = args\n\n # load tokenizer\n self.tokenizer = AutoTokenizer.from_pretrained(self.args.model_name_or_path)\n self.tokenizer.pad_token = self.tokenizer.eos_token\n \n # model setting\n self.model = create_model(self.args)\n # self.model.resize_token_embeddings(len(self.tokenizer))\n self.model = self.model.to(self.args.device)\n for param in self.model.parameters():\n param.requires_grad = self.args.use_lm_finetune\n \n # get model's embeddings\n self.embeddings = self.model.get_input_embeddings()\n \n\n # label information\n self.label_token = label_token\n self.label_token_ids ={}\n \n for k, v in self.label_token.items():\n print(k,v,self.tokenizer.encode(v))\n \n self.label_token_ids[k] = self.tokenizer.encode(v)\n\n 
self.template = template\n # load prompt encoder\n self.hidden_size = self.embeddings.embedding_dim\n \n self.pseudo_token_id = self.tokenizer.convert_tokens_to_ids(self.args.pseudo_token)\n\n self.spell_length = sum(self.template)\n self.prompt_encoder = PromptEncoder(self.template, self.hidden_size, self.tokenizer, args)\n self.prompt_encoder = self.prompt_encoder.to(self.args.device)\n \n # self.fc_loss = CrossEntropyLoss(ignore_index = self.tokenizer.eos_token_id)\n \n ### load discriminator\n if self.args.disc_embedding_checkpoint != None:\n \n self.disc_model = _create_model(self.args.disc_embedding_checkpoint[:-5]).to(self.args.device)\n self.spell_length_disc = sum(self.args.template_disc)\n self.disc_embedding = self.disc_model.get_input_embeddings()\n self.prompt_encoder_disc = PromptEncoder(self.args.template_disc, self.disc_embedding.embedding_dim, self.tokenizer, args)\n self.prompt_encoder_disc = self.prompt_encoder_disc.to(self.args.device)\n self.prompt_encoder_disc.load_state_dict(self.load_prompt(self.args.disc_embedding_checkpoint))\n else :\n self.disc_model = self.model\n self.prompt_encoder_disc = self.prompt_encoder\n \n \n def load_prompt(self, embedding_checkpoint):\n checkpoint = torch.load(embedding_checkpoint)\n prompt_embedding = checkpoint['embedding']\n return prompt_embedding\n \n def generate_soft_tokens(self, generated_tokens, past_key_values= None, position = None):\n \n if past_key_values!= None:\n last_embeds =self.embeddings(generated_tokens[:, -1]).unsqueeze(1)#get its embeddings\n with torch.no_grad():\n outputs = self.model(inputs_embeds=last_embeds,\n past_key_values = past_key_values,\n position_ids = position[:, past_key_values[0][0].shape[-2]],\n return_dict=True)\n \n else:\n attention_mask = (generated_tokens!=self.tokenizer.eos_token_id).type(torch.uint8)\n position_ids = attention_mask.long().cumsum(-1)- 1\n position_ids.masked_fill_(attention_mask == 0, 0)\n last_embeds =self.embeddings(generated_tokens) #get its embeddings\n with torch.no_grad():\n outputs = self.model(inputs_embeds=last_embeds,\n past_key_values = past_key_values,\n attention_mask = attention_mask,\n position_ids = position_ids,\n return_dict=True)\n \n next_token_logits = outputs.logits[:, -1, :]\n \n next_token_logits = self.top_k_top_p_filtering(next_token_logits.squeeze(1), top_k=self.args.ranking_scope, top_p=self.args.top_p, filter_value=BIG_CONST)\n \n return next_token_logits, outputs.past_key_values\n \n \n def top_k_top_p_filtering(self,\n logits,\n top_k = 0,\n top_p = 1.0,\n filter_value = -1e15 ,\n min_tokens_to_keep = 1,\n ):\n \"\"\"Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (batch size, vocabulary size)\n if top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751)\n Make sure we keep at least min_tokens_to_keep per batch example in the output\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n \"\"\"\n if top_k > 0:\n top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check\n # Remove all tokens with a probability less than the last token of the top-k\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value\n\n if top_p < 1.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold (token with 0 are kept)\n sorted_indices_to_remove = cumulative_probs > top_p\n if min_tokens_to_keep > 1:\n # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)\n sorted_indices_to_remove[..., :min_tokens_to_keep] = 0\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # scatter sorted tensors to original indexing\n indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)\n logits[indices_to_remove] = filter_value\n return logits\n \n \n\n \n def _predict_scores(self, x_hs, att_mask):\n bz = len(x_hs)\n # construct query ids\n prompt_tokens = [self.pseudo_token_id]\n \n queries = self.get_query(x_hs, prompt_tokens)\n # construct label ids\n attention_mask = torch.cat([att_mask, torch.ones([att_mask.shape[0], self.spell_length_disc]).long().to(self.args.device)], dim=1)\n # get embedded input\n inputs_embeds = self.embed_input(queries)\n \n position_ids = attention_mask.long().cumsum(-1)- 1\n position_ids.masked_fill_(attention_mask == 0, 0)\n \n with torch.no_grad():\n output = self.disc_model(inputs_embeds = inputs_embeds,\n attention_mask = attention_mask,\n position_ids = position_ids,\n labels=None)\n\n logits = output.logits[:,-1,:].squeeze(1)\n \n binary_prob = torch.softmax(logits[:,[11274,14774]], dim=-1)\n \n if self.args.corpus_type == \"negative\":\n return binary_prob[:,1]\n else:\n return binary_prob[:,0] \n \n def get_query(self, x_h, prompt_tokens, x_t = None):\n \n prompt_tensor = torch.tensor(prompt_tokens* (self.spell_length_disc)).to(self.args.device)\n prompt_tensor = prompt_tensor.expand(x_h.shape[0],-1)\n if x_t != None:\n x_t = x_t.unsqueeze(1)\n return torch.cat([x_h, prompt_tensor, x_t], dim =1)\n else:\n return torch.cat([x_h, prompt_tensor], dim =1)\n \n\n def embed_input(self, queries):\n bz = queries.shape[0]\n queries_for_embedding = queries.clone()\n raw_embeds = self.disc_embedding(queries_for_embedding)\n \n replace_embeds = self.prompt_encoder_disc()\n \n replace_embeds = replace_embeds.unsqueeze(0).expand(bz,-1, -1)\n \n raw_embeds[:,-self.prompt_encoder_disc.spell_length:,: ] = replace_embeds\n \n return raw_embeds\n \n \n def get_query_head(self, x_h, prompt_tokens, x_t = None):\n \n prompt_tensor_head = torch.tensor(prompt_tokens* (self.spell_length)).to(self.args.device)\n \n trans_inputs = []\n \n index_musk = (x_h == self.tokenizer.pad_token_id).type(torch.uint8) # only calculate the token which is not eos\n \n valid_number_length = torch.sum(index_musk, 1)\n \n for index, seq in zip(valid_number_length, x_h):\n if index == x_h.shape[1]:\n trans_inputs.append(torch.cat([prompt_tensor_head,seq]))\n 
else:\n trans_inputs.append(torch.cat([seq[:index], prompt_tensor_head, seq[index:]]))\n \n res = torch.stack(trans_inputs, dim=0)\n if x_t != None:\n # x_t = x_t.unsqueeze(1)\n return torch.cat([res, x_t], dim =1)\n else:\n return res \n \n \n def embed_input_head(self, queries):\n bz = queries.shape[0]\n queries_for_embedding = queries.clone()\n \n queries_for_embedding[(queries == self.pseudo_token_id)] = self.tokenizer.unk_token_id\n raw_embeds = self.embeddings(queries_for_embedding)\n\n blocked_indices = (queries == self.pseudo_token_id).type(torch.uint8).nonzero().reshape((bz, self.spell_length, 2))[:, :, 1] # bz\n \n replace_embeds = self.prompt_encoder()\n for bidx in range(bz):\n for i in range(self.prompt_encoder.spell_length):\n raw_embeds[bidx, blocked_indices[bidx, i], :] = replace_embeds[i, :]\n return raw_embeds\n \n \n def generate(self, prompts_ids, max_length, desired_att = None, beta = 0.5):\n \"\"\"\n generation forward based on given prompt tokens. \n Args:\n prompts_ids: the prompt tokens\n max_length: the max len of the generation\n Returns:\n generated_texts:[generated tokens]\n \"\"\"\n cur_len = prompts_ids.shape[1]\n logits = []\n output_ids = prompts_ids\n return_dict = {}\n eos_flag = torch.ones([prompts_ids.shape[0]]).type(torch.uint8).to(self.args.device)\n \n \n prompt_tokens = [self.pseudo_token_id]\n queries = self.get_query_head(prompts_ids, prompt_tokens)\n inputs_embeds = self.embed_input_head(queries)\n\n attention_mask = torch.cat([prompts_ids!= self.tokenizer.pad_token_id , torch.ones([prompts_ids.shape[0], self.prompt_encoder.spell_length + max_length-prompts_ids.shape[1]]).long().to(self.args.device)], dim=1)\n\n position_ids = attention_mask.long().cumsum(-1)- 1\n position_ids.masked_fill_(attention_mask == 0, 0)\n\n # start = datetime.datetime.now()\n # test generation time\n\n while cur_len <= max_length:\n outputs = self.model(inputs_embeds=inputs_embeds,\n attention_mask = attention_mask[:,:inputs_embeds.shape[1]],\n position_ids = position_ids[:,:inputs_embeds.shape[1]],\n return_dict=True)\n \n next_token_logits = outputs.logits[:, -1, :]\n next_token_logits_ = self.top_k_top_p_filtering(next_token_logits, top_k=self.args.ranking_scope, top_p=1.0, filter_value=BIG_CONST)\n \n next_token_logits_prob = torch.softmax(next_token_logits_, dim=1)\n next_tokens = torch.multinomial(next_token_logits_prob, num_samples=1).squeeze(1)\n \n eos_flag = eos_flag.mul((next_tokens != self.tokenizer.eos_token_id).type(torch.uint8))# if flag == 0, the generation of this sequence is over \n next_tokens = next_tokens.mul(eos_flag)\n next_tokens[next_tokens == 0] = self.tokenizer.eos_token_id \n output_ids = torch.cat([output_ids, next_tokens.unsqueeze(1)], dim=1)\n inputs_embeds = torch.cat([inputs_embeds, self.embeddings(next_tokens).unsqueeze(1)], dim=1)\n\n print(\"cur_len is:\",cur_len)\n cur_len = cur_len + 1\n \n# end = datetime.datetime.now()\n# print(\"running time is:\",end-start)\n \n return_dict = {\"generated_tokens\":output_ids}\n return return_dict\n\n \n def feedback_from_discriminator(self, input_ids, logits_seq, desired_att):\n \n top_logits, top_indices = torch.topk(logits_seq, self.args.ranking_scope) # batch x topk\n \n candidates = []\n for logit_id, ids in zip(top_indices, input_ids):\n data = ids.expand(self.args.ranking_scope, -1)\n new_input_candidates = torch.cat([data, logit_id.unsqueeze(1)], dim=1) # batch x topk x seq+1\n candidates.append(new_input_candidates)\n \n \n candidates = torch.cat(candidates, dim=0)\n \n if candidates.shape[1]<30:\n 
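# ---- Editor's aside (not part of the original source): the method being defined
# here expands each prefix with its top-k candidate tokens and scores every
# candidate continuation with a discriminator. A toy, self-contained version of
# that expand-and-score pattern is sketched below; `score_fn` stands in for the
# discriminator call and is an assumption of this sketch.
import torch

def rerank_topk(prefix_ids, logits, k, score_fn):
    # prefix_ids: (batch, seq) token ids, logits: (batch, vocab) next-token scores
    top_logits, top_idx = torch.topk(logits, k)                        # (batch, k)
    batch, seq = prefix_ids.shape
    expanded = prefix_ids.unsqueeze(1).expand(batch, k, seq)           # each prefix repeated k times
    candidates = torch.cat([expanded, top_idx.unsqueeze(-1)], dim=-1)  # (batch, k, seq + 1)
    scores = score_fn(candidates.reshape(batch * k, seq + 1))          # one score per candidate
    ranked = logits.clone()
    ranked.scatter_(-1, top_idx, scores.reshape(batch, k))             # scores replace the raw logits
    return ranked

# e.g. rerank_topk(torch.randint(0, 100, (2, 5)), torch.randn(2, 100), 8,
#                  lambda ids: ids.float().mean(-1))  # dummy scorer, illustration only
# ---- end of aside ----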
pad_tensor =torch.empty(candidates.shape[0],30 - candidates.shape[1]).long().fill_(self.tokenizer.eos_token_id).to(self.args.device)\n candidates = torch.cat([pad_tensor,candidates], dim=1)\n \n pred_scores = []\n for new_input_candidates in torch.split(candidates, 120, dim=0):\n mask = (new_input_candidates != self.tokenizer.eos_token_id).type(torch.uint8)\n pred_score = self._predict_scores(new_input_candidates, mask)\n pred_scores.append(pred_score)\n\n pred_scores = torch.cat(pred_scores, dim=0)\n pred_scores = pred_scores.reshape(input_ids.shape[0], -1)\n\n res_logits = logits_seq.clone().detach()\n res_logits.scatter_(-1, top_indices, pred_scores)\n return res_logits\n \n \n \n def get_ranked_logtis(self, inputs, logits, desired_att):\n \n return_logits = []\n for i in range(inputs.shape[1]):\n tmp = self.feedback_from_discriminator(inputs[:, :i+1], logits[:,i,:], desired_att)\n return_logits.append(tmp)\n \n return torch.stack(return_logits, dim=1).detach().clone()\n \n \n \n def KL_loss(self, input_x, input_y, attention_mask):\n \"\"\"\n compute the soft cross-entropy (KL-style) distillation loss over the unmasked positions\n \"\"\"\n m = torch.flatten(attention_mask)\n indices = torch.nonzero(m).squeeze(-1)\n \n x = input_x.reshape(-1,input_x.shape[-1])\n x = torch.index_select(x, 0, indices)\n \n y = input_y.reshape(-1,input_y.shape[-1])\n y = torch.index_select(y, 0, indices)\n \n y_ = torch.softmax(y/self.args.temperature, dim = -1)\n loss_ = -(y_ * (x+1e-20).log()).sum() / x.size(0)\n \n _y = torch.softmax(((1-y).mul(y>0.0))/self.args.temperature, dim = -1)\n _loss = -(_y * (1-x+1e-20).log()).sum() / x.size(0)\n\n return loss_ + _loss\n\n\n \n def contrast_crossEntropy_loss(self, logits_prob, x_hs, sentence_labels = None):\n\n shift_prob = logits_prob[..., :-1, :].contiguous()\n shift_labels = x_hs[..., 1:].contiguous()\n\n if sentence_labels != None:\n index_negative = (sentence_labels==14774)\n index = torch.nonzero(index_negative).squeeze(1)\n shift_prob[index] = 1-shift_prob[index]\n\n loss = F.nll_loss((shift_prob.view(-1, shift_prob.size(-1))+1e-20).log(), shift_labels.view(-1), ignore_index=-100)\n \n return loss\n \n \n def get_candidate_logits(self, x_hs, att_mask):\n \n position_ids = att_mask.long().cumsum(-1)- 1\n position_ids.masked_fill_(att_mask == 0, 0)\n \n with torch.no_grad():\n output = self.model(input_ids= x_hs,\n attention_mask=att_mask,\n position_ids=position_ids,\n labels= None)\n return output.logits.detach().clone()\n \n \n def forward(self, x_hs, x_ts, att_mask):\n # construct query ids\n prompt_tokens = [self.pseudo_token_id]\n queries = self.get_query_head(x_hs, prompt_tokens)\n # construct label ids\n attention_mask = torch.cat([att_mask, torch.ones([att_mask.shape[0], self.prompt_encoder.spell_length]).long().to(self.args.device)], dim=1)\n \n position_ids = attention_mask.long().cumsum(-1)- 1\n position_ids.masked_fill_(attention_mask == 0, 0)\n \n labels = torch.clone(queries)\n \n labels.masked_fill_(attention_mask==0, -100)\n labels.masked_fill_(queries == self.pseudo_token_id, -100)\n \n # get embedded input\n inputs_embeds = self.embed_input_head(queries)\n\n output = self.model(inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n position_ids=position_ids,\n labels= None)\n \n output_logits = output.logits\n # ce_loss = self.contrast_crossEntropy_loss(torch.softmax(output_logits, dim = -1), labels, sentence_labels = x_ts)\n \n _queries = queries.view(queries.size(0)*queries.size(1))\n _output_logits = output_logits.view(output_logits.size(0)*output_logits.size(1),-1)\n disc_logits = 
_output_logits.index_select(0, torch.nonzero(_queries != self.pseudo_token_id).squeeze(1)).view(output_logits.shape[0],-1, output_logits.shape[2])\n \n logits_candidate = self.get_candidate_logits(x_hs,att_mask)\n logits_candidate = self.top_k_top_p_filtering(logits_candidate.view(logits_candidate.shape[0]*logits_candidate.shape[1], -1), top_k= self.args.ranking_scope , top_p=self.args.top_p, filter_value=BIG_CONST).view(x_hs.shape[0],x_hs.shape[1], -1)\n\n reank_output = self.get_ranked_logtis(x_hs, logits_candidate.detach().clone(), desired_att=None)\n \n reank_output = (logits_candidate>BIG_CONST+10).mul(reank_output)\n\n kl_loss = self.KL_loss(torch.softmax(disc_logits, dim=-1), reank_output, att_mask)\n \n loss = kl_loss\n\n return loss\n \n \n\n \n", "repo_name": "littlehacker26/Discriminator-Cooperative-Unlikelihood-Prompt-Tuning", "sub_path": "distill_tuning.py", "file_name": "distill_tuning.py", "file_ext": "py", "file_size_in_byte": 19111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn", "line_number": 22, "usage_type": "attribute"}, {"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 29, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 29, "usage_type": "name"}, {"api_name": "models.create_model", "line_number": 33, "usage_type": "call"}, {"api_name": "prompt_encoder.PromptEncoder", "line_number": 59, "usage_type": "call"}, {"api_name": "models._create_model", "line_number": 67, "usage_type": "call"}, {"api_name": "prompt_encoder.PromptEncoder", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.cumsum", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 215, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 240, "usage_type": "attribute"}, {"api_name": "torch.ones", "line_number": 262, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 262, "usage_type": "attribute"}, 
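# ---- Editor's aside (not part of the original source): the `KL_loss`/`forward`
# pair above distills the discriminator-reranked distribution (teacher) into the
# prompt-tuned model (student): positions are selected by the attention mask, the
# teacher is sharpened by a temperature, and a soft cross-entropy is taken against
# the student probabilities. A stripped-down form of that objective, under the
# same masking convention, might look like this (names are assumptions):
import torch

def masked_soft_cross_entropy(student_probs, teacher_logits, attention_mask, temperature=1.0):
    keep = torch.nonzero(attention_mask.flatten()).squeeze(-1)  # indices of unmasked positions
    p = student_probs.reshape(-1, student_probs.shape[-1])[keep]
    t = teacher_logits.reshape(-1, teacher_logits.shape[-1])[keep]
    target = torch.softmax(t / temperature, dim=-1)             # temperature-sharpened teacher
    return -(target * (p + 1e-20).log()).sum() / p.size(0)      # soft cross-entropy per position
# ---- end of aside ----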
{"api_name": "torch.cat", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 286, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 289, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 292, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 293, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 316, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 319, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 323, "usage_type": "call"}, {"api_name": "torch.uint8", "line_number": 324, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 328, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 344, "usage_type": "call"}, {"api_name": "torch.flatten", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 353, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 356, "usage_type": "call"}, {"api_name": "torch.index_select", "line_number": 359, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 361, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 364, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 378, "usage_type": "call"}, {"api_name": "torch.nn.functional.nll_loss", "line_number": 381, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 381, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 391, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 404, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 404, "usage_type": "call"}, {"api_name": "torch.clone", "line_number": 409, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 427, "usage_type": "call"}, {"api_name": "torch.softmax", "line_number": 436, "usage_type": "call"}]} +{"seq_id": "11550339437", "text": "import math\nfrom copy import deepcopy\nimport os\nimport shutil\nfrom typing import Callable, Tuple, Dict, Union, Any\nimport gc\nimport pathlib\nimport hashlib\nimport importlib.util\nimport sys\nimport numbers\n\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import distributed as dist\nfrom torch.cuda.amp.grad_scaler import GradScaler\nfrom torch.cuda.amp.autocast_mode import autocast\nfrom torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService\nfrom vlutils.base import Registry\nfrom vlutils.base.freqHook import ChainHook\nfrom vlutils.saver import Saver\nfrom vlutils.logger import trackingFunctionCalls\nfrom vlutils.base import Restorable\nfrom vlutils.runtime import relativePath\nfrom vlutils.config import summary\n\nimport modfire.utils.registry\nfrom modfire.utils.registry import OptimRegistry, SchdrRegistry, CriterionRegistry, ModelRegistry, DatasetRegistry, DataPipeRegistry\nfrom modfire import Consts\nfrom modfire.config import Config\nfrom modfire.train.hooks import getAllHooks\nfrom modfire.validate import Validator, metrics\nfrom modfire.utils import totalParameters, StrPath, getRichProgress, SafeTerminate\nfrom modfire.dataset import QuerySplit, Database, TrainSplit\n\nfrom 
.hooks import EpochFrequencyHook, checkHook, splitHooks\nfrom .utils import EMATracker, PrettyStep, getSaver, setWeightDecay\n\n\nclass TrainerBuilder(modfire.utils.registry.Registry[Callable[..., \"PalTrainer\"]]):\n pass\n\n\ndef _p(attr):\n def _decorate(self):\n msg = \"[%s] \" + attr\n return msg, self.PrettyStep\n return property(_decorate)\n\nclass LoggingTemplate:\n def __init__(self):\n self._step = 0\n self._prettyStep = PrettyStep()\n\n @property\n def PrettyStep(self):\n return self._prettyStep(self._step)\n\n\n ########### Templates ###########\n _LOG_InitSummary = _p(\"Here is the whole config during this run: \\r\\n%s\")\n _LOG_Bye = _p(\"Bye.\")\n _LOG_FindCkpt = _p(\"Found ckpt to resume at %s\")\n _LOG_ResumeAtEpoch = _p(\"Resume training at %d epochs.\")\n _LOG_StartTraining = _p(\"Start training.\")\n _LOG_RevealTensorboard = _p(\"See you at `%s`\")\n _LOG_EarlyStop = _p(\"Early stopped at epoch %4d.\")\n _LOG_SummaryInfoNomAP = _p(\"Total epochs: %d, total steps: %s, best mAP: N/A.\")\n _LOG_SummaryInfo = _p(\"Total epochs: %d, total steps: %s, best mAP: %.2f%%.\")\n _LOG_SummarySaveCkpt = _p(\"Model saved to `%s`.\")\n\n\n _DEBUG_InitRank = _p(\"<%s> is located at rank `%d`\")\n _DEBUG_Inited = _p(\"<%s> created.\")\n _DEBUG_RestoreStateStart = _p(\"Restoring state dict from `%s`\")\n _DEBUG_RestoreStateFinish = _p(\"Restore network parameters finished.\")\n _DEBUG_OptimizerReset = _p(\"Optimizer reset.\")\n _DEBUG_LRSchdrReset = _p(\"LR scheduler reset.\")\n _DEBUG_RegistrySummary = _p(\"Summary of %s: \\r\\n%s\")\n _DEBUG_CreateDatasetStart = _p(\"Create `config.Train.TrainSet` (\\\"%s\\\") with training pipeline: `%s`.\")\n _DEBUG_CreateQuerysetStart = _p(\"Create `config.Train.QuerySet` (\\\"%s\\\") with evaluation pipeline: `%s`.\")\n _DEBUG_CreateDatabaseStart = _p(\"Create `config.Train.Database` (\\\"%s\\\") with evaluation pipeline: `%s`.\")\n _DEBUG_CreateDatasetFinish = _p(\"Training dataset \\r\\n\\t%s \\r\\nmounted.\")\n _DEBUG_CreateDatasetMainFinish = _p(\"Train set \\r\\n\\t%s, \\r\\nquery set \\r\\n\\t%s and \\r\\ndatabase \\r\\n\\t%s \\r\\nmounted.\")\n _DEBUG_CreateModelStart = _p(\"Creating model...\")\n _DEBUG_CreateModelFinish = _p(\"Model created. #Params: %s.\")\n _DEBUG_CreateCriterionStart = _p(\"Creating criterion...\")\n _DEBUG_CreateCriterionFinish = _p(\"Criterion created. 
#Params: %s.\")\n _DEBUG_CreateOptimizerStart = _p(\"Creating optimizer...\")\n _DEBUG_CreateOptimizerFinish = _p(\"Optimizer created.\")\n _DEBUG_CreateSchdrStart = _p(\"Creating LR scheduler...\")\n _DEBUG_CreateSchdrFinish = _p(\"LR scheduler created.\")\n _DEBUG_CallBeforRunStart = _p(\"Call `_beforeRun()`.\")\n _DEBUG_CallBeforRunFinish = _p(\"End call `_beforeRun()`.\")\n _DEBUG_CallAfterRunStart = _p(\"Call `_afterRun()`.\")\n _DEBUG_CallAfterRunFinish = _p(\"End call `_afterRun()`.\")\n _DEBUG_CallEpochBeginStart = _p(\"Epoch %4d started.\")\n _DEBUG_CallEpochBeginFinish = _p(\"End call `_epochStart()`.\")\n _DEBUG_CallEpochEndStart = _p(\"Epoch %4d finished.\")\n _DEBUG_CallEpochEndFinish = _p(\"End call `_epochFinish()`.\")\n _DEBUG_TrainLoopFinish = _p(\"Training loop finished.\")\n _DEBUG_ShowSchdrUpdate = _p(\"LR is set to %.2e.\")\n _DEBUG_ShowTemperatureUpdate = _p(\"Temperature is set to %.2e.\")\n _DEBUG_Validation = _p(\"Start validation at epoch %4d.\")\n _DEBUG_FindOneEarlyStop = _p(\"Performance not improved for %d / %d epochs.\")\n _DEBUG_EndValidation = _p(\"End validation at epoch %4d.\")\n\n _CRITICAL_ProcessInterrupt = _p(\"Main process was interrupted, try to save necessary info.\")\n _CRITICAL_TimeoutInfo = _p(\"This post-process will be killed after %d secs if stuck.\")\n _CRITICAL_ShutdownMessage = _p(\"Find the last checkpoint at `%s`\")\n\n\nclass PalTrainer(Restorable, LoggingTemplate):\n def __init__(self, config: Config, loggingLevel: int):\n Restorable.__init__(self)\n LoggingTemplate.__init__(self)\n\n self._epoch = 0\n\n self.rank = dist.get_rank()\n self.worldSize = dist.get_world_size()\n torch.cuda.set_device(self.rank)\n self.config = config\n self.saver = getSaver(self.config.Train.SaveDir, saveName=\"saved.ckpt\", config=config.serialize(), loggerName=\"root\", reserve=False, loggingLevel=loggingLevel, disable=self.rank != 0)\n\n self.saver.info(*self._LOG_InitSummary, summary(config.serialize()))\n\n self.saver.debug(*self._DEBUG_InitRank, self.__class__.__name__, self.rank)\n\n # # Used for self.PrettyStep\n # self.lastFormatted = -1\n self._preRegistration()\n\n self._model = self._createModel()\n self._criterion = self._createCriterion()\n self._optimizer, self.optimFn = self._createOptimizer(self._model, self._criterion)\n self._scheduler, self.schdrFn = self._createScheduler(self._optimizer)\n\n self.earlyStopFlag = torch.tensor([False]).to(self.rank)\n\n self.saver.debug(*self._DEBUG_Inited, self.__class__.__name__)\n\n\n def save(self, path = None):\n self.saver.save(path, trainer=self, config=self.config.serialize())\n\n def done(self):\n self.saver.debug(summary(self.config.serialize()))\n self.saver.info(*self._LOG_Bye)\n\n def resume(self, path):\n self.saver.info(*self._LOG_FindCkpt, path)\n self.restoreStates(path)\n self.saver.info(*self._LOG_ResumeAtEpoch, self._epoch)\n\n def restoreStates(self, path: StrPath):\n self.saver.debug(*self._DEBUG_RestoreStateStart, path)\n self.saver.load(path, \"cpu\", logger=self.saver, trainer=self)\n self.saver.debug(*self._DEBUG_RestoreStateFinish)\n self.resetOptimizer()\n self.resetScheduler(self._scheduler.last_epoch)\n\n def resetOptimizer(self):\n del self._optimizer\n self._optimizer = self.optimFn(self._model.parameters(), **self.config.Train.Optim.Params)\n\n for group in self._optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n\n self.saver.debug(*self._DEBUG_OptimizerReset)\n\n def resetScheduler(self, lastEpoch=-1):\n del self._scheduler\n self._scheduler = 
self.schdrFn(self._optimizer, last_epoch=lastEpoch, **self.config.Train.Schdr.Params)\n self.saver.debug(*self._DEBUG_LRSchdrReset)\n\n def train(self):\n beforeRunHook, afterRunHook, stepStartHook, stepFinishHook, epochStartHook, epochFinishHook = self._createHooks(self.config, self.saver, self._model, self._criterion)\n\n scaler = GradScaler()\n\n datasets = self._createDatasets()\n\n # The DistributedReadingService is too slow since it uses only one worker per node.\n # NOTE: restore the commented line below once the above issue is fixed.\n # with DataLoader(datasets[\"trainSet\"].DataPipe, reading_service=DistributedReadingService()) as trainLoader:\n with DataLoader2(datasets[\"trainSet\"].DataPipe, reading_service=MultiProcessingReadingService(num_workers=min(int(math.sqrt(datasets[\"trainSet\"].BatchSize)), 16), pin_memory=True, persistent_workers=True)) as trainLoader:\n\n self._beforeRun(beforeRunHook, **datasets)\n\n batchesOneEpoch = math.ceil(len(datasets[\"trainSet\"]) / (datasets[\"trainSet\"].BatchSize * self.worldSize))\n totalBatches = batchesOneEpoch * self.config.Train.Epoch\n\n self._model.train()\n\n # A forever dataLoader\n for targets, images in trainLoader:\n if self._step % batchesOneEpoch == 0:\n self._epochStart(epochStartHook, **datasets)\n\n with autocast():\n\n # Main loop\n # Any dict used as args for model, criterion\n otherArgs = self._stepStart(stepStartHook, inputs=(targets, images))\n\n # A dict as keyword arguments for criterion\n outputs = self._model(images.to(self.rank, non_blocking=True, memory_format=torch.channels_last), **otherArgs)\n\n # loss: A scalar, stats: A dict as keyword arguments for logging\n loss, stats = self._criterion(**outputs, y=targets.to(self.rank, non_blocking=True), **otherArgs)\n\n scaler.scale(loss).backward()\n scaler.step(self._optimizer)\n scaler.update()\n self._optimizer.zero_grad()\n\n\n self._stepFinish(stepFinishHook, loss=loss, stats=stats, outputs=outputs, **otherArgs)\n if self._step % batchesOneEpoch == 0:\n try:\n self._epochFinish(epochFinishHook, **datasets)\n except StopIteration:\n break\n if self._step > totalBatches:\n break\n\n self._afterRun(afterRunHook, **datasets)\n\n\n def _preRegistration(self):\n otherPythonFiles = self.config.Train.ExternalLib\n for pyFile in otherPythonFiles:\n filePath = pathlib.Path(pyFile).absolute()\n # md5 of abs file path as module name\n moduleName = hashlib.md5(str(filePath).encode()).hexdigest()\n spec = importlib.util.spec_from_file_location(moduleName, pyFile)\n if spec is None:\n continue\n module = importlib.util.module_from_spec(spec)\n sys.modules[moduleName] = module\n spec.loader.exec_module(module)\n\n for reg in modfire.utils.registry.__all__:\n registry = getattr(modfire.utils.registry, reg)\n if issubclass(registry, Registry):\n self.saver.debug(*self._DEBUG_RegistrySummary, registry, registry.summary())\n\n @staticmethod\n def _createHooks(config: Config, saver: Saver, model, criterion):\n allHooks = getAllHooks(config.Train.Hooks)\n\n modelHooks = splitHooks(*[m.module if isinstance(m, DistributedDataParallel) else m for m in [model, criterion]])\n\n # allHooks = dict()\n for key in modelHooks.keys():\n allHooks[str(key)] = ChainHook(allHooks[str(key)], modelHooks[key])\n\n\n beforeRunHook, afterRunHook, stepStartHook, stepFinishHook, epochStartHook, epochFinishHook = allHooks[\"beforeRunHook\"], allHooks[\"afterRunHook\"], allHooks[\"stepStartHook\"], allHooks[\"stepFinishHook\"], allHooks[\"epochStartHook\"], allHooks[\"epochFinishHook\"]\n\n\n beforeRunHook = 
checkHook(beforeRunHook, \"BeforeRunHook\", saver)\n afterRunHook = checkHook(afterRunHook, \"AfterRunHook\", saver)\n stepStartHook = checkHook(stepStartHook, \"StepStartHook\", saver)\n stepFinishHook = checkHook(stepFinishHook, \"StepFinishHook\", saver)\n epochStartHook = checkHook(epochStartHook, \"EpochStartHook\", saver)\n epochFinishHook = checkHook(epochFinishHook, \"EpochFinishHook\", saver)\n return beforeRunHook, afterRunHook, stepStartHook, stepFinishHook, epochStartHook, epochFinishHook\n\n def _createDatasets(self) -> Dict[str, Union[TrainSplit, QuerySplit, Database]]:\n self.saver.debug(*self._DEBUG_CreateDatasetStart, self.config.Train.TrainSet.Key, self.config.Train.TrainSet.Pipeline.Key or \"default\")\n try:\n trainPipeline = trackingFunctionCalls(DataPipeRegistry.get(self.config.Train.TrainSet.Pipeline.Key), self.saver)(**self.config.Train.TrainSet.Pipeline.Params)\n except KeyError:\n trainPipeline = None\n trainSet = trackingFunctionCalls(DatasetRegistry.get(self.config.Train.TrainSet.Key), self.saver)(**self.config.Train.TrainSet.Params, pipeline=trainPipeline)\n self.saver.debug(*self._DEBUG_CreateDatasetFinish, trainSet)\n return {\n \"trainSet\": trainSet.Split\n }\n\n def _createModel(self) -> DistributedDataParallel:\n self.saver.debug(*self._DEBUG_CreateModelStart)\n modelFn = trackingFunctionCalls(ModelRegistry.get(self.config.Model.Key), self.saver)\n\n model = modelFn(**self.config.Model.Params)\n\n # EMA model for evaluation\n # deepcopy can't handle faiss objects. reject.\n # adjust = worldSize * config.Train.TrainSet.Params[\"batchSize\"] * config.Train.ModelEMASteps / config.Train.Epoch\n # alpha = 1.0 - config.Train.ModelEMADecay\n # alpha = min(1.0, alpha * adjust)\n # modelEMA = ExponentialMovingAverage(model, device=rank, decay=1.0 - alpha)\n # EMA model for evaluation\n\n model = DistributedDataParallel(model.to(memory_format=torch.channels_last).to(self.rank), device_ids=[self.rank], output_device=self.rank, find_unused_parameters=False)\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n self.saver.debug(*self._DEBUG_CreateModelFinish, totalParameters(model))\n return model\n\n def _createCriterion(self) -> DistributedDataParallel:\n self.saver.debug(*self._DEBUG_CreateCriterionStart)\n criterionFn = trackingFunctionCalls(CriterionRegistry.get(self.config.Train.Criterion.Key), self.saver)\n criterion = criterionFn(**self.config.Train.Criterion.Params).to(self.rank)\n if any((p.requires_grad for p in criterion.parameters())):\n criterion = DistributedDataParallel(criterion, device_ids=[self.rank], output_device=self.rank, find_unused_parameters=False)\n criterion = torch.nn.SyncBatchNorm.convert_sync_batchnorm(criterion)\n self.saver.debug(*self._DEBUG_CreateCriterionFinish, totalParameters(criterion))\n return criterion\n\n def _createOptimizer(self, model: DistributedDataParallel, criterion: DistributedDataParallel) -> Tuple[torch.optim.Optimizer, Callable[..., torch.optim.Optimizer]]:\n self.saver.debug(*self._DEBUG_CreateOptimizerStart)\n if \"lr\" in self.config.Train.Optim.Params and \"batchSize\" in self.config.Train.TrainSet.Params:\n batchSize = self.config.Train.TrainSet.Params[\"batchSize\"] * self.worldSize\n exponent = math.log2(batchSize)\n scale = 3 - exponent / 2\n optimCfg = deepcopy(self.config.Train.Optim)\n optimCfg.Params[\"lr\"] /= (2 ** scale)\n else:\n optimCfg = self.config.Train.Optim\n optimFn = trackingFunctionCalls(OptimRegistry.get(optimCfg.Key), self.saver)\n\n # remove weight_decay in any norm layers\n 
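# ---- Editor's aside (not part of the original source): the lr branch above is
# square-root learning-rate scaling anchored at a global batch size of 64, since
# lr / 2**(3 - log2(B) / 2) == lr * sqrt(B) / 8. A quick numeric check of that
# identity (the helper name is an assumption):
import math

def scaled_lr(base_lr, batch_size):
    return base_lr / (2 ** (3 - math.log2(batch_size) / 2))

assert abs(scaled_lr(1e-3, 64) - 1e-3) < 1e-12    # B == 64 leaves the lr unchanged
assert abs(scaled_lr(1e-3, 256) - 2e-3) < 1e-12   # 4x the batch -> 2x the lr
# ---- end of aside ----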
paramGroup = setWeightDecay(model, optimCfg.Params[\"weight_decay\"], 0.0) +\\\n setWeightDecay(criterion, optimCfg.Params[\"weight_decay\"], 0.0)\n\n optimizer = optimFn(paramGroup, **optimCfg.Params)\n self.saver.debug(*self._DEBUG_CreateOptimizerFinish)\n return optimizer, optimFn\n\n def _createScheduler(self, optimizer: torch.optim.Optimizer) -> Tuple[torch.optim.lr_scheduler._LRScheduler, Callable[..., torch.optim.lr_scheduler._LRScheduler]]:\n self.saver.debug(*self._DEBUG_CreateSchdrStart)\n schdrFn = trackingFunctionCalls(SchdrRegistry.get(self.config.Train.Schdr.Key), self.saver)\n scheduler = schdrFn(optimizer, **self.config.Train.Schdr.Params)\n self.saver.debug(*self._DEBUG_CreateSchdrFinish)\n return scheduler, schdrFn\n\n def _beforeRun(self, hook, *args, **kwArgs):\n self.saver.debug(*self._DEBUG_CallBeforRunStart)\n self.saver.info(*self._LOG_StartTraining)\n\n hook(self._step, self._epoch, self, *args, logger=self.saver, **kwArgs)\n\n self.saver.info(*self._LOG_RevealTensorboard, self.saver.TensorboardURL)\n self.saver.debug(*self._DEBUG_CallBeforRunFinish)\n\n def _afterRun(self, hook, *args, **kwArgs):\n self.saver.debug(*self._DEBUG_CallAfterRunStart)\n self.saver.debug(*self._DEBUG_TrainLoopFinish)\n hook(self._step, self._epoch, self, *args, logger=self.saver, **kwArgs)\n self.saver.debug(*self._DEBUG_CallAfterRunFinish)\n\n def _stepStart(self, hook, *args, **kwArgs) -> Dict[str, Any]:\n return hook(self._step, self._epoch, self, *args, logger=self.saver, **kwArgs) or dict()\n\n def _stepFinish(self, hook, *args, loss, **kwArgs):\n self._step += 1\n\n # Update AveragedModel\n # if self._step % self.config.Train.ModelEMASteps == 0:\n # self._modelEMA.update_parameters(self._model)\n # if \"warmupEpochs\" in self.config.Train.Schdr.Params and self._epoch < self.config.Train.Schdr.Params[\"warmupEpochs\"]:\n # # Reset ema buffer to keep copying weights during warmup period\n # self._modelEMA.n_averaged.fill_(0)\n\n hook(self._step, self._epoch, self, *args, logger=self.saver, loss=loss, **kwArgs)\n\n def _epochStart(self, hook, *args, **kwArgs):\n self.saver.debug(*self._DEBUG_CallEpochBeginStart, self._epoch + 1)\n\n gc.collect()\n gc.collect()\n hook(self._step, self._epoch, self, *args, logger=self.saver, **kwArgs)\n\n self.saver.debug(*self._DEBUG_CallEpochBeginFinish)\n\n def _epochFinish(self, hook, *args, **kwArgs):\n self._epoch += 1\n\n self.saver.debug(*self._DEBUG_CallEpochEndStart, self._epoch)\n\n\n dist.broadcast(self.earlyStopFlag, 0)\n if self.earlyStopFlag:\n self.saver.info(*self._LOG_EarlyStop, self._epoch)\n raise StopIteration\n\n self._scheduler.step()\n self._model.module.step()\n self.saver.debug(*self._DEBUG_ShowSchdrUpdate, self._scheduler.get_last_lr()[0])\n self.saver.debug(*self._DEBUG_ShowTemperatureUpdate, self._model.module.Temperature)\n\n hook(self._step, self._epoch, self, *args, logger=self.saver, **kwArgs)\n self.saver.debug(*self._DEBUG_CallEpochEndFinish)\n\n\n\nclass MainTrainer(PalTrainer, SafeTerminate):\n def __init__(self, config: Config, loggingLevel: int):\n PalTrainer.__init__(self, config, loggingLevel)\n SafeTerminate.__init__(self, self.saver)\n # Running depedencies\n self.progress = getRichProgress().__enter__()\n self.epochBar = self.progress.add_task(\"[----/----]\", start=False, progress=\"\", suffix=Consts.CDot * 10)\n self.trainingBar = self.progress.add_task(\"\", start=False, progress=\"[----/----]\", suffix=Consts.CDot * 10)\n\n self.validator = Validator(self.config.Train.NumReturns)\n\n self.diffTracker = 
EMATracker((), 0.99).cuda()\n\n # Logging and saving\n self.bestmAP = -1\n self.earlyStopCount = 0\n\n def onTerminate(self, signum, frame):\n self.saver.critical(*self._CRITICAL_ProcessInterrupt)\n self.saver.critical(*self._CRITICAL_TimeoutInfo, Consts.TimeOut)\n self.progress.__exit__(None, None, None)\n self.save(os.path.join(self.saver.SaveDir, \"last.ckpt\"))\n self.saver.critical(*self._CRITICAL_ShutdownMessage, relativePath(os.path.join(self.saver.SaveDir, \"last.ckpt\")))\n self.summary()\n\n def summary(self):\n if self.bestmAP < 0:\n self.saver.info(*self._LOG_SummaryInfoNomAP, self._epoch, self._step)\n else:\n self.saver.info(*self._LOG_SummaryInfo, self._epoch, self._step, self.bestmAP * 100)\n self.saver.info(*self._LOG_SummarySaveCkpt, relativePath(os.path.join(self.saver.SaveDir, \"[ONE_OF_A].ckpt\")))\n\n def _beforeRun(self, hook, *args, **kwArgs):\n self.progress.start_task(self.trainingBar)\n self.progress.start_task(self.epochBar)\n super()._beforeRun(hook, *args, **kwArgs)\n\n def _afterRun(self, hook, *args, **kwArgs):\n super()._afterRun(hook, *args, **kwArgs)\n self.progress.__exit__(None, None, None)\n self.summary()\n\n def _stepFinish(self, hook, *args, loss, stats, **kwArgs):\n super()._stepFinish(hook, *args, loss=loss, **kwArgs)\n\n moment = self.diffTracker(loss)\n\n task = self.progress.get_task(self.trainingBar)\n self.progress.update(self.trainingBar, advance=1, progress=f\"[{task.completed + 1:4d}/{task.total:4d}]\", suffix=f\"L = [b green]{moment:2.2f}[/]\")\n self.progress.update(self.epochBar, advance=1)\n\n if self._step % 10 != 0:\n return\n\n for key, value in stats.items():\n if isinstance(value, numbers.Number):\n self.saver.add_scalar(f\"Stat/{key}\", value, global_step=self._step)\n else:\n if value.numel() == 1:\n self.saver.add_scalar(f\"Stat/{key}\", value, global_step=self._step)\n elif len(value.shape) == 4:\n self.saver.add_images(f\"Stat/{key}\", value, global_step=self._step)\n elif len(value.shape) == 3:\n self.saver.add_image(f\"Stat/{key}\", value, global_step=self._step)\n else:\n self.saver.add_histogram(f\"Stat/{key}\", value, global_step=self._step)\n self.saver.add_scalar(\"Stat/Loss\", loss, global_step=self._step)\n self.saver.add_scalar(\"Stat/Lr\", self._scheduler.get_last_lr()[0], global_step=self._step)\n\n def _epochStart(self, hook, *args, trainSet: TrainSplit, **kwArgs):\n totalBatches = math.ceil(len(trainSet) / (trainSet.BatchSize * self.worldSize))\n self.progress.update(self.trainingBar, total=totalBatches)\n self.progress.update(self.epochBar, total=self.config.Train.Epoch * totalBatches, completed=self._step, description=f\"[{self._epoch + 1:4d}/{self.config.Train.Epoch:4d}]\")\n\n self.progress.reset(self.trainingBar)\n super()._epochStart(hook, *args, trainSet=trainSet, **kwArgs)\n\n def _createHooks(self, config: Config, saver: Saver, model, criterion):\n beforeRunHook, afterRunHook, stepStartHook, stepFinishHook, epochStartHook, epochFinishHook = super()._createHooks(config, saver, model, criterion)\n\n saver.debug(\"Add additional hooks in `MainTrainer`.\")\n\n epochFinishHook = checkHook(ChainHook(\n EpochFrequencyHook(\n (1, self.log), logger=saver\n ),\n EpochFrequencyHook(\n (config.Train.ValFreq, self.validate), logger=saver\n ), epochFinishHook), \"EpochFinishHook\", saver)\n\n beforeRunHook = checkHook(ChainHook(\n self.validate\n , beforeRunHook), \"BeforeRunHook\", saver)\n\n return beforeRunHook, afterRunHook, stepStartHook, stepFinishHook, epochStartHook, epochFinishHook\n\n def 
_createDatasets(self) -> Dict[str, Union[TrainSplit, QuerySplit, Database]]:\n self.saver.debug(*self._DEBUG_CreateDatasetStart, self.config.Train.TrainSet.Key, self.config.Train.TrainSet.Pipeline.Key or \"default\")\n try:\n trainPipeline = trackingFunctionCalls(DataPipeRegistry.get(self.config.Train.TrainSet.Pipeline.Key), self.saver)(**self.config.Train.TrainSet.Pipeline.Params)\n except KeyError:\n trainPipeline = None\n trainSet = trackingFunctionCalls(DatasetRegistry.get(self.config.Train.TrainSet.Key), self.saver)(**self.config.Train.TrainSet.Params, pipeline=trainPipeline)\n\n\n self.saver.debug(*self._DEBUG_CreateQuerysetStart, self.config.Train.QuerySet.Key, self.config.Train.QuerySet.Pipeline.Key or \"default\")\n try:\n queryPipeline = trackingFunctionCalls(DataPipeRegistry.get(self.config.Train.QuerySet.Pipeline.Key), self.saver)(**self.config.Train.QuerySet.Pipeline.Params)\n except KeyError:\n queryPipeline = None\n querySet = trackingFunctionCalls(DatasetRegistry.get(self.config.Train.QuerySet.Key), self.saver)(**self.config.Train.QuerySet.Params, pipeline=queryPipeline)\n\n\n self.saver.debug(*self._DEBUG_CreateDatabaseStart, self.config.Train.Database.Key, self.config.Train.Database.Pipeline.Key or \"default\")\n try:\n databasePipeline = trackingFunctionCalls(DataPipeRegistry.get(self.config.Train.Database.Pipeline.Key), self.saver)(**self.config.Train.Database.Pipeline.Params)\n except KeyError:\n databasePipeline = None\n database = trackingFunctionCalls(DatasetRegistry.get(self.config.Train.Database.Key), self.saver)(**self.config.Train.Database.Params, pipeline=databasePipeline)\n\n\n\n self.saver.debug(*self._DEBUG_CreateDatasetMainFinish, trainSet, querySet, database)\n return {\n \"trainSet\": trainSet.Split,\n \"database\": database.Split,\n \"querySet\": querySet.Split\n }\n\n def log(self, *_, **__):\n self.saver.add_scalar(\"Stat/Epoch\", self._epoch, self._step)\n # self.saver.add_images(\"Train/Raw\", tensorToImage(images), global_step=self._step)\n\n def validate(self, *_, database: Database, querySet: QuerySplit, **__):\n torch.cuda.empty_cache()\n\n self.saver.debug(*self._DEBUG_Validation, self._epoch)\n\n results, summary = self.validator.validate(self._model.module, database, querySet, self.progress)\n\n for metricModule in metrics.__all__:\n if metricModule != \"Visualization\":\n # [mAP, Precision, Recall]\n self.saver.add_scalar(f\"Eval/{metricModule}@{self.validator.numReturns}\", results[metricModule], global_step=self._step)\n # self.saver.add_images(f\"Eval/Visualization\", results[\"Visualization\"], global_step=self._step)\n\n self.save()\n\n self.saver.info(\"%s\", summary)\n\n mAP = results[\"mAP\"]\n if mAP > self.bestmAP:\n self.bestmAP = mAP\n self.progress.update(self.epochBar, suffix=f\"H = [b red]{self.bestmAP * 100:2.2f}[/]%\")\n shutil.copy2(self.saver.SavePath, os.path.join(self.saver.SaveDir, \"best.ckpt\"))\n self.earlyStopCount = 0\n else:\n self.earlyStopCount += 1\n self.saver.debug(*self._DEBUG_FindOneEarlyStop, self.earlyStopCount, self.config.Train.EarlyStop)\n if self.earlyStopCount >= self.config.Train.EarlyStop:\n self.earlyStop()\n\n self.saver.debug(*self._DEBUG_EndValidation, self._epoch)\n self._model.train()\n\n def earlyStop(self):\n self.earlyStopFlag.data.copy_(torch.tensor([True]))\n\n\n@TrainerBuilder.register(\"BaseTrainer\")\ndef getTrainer(rank: int, config: Config, loggingLevel: int):\n if rank == 0:\n return MainTrainer(config, loggingLevel)\n return PalTrainer(config, loggingLevel)\n", "repo_name": 
"VL-Group/modfire", "sub_path": "modfire/train/trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 27429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "50", "api": [{"api_name": "modfire.utils.registry.utils", "line_number": 40, "usage_type": "attribute"}, {"api_name": "modfire.utils.registry", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.PrettyStep", "line_number": 53, "usage_type": "call"}, {"api_name": "vlutils.base.Restorable", "line_number": 113, "usage_type": "name"}, {"api_name": "modfire.config.Config", "line_number": 114, "usage_type": "name"}, {"api_name": "vlutils.base.Restorable.__init__", "line_number": 115, "usage_type": "call"}, {"api_name": "vlutils.base.Restorable", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.distributed.get_rank", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.distributed.get_world_size", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.cuda.set_device", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 122, "usage_type": "attribute"}, {"api_name": "utils.getSaver", "line_number": 124, "usage_type": "call"}, {"api_name": "vlutils.config.summary", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 139, "usage_type": "call"}, {"api_name": "vlutils.config.summary", "line_number": 148, "usage_type": "call"}, {"api_name": "modfire.utils.StrPath", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.cuda.amp.grad_scaler.GradScaler", "line_number": 180, "usage_type": "call"}, {"api_name": "torchdata.dataloader2.DataLoader2", "line_number": 187, "usage_type": "call"}, {"api_name": "torchdata.dataloader2.MultiProcessingReadingService", "line_number": 187, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 187, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.cuda.amp.autocast_mode.autocast", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.channels_last", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 234, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 236, "usage_type": "call"}, {"api_name": "importlib.util.util.spec_from_file_location", "line_number": 237, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 237, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 237, "usage_type": "name"}, {"api_name": "importlib.util.util.module_from_spec", "line_number": 240, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 240, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 240, "usage_type": "name"}, {"api_name": "sys.modules", "line_number": 241, "usage_type": "attribute"}, {"api_name": "modfire.utils.registry.utils", "line_number": 244, "usage_type": "attribute"}, {"api_name": "modfire.utils.registry", "line_number": 244, "usage_type": "name"}, {"api_name": "modfire.utils.registry.utils", "line_number": 245, "usage_type": "attribute"}, {"api_name": "modfire.utils.registry", "line_number": 245, "usage_type": "name"}, {"api_name": "vlutils.base.Registry", "line_number": 246, 
"usage_type": "argument"}, {"api_name": "modfire.config.Config", "line_number": 250, "usage_type": "name"}, {"api_name": "vlutils.saver.Saver", "line_number": 250, "usage_type": "name"}, {"api_name": "modfire.train.hooks.getAllHooks", "line_number": 251, "usage_type": "call"}, {"api_name": "hooks.splitHooks", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 253, "usage_type": "argument"}, {"api_name": "vlutils.base.freqHook.ChainHook", "line_number": 257, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 263, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 264, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 265, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 266, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 267, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 268, "usage_type": "call"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 274, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry.get", "line_number": 274, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry", "line_number": 274, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 277, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry.get", "line_number": 277, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry", "line_number": 277, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 271, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 271, "usage_type": "name"}, {"api_name": "modfire.dataset.TrainSplit", "line_number": 271, "usage_type": "name"}, {"api_name": "modfire.dataset.QuerySplit", "line_number": 271, "usage_type": "name"}, {"api_name": "modfire.dataset.Database", "line_number": 271, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 285, "usage_type": "call"}, {"api_name": "modfire.utils.registry.ModelRegistry.get", "line_number": 285, "usage_type": "call"}, {"api_name": "modfire.utils.registry.ModelRegistry", "line_number": 285, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.channels_last", "line_number": 297, "usage_type": "attribute"}, {"api_name": "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 298, "usage_type": "attribute"}, {"api_name": "modfire.utils.totalParameters", "line_number": 299, "usage_type": "call"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 283, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 304, "usage_type": "call"}, {"api_name": "modfire.utils.registry.CriterionRegistry.get", "line_number": 304, "usage_type": "call"}, {"api_name": "modfire.utils.registry.CriterionRegistry", "line_number": 304, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "line_number": 308, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 308, "usage_type": "attribute"}, {"api_name": "modfire.utils.totalParameters", "line_number": 309, "usage_type": "call"}, {"api_name": 
"torch.nn.parallel.DistributedDataParallel", "line_number": 302, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 312, "usage_type": "name"}, {"api_name": "math.log2", "line_number": 316, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 318, "usage_type": "call"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 322, "usage_type": "call"}, {"api_name": "modfire.utils.registry.OptimRegistry.get", "line_number": 322, "usage_type": "call"}, {"api_name": "modfire.utils.registry.OptimRegistry", "line_number": 322, "usage_type": "name"}, {"api_name": "utils.setWeightDecay", "line_number": 325, "usage_type": "call"}, {"api_name": "utils.setWeightDecay", "line_number": 326, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 312, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 312, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 312, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 332, "usage_type": "attribute"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 334, "usage_type": "call"}, {"api_name": "modfire.utils.registry.SchdrRegistry.get", "line_number": 334, "usage_type": "call"}, {"api_name": "modfire.utils.registry.SchdrRegistry", "line_number": 334, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 332, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 332, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 354, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 354, "usage_type": "name"}, {"api_name": "gc.collect", "line_number": 372, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.distributed.broadcast", "line_number": 384, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 384, "usage_type": "name"}, {"api_name": "modfire.utils.SafeTerminate", "line_number": 399, "usage_type": "name"}, {"api_name": "modfire.config.Config", "line_number": 400, "usage_type": "name"}, {"api_name": "modfire.utils.SafeTerminate.__init__", "line_number": 402, "usage_type": "call"}, {"api_name": "modfire.utils.SafeTerminate", "line_number": 402, "usage_type": "name"}, {"api_name": "modfire.utils.getRichProgress", "line_number": 404, "usage_type": "call"}, {"api_name": "modfire.Consts.CDot", "line_number": 405, "usage_type": "attribute"}, {"api_name": "modfire.Consts", "line_number": 405, "usage_type": "name"}, {"api_name": "modfire.Consts.CDot", "line_number": 406, "usage_type": "attribute"}, {"api_name": "modfire.Consts", "line_number": 406, "usage_type": "name"}, {"api_name": "modfire.validate.Validator", "line_number": 408, "usage_type": "call"}, {"api_name": "utils.EMATracker", "line_number": 410, "usage_type": "call"}, {"api_name": "modfire.Consts.TimeOut", "line_number": 418, "usage_type": "attribute"}, {"api_name": "modfire.Consts", "line_number": 418, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 420, "usage_type": "call"}, {"api_name": "os.path", "line_number": 420, "usage_type": "attribute"}, {"api_name": "vlutils.runtime.relativePath", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path", "line_number": 421, "usage_type": "attribute"}, {"api_name": "vlutils.runtime.relativePath", "line_number": 429, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 
429, "usage_type": "call"}, {"api_name": "os.path", "line_number": 429, "usage_type": "attribute"}, {"api_name": "numbers.Number", "line_number": 454, "usage_type": "attribute"}, {"api_name": "modfire.dataset.TrainSplit", "line_number": 468, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 469, "usage_type": "call"}, {"api_name": "modfire.config.Config", "line_number": 476, "usage_type": "name"}, {"api_name": "vlutils.saver.Saver", "line_number": 476, "usage_type": "name"}, {"api_name": "hooks.checkHook", "line_number": 481, "usage_type": "call"}, {"api_name": "vlutils.base.freqHook.ChainHook", "line_number": 481, "usage_type": "call"}, {"api_name": "hooks.EpochFrequencyHook", "line_number": 482, "usage_type": "call"}, {"api_name": "hooks.EpochFrequencyHook", "line_number": 485, "usage_type": "call"}, {"api_name": "hooks.checkHook", "line_number": 489, "usage_type": "call"}, {"api_name": "vlutils.base.freqHook.ChainHook", "line_number": 489, "usage_type": "call"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 498, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry.get", "line_number": 498, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry", "line_number": 498, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 501, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry.get", "line_number": 501, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry", "line_number": 501, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 506, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry.get", "line_number": 506, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry", "line_number": 506, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 509, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry.get", "line_number": 509, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry", "line_number": 509, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 514, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry.get", "line_number": 514, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DataPipeRegistry", "line_number": 514, "usage_type": "name"}, {"api_name": "vlutils.logger.trackingFunctionCalls", "line_number": 517, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry.get", "line_number": 517, "usage_type": "call"}, {"api_name": "modfire.utils.registry.DatasetRegistry", "line_number": 517, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 495, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 495, "usage_type": "name"}, {"api_name": "modfire.dataset.TrainSplit", "line_number": 495, "usage_type": "name"}, {"api_name": "modfire.dataset.QuerySplit", "line_number": 495, "usage_type": "name"}, {"api_name": "modfire.dataset.Database", "line_number": 495, "usage_type": "name"}, {"api_name": "modfire.dataset.Database", "line_number": 532, "usage_type": "name"}, {"api_name": "modfire.dataset.QuerySplit", "line_number": 532, "usage_type": "name"}, {"api_name": "torch.cuda.empty_cache", "line_number": 533, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 533, "usage_type": "attribute"}, {"api_name": "vlutils.config.summary", 
"line_number": 537, "usage_type": "name"}, {"api_name": "modfire.validate.metrics.__all__", "line_number": 539, "usage_type": "attribute"}, {"api_name": "modfire.validate.metrics", "line_number": 539, "usage_type": "name"}, {"api_name": "vlutils.config.summary", "line_number": 547, "usage_type": "argument"}, {"api_name": "shutil.copy2", "line_number": 553, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 553, "usage_type": "call"}, {"api_name": "os.path", "line_number": 553, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 565, "usage_type": "call"}, {"api_name": "modfire.config.Config", "line_number": 569, "usage_type": "name"}]} +{"seq_id": "11793684984", "text": "import requests\nfrom threading import Timer\nimport logging\nimport time\nimport json\nfrom six.moves import input\nfrom glob import glob\nimport livejson\nfrom moviepy.editor import ImageSequenceClip\nimport os\n\n\"\"\"\ncams.json Docs\n\n\nEach entry consists of a URL, Detetion Type , and finally a FPS (For end of day storage video)\n\nValid entries for type are as follows.\n\n\nmax-age which uses the max-age argument of the Cache-Control tag in the header to wait.\netag which uses the ETag argument in the header to decide if it is needed to download.\nmax-age-etag uses both ETag and max-age methods. (For those stuborn URLS)\n\nExample\n\n{'cam1':{'url':'http://example.com','type':'max-age','fps':10}}\n\n\"\"\"\n\nLogLevel = 20\n\n\nclass MaxAgeDownload(object):\n def __init__(self, url,name):\n logging.getLogger(name).setLevel(LogLevel)\n self._timer = None\n self.url = url\n self.is_running = False\n self.name = name\n self.maxage = 1\n self._timer = Timer(self.maxage, self._run)\n self._timer.setName(self.name)\n\n def _run(self):\n self.is_running = False\n rep = requests.get(self.url,stream=True)\n logging.getLogger(self.name).info(\"Downloading new Image.\")\n self.maxage = int(rep.headers[\"Cache-Control\"].split(\"max-age=\")[1].split(\",\")[0]) #Get the max-age using some hacky split strings\n self.start()\n img = open(self.name+\"/\"+str(int(time.time()))+\".jpg\",\"wb+\")\n img.write(rep.raw.read())\n img.close()\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.maxage, self._run)\n self._timer.setName(self.name)\n self._timer.start()\n self.is_running = True\n \n def stop(self):\n self._timer.cancel()\n self.is_running = False\n \nclass ETagDownload(object):\n def __init__(self, url,name):\n logging.getLogger(name).setLevel(LogLevel)\n self._timer = None\n self.url = url\n self.is_running = False\n self.name = name\n self.ETag = \"\"\n self._timer = Timer(1, self._run)\n self._timer.setName(self.name)\n\n def _run(self):\n self.is_running = False\n rep = requests.get(self.url,stream=True,headers={\"If-None-Match\":self.ETag})\n \n if rep.status_code == 200:\n logging.getLogger(self.name).info(\"Downloading new Image.\")\n self.ETag = rep.headers[\"ETag\"]\n img = open(self.name+\"/\"+str(int(time.time()))+\".jpg\",\"wb+\")\n img.write(rep.raw.read())\n img.close()\n self.start()\n\n def start(self):\n if not self.is_running:\n self.is_running = True\n self._timer = Timer(60, self._run)\n self._timer.setName(self.name)\n self._timer.start()\n \n \n def stop(self):\n self._timer.cancel()\n self.is_running = False \n\nclass MaxAgeETagDownload(object):\n def __init__(self, url,name):\n logging.getLogger(name).setLevel(LogLevel)\n self._timer = None\n self.url = url\n self.is_running = False\n self.name = name\n self.etag = \"\"\n self.maxage = 1\n 
self._timer = Timer(self.maxage, self._run)\n self._timer.setName(self.name)\n\n def _run(self):\n self.is_running = False\n rep = requests.get(self.url,stream=True)\n logging.getLogger(self.name).info(\"Downloading new Image.\")\n self.maxage = int(rep.headers[\"Cache-Control\"].split(\"max-age=\")[1].split(\",\")[0]) #Get the max-age using some hacky split strings\n self.start()\n if not self.etag == rep.headers[\"ETag\"]:\n img = open(self.name+\"/\"+str(int(time.time()))+\".jpg\",\"wb+\")\n img.write(rep.raw.read())\n img.close()\n self.etag = rep.headers[\"ETag\"]\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.maxage, self._run)\n self._timer.setName(self.name)\n self._timer.start()\n self.is_running = True\n \n def stop(self):\n self._timer.cancel()\n self.is_running = False\n \n\n \ndef main():\n logging.basicConfig(format = '%(asctime)s [%(name)s] [%(levelname)s] %(message)s')\n log = logging.getLogger(\"TimeLapse\")\n log.setLevel(LogLevel)\n log.info(\"TimeLapse Starting!\")\n \n #Read in the Cameras!\n camfile = open(\"cams.json\")\n cameras = json.load(camfile)\n camfile.close()\n \n for x in cameras.keys():\n try:\n os.mkdir(x)\n os.mkdir(x+\"-mp4\")\n except Exception as e:\n log.error(\"Could not create folder \"+x+\"!\")\n log.error(\"Exception: \"+str(e))\n log.info(\"Added Camera \"+x)\n if cameras[x][\"type\"] == \"etag\":\n cameras[x][\"instance\"] = ETagDownload(cameras[x][\"url\"],x)\n if cameras[x][\"type\"] == \"max-age\":\n cameras[x][\"instance\"] = MaxAgeDownload(cameras[x][\"url\"],x) \n if cameras[x][\"type\"] == \"max-age-etag\":\n cameras[x][\"instance\"] = MaxAgeETagDownload(cameras[x][\"url\"],x)\n cameras[x][\"instance\"].start()\n \n while True:\n command = input(\">\")\n if command == \"list-cams\":\n for x in cameras.keys():\n log.info(x +\": Running? 
\"+str(cameras[x][\"instance\"].is_running))\n elif command.startswith(\"stopcam\"):\n cam = command.split(\" \")[1]\n if not cam in cameras.keys():\n log.error(\"Invalid Camera name\")\n else:\n log.info(\"Camera \"+cam+\" stopped!\")\n cameras[cam][\"instance\"].stop()\n \n elif command.startswith(\"startcam\"):\n cam = command.split(\" \")[1]\n if not cam in cameras.keys():\n log.error(\"Invalid Camera name\")\n else:\n log.info(\"Camera \"+cam+\" started!\")\n cameras[cam][\"instance\"].start()\n elif command.startswith(\"save\"):\n for x in cameras.keys():\n cameras[x][\"instance\"].stop()\n log.info(\"All Camers Stopped!\") \n for x in cameras.keys():\n log.info(\"Saving \"+x)\n data = livejson.File(x+\"-mp4/cat.json\")\n t = str(int(time.time()))\n q = ImageSequenceClip(x,fps=cameras[x][\"fps\"])\n q.write_videofile(x+\"-mp4/\"+t+\".mp4\")\n data[t]=glob(x+\"/*.jpg\")\n for y in glob(x+\"/*.jpg\"):\n os.remove(y)\n log.info(\"All Cameras started!\")\n for x in cameras.keys():\n cameras[x][\"instance\"].start()\n elif command.startswith(\"stopcams\"):\n for x in cameras.keys():\n cameras[x][\"instance\"].stop()\n log.info(\"All Camers Stopped!\") \n elif command.startswith(\"startcams\"):\n for x in cameras.keys():\n cameras[x][\"instance\"].start()\n log.info(\"All Camers Stopped!\") \n elif command == \"stop\":\n log.info(\"Stopping all cameras and halting!\")\n for x in cameras.keys():\n cameras[x][\"instance\"].stop()\n break\n elif command == \"help\": \n log.info(\"help: Shows this.\")\n log.info(\"list-cams: Lists the current cameras.\")\n log.info(\"stopcam: Stops a cameras recording.\")\n log.info(\"startcam: Starts a camera recording.\")\n log.info(\"stop: Stops this program.\")\n \n else:\n log.error(\"Command not found! \" +command)\n \n \nif __name__ == \"__main__\":\n main()", "repo_name": "wgaylord/TimeLapseChicago", "sub_path": "timelapse.py", "file_name": "timelapse.py", "file_ext": "py", "file_size_in_byte": 7691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 36, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 57, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 68, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 74, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 82, "usage_type": "call"}, {"api_name": "time.time", "line_number": 84, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 103, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 110, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 116, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}, {"api_name": "threading.Timer", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 140, 
"usage_type": "call"}, {"api_name": "json.load", "line_number": 146, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 151, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 152, "usage_type": "call"}, {"api_name": "six.moves.input", "line_number": 166, "usage_type": "call"}, {"api_name": "livejson.File", "line_number": 191, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "moviepy.editor.ImageSequenceClip", "line_number": 193, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 195, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 196, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "8841641152", "text": "import json\nimport requests\nimport time\nimport eel\neel.init(\"static\")\n\n@eel.expose\ndef send_sms(number,message):\n num=number.split(\",\")\n url=\"http://api.sparrowsms.com/v2/sms/?\"\n for element in num:\n params={\n 'token':'c6IdMv1UB0miVmGMOPgU',\n 'from':'Demo',\n 'to':element,\n 'text':message,\n }\n r=requests.get(url,params=params)\n status_code = r.status_code\n response = r.text\n response_json = r.json()\n print(f\"{status_code}, {response}, {response_json}\")\n eel.sends(str(response_json))\n eel.back()\neel.start(\"index.html\",size=(580,350))\n", "repo_name": "Gonelastvirus/Bulk_sms_sender", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "eel.init", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "eel.sends", "line_number": 23, "usage_type": "call"}, {"api_name": "eel.back", "line_number": 24, "usage_type": "call"}, {"api_name": "eel.expose", "line_number": 7, "usage_type": "attribute"}, {"api_name": "eel.start", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "37530598841", "text": "import json\nimport time\nimport os\nimport random\nimport re\n\nimport requests\nimport textwrap\nimport asyncio\nimport nest_asyncio\nimport httpx\nfrom PIL import Image, ImageDraw, ImageFont\nfrom miku.utils import FreqLimiter\nimport nonebot\nfrom miku.modules.sekaiutils import check_local_card_asset\nfrom miku.modules.sekaiutils import get_card_thb\nfrom miku.modules.sekaiutils import headers_sekaiviewer\nfrom miku.modules.sekaiutils import audio_update_assets\nfrom miku.modules.sekaiutils import audio_update_aliases\nfrom miku.modules.sekaiutils import audio_update_list\n\nheaders_pjsekai = {\n 'DNT': '1',\n 'Referer': 'https://pjsek.ai/',\n 'sec-ch-ua': '\" Not;A Brand\";v=\"99\", \"Google Chrome\";v=\"91\", \"Chromium\";v=\"91\"',\n 'sec-ch-ua-mobile': '?0',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'\n}\n\n@nonebot.scheduler.scheduled_job(\n 'cron',\n # year=None,\n # month=None,\n # day=None,\n # week=None,\n # day_of_week=None,\n hour='14',\n minute='5',\n # second=None,\n # start_date=None,\n # end_date=None,\n # timezone=None,\n)\nasync def card_resources_scheduler():\n bot = nonebot.get_bot()\n try:\n time.sleep(random.randint(1, 20))\n url = 'https://sekai-world.github.io/sekai-master-db-diff/cards.json'\n raw_data = requests.get(url, headers=headers_sekaiviewer)\n data = json.loads(raw_data.content)\n cards_list_dir = os.path.join(os.path.dirname(__file__), 
'../metas/cards_list.json')\n with open(cards_list_dir, 'w') as f:\n json.dump(data, f, indent=2, ensure_ascii=False)\n await bot.send_group_msg(group_id=773737472, message=f\"sekai中已有{len(list(data))}张卡牌。\\n你今天想要谁的陪伴呢?\")\n time.sleep(100 + random.randint(1, 80))\n for item in data:\n asset_name = item['assetbundleName']\n if item['cardRarityType'] in ('rarity_1', 'rarity_2', 'rarity_birthday'):\n after_should_exist = 0\n asset_exist_normal = check_local_thb_asset(asset_name, 'normal')\n else:\n after_should_exist = 1\n asset_exist_normal = check_local_thb_asset(asset_name, 'normal')\n asset_exist_after_training = check_local_thb_asset(asset_name, 'after_training')\n if not asset_exist_normal:\n get_card_thb(asset_name, 'normal')\n if after_should_exist and not asset_exist_after_training:\n get_card_thb(asset_name, 'after_training')\n await bot.send_group_msg(group_id=773737472, message=\"所有卡牌头图更新完成。\")\n except Exception as identifier:\n print(identifier)\n\n@nonebot.scheduler.scheduled_job(\n 'cron',\n # year=None,\n # month=None,\n # day=None,\n # week=None,\n day_of_week='0,2,4,6',\n hour='18',\n minute='5',\n # second=None,\n # start_date=None,\n # end_date=None,\n # timezone=None,\n)\nasync def song_resources_scheduler():\n bot = nonebot.get_bot()\n try:\n time.sleep(random.randint(1, 20))\n song_list, asset_list = audio_update_list()\n get_song_info = (f\"sekai中现在有{len(set(song_list))}首歌曲,\\n\"\n f\"有{len(asset_list)}段不一样的歌声。\\n\"\n f\"你今天想要听到谁的思念呢?\")\n await bot.send_group_msg(group_id=773737472, message=get_song_info)\n time.sleep(100 + random.randint(1, 80))\n audio_update_assets(asset_list)\n await bot.send_group_msg(group_id=773737472, message='歌曲文件更新完成')\n time.sleep(100 + random.randint(1, 80))\n index_list = audio_update_aliases()\n get_titles_info = (f\"sekai中现在有{len(index_list)}首歌曲。\\n\"\n f\"添加歌曲别名请艾特我发送:\\n\"\n f\"歌曲标题也叫歌曲别名\")\n await bot.send_group_msg(group_id=773737472, message=get_titles_info)\n except Exception as identifier:\n print(identifier)\n", "repo_name": "Phynon/mikubot", "sub_path": "miku/modules/sekaischeduler/resource_scheduler.py", "file_name": "resource_scheduler.py", "file_ext": "py", "file_size_in_byte": 4088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "nonebot.get_bot", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 49, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.headers_sekaiviewer", "line_number": 49, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.get_card_thb", "line_number": 66, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.get_card_thb", "line_number": 68, "usage_type": "call"}, {"api_name": "nonebot.scheduler.scheduled_job", "line_number": 30, "usage_type": "call"}, {"api_name": "nonebot.scheduler", "line_number": 30, "usage_type": "attribute"}, 
{"api_name": "nonebot.get_bot", "line_number": 88, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 90, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.audio_update_list", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 96, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.audio_update_assets", "line_number": 97, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 99, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 99, "usage_type": "call"}, {"api_name": "miku.modules.sekaiutils.audio_update_aliases", "line_number": 100, "usage_type": "call"}, {"api_name": "nonebot.scheduler.scheduled_job", "line_number": 73, "usage_type": "call"}, {"api_name": "nonebot.scheduler", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "74612214876", "text": "import pygame\nfrom pygame.sprite import Sprite\n\n\nclass Ship(Sprite):\n \"\"\"ship controls\"\"\"\n\n def __init__(self, ai_game) -> None:\n super().__init__()\n self.screen = ai_game.screen\n self.settings = ai_game.settings\n self.screen_rect = ai_game.screen.get_rect()\n\n # load image of ship\n self.image = pygame.image.load('imgs/player_ship.png')\n self.rect = self.image.get_rect()\n\n # appearance of a new spaceship\n self.rect.midbottom = self.screen_rect.midbottom\n\n # saving float part of moving\n self.x = float(self.rect.x)\n\n # move right flag\n self.moving_right = False\n\n # move left flag\n self.moving_left = False\n\n def update(self) -> None:\n \"\"\"\n renew the ship's position in case of flag\n :return: None\n \"\"\"\n if self.moving_right and self.rect.right < self.screen_rect.right:\n self.x += self.settings.ship_speed\n if self.moving_left and self.rect.left > 0:\n self.x -= self.settings.ship_speed\n\n # Update rect x coordinate\n self.rect.x = self.x\n\n def blitme(self) -> None:\n \"\"\"paint new spaceship\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self) -> None:\n \"\"\"\n place a new ship in the MIDBOTTOM\n :return: None\n \"\"\"\n self.rect.midbottom = self.screen_rect.midbottom\n self.x = float(self.rect.x)\n", "repo_name": "LikeKugi/Alien", "sub_path": "ship.py", "file_name": "ship.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 5, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}]} +{"seq_id": "8366396139", "text": "import logging\nfrom pathlib import PosixPath\nfrom typing import List, Optional, Dict\n\nfrom dockerup.github import GithubApp, GitHubApi\nfrom dockerup.models import File, Image, Update\nfrom dockerup.settings import Settings\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\nclass Updater:\n def __init__(self,\n path: PosixPath,\n settings: Settings,\n github_app: Optional[GithubApp]) -> None:\n self._path = path\n self._settings = settings\n self._github_app = github_app\n self._files: List[File] = []\n self._latest_tags: Dict[str, str] = {}\n\n def _get_latest_tag(self, image: str) -> str:\n repo = self._settings.supported_images[image]\n if repo not in self._latest_tags:\n latest_tag = GitHubApi(repo, 'main', 
self._github_app).get_latest_release()\n assert latest_tag is not None\n self._latest_tags[repo] = latest_tag\n return self._latest_tags[repo]\n\n def resolve_files(self) -> List[File]:\n # Search for files to update\n for file_path in self._settings.files:\n file = self._path / file_path\n if not file.is_file():\n logger.info(f'Skipping: {file}')\n continue\n\n logger.info(f'Discovered: {file}')\n with file.open('r') as fh:\n contents = fh.read()\n\n self._files.append(File(\n PosixPath(file_path),\n [\n Image(\n line.split(' ')[1].split(':')[0],\n line.split(' ')[1].split(':')[1] if ':' in line.split(' ')[1] else None,\n )\n for line in contents.splitlines()\n if line.startswith('FROM ')\n ],\n contents,\n []))\n return self._files\n\n def update_files(self) -> List[File]:\n _files: List[File] = []\n for file in self._files:\n updates = []\n for image in file.images:\n if image.image not in self._settings.supported_images:\n logger.info(f'Skipping: {image}')\n continue\n latest_tag = self._get_latest_tag(image.image)\n if image.tag != latest_tag:\n logger.info(f'Updating {image.image} from {image.tag} to {latest_tag}')\n updates.append(Update(image.image, image.tag, latest_tag))\n\n updates = list(set(updates))\n contents = file.contents\n for update in updates:\n contents = contents.replace(\n (\n f'FROM {update.image}:{update.previous_tag}'\n if update.previous_tag else\n f'FROM {update.image}'\n ),\n f'FROM {update.image}:{update.new_tag}'\n )\n _files.append(File(file.file_path, file.images, contents, updates))\n return _files\n", "repo_name": "InfraBits/dockerup", "sub_path": "dockerup/updater.py", "file_name": "updater.py", "file_ext": "py", "file_size_in_byte": 3044, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.Logger", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "pathlib.PosixPath", "line_number": 14, "usage_type": "name"}, {"api_name": "dockerup.settings.Settings", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 16, "usage_type": "name"}, {"api_name": "dockerup.github.GithubApp", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "dockerup.models.File", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 21, "usage_type": "name"}, {"api_name": "dockerup.github.GitHubApi", "line_number": 26, "usage_type": "call"}, {"api_name": "dockerup.models.File", "line_number": 43, "usage_type": "call"}, {"api_name": "pathlib.PosixPath", "line_number": 44, "usage_type": "call"}, {"api_name": "dockerup.models.Image", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 31, "usage_type": "name"}, {"api_name": "dockerup.models.File", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 58, "usage_type": "name"}, {"api_name": "dockerup.models.File", "line_number": 58, "usage_type": "name"}, {"api_name": "dockerup.models.Update", "line_number": 68, "usage_type": "call"}, {"api_name": "dockerup.models.File", "line_number": 81, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 57, "usage_type": "name"}, {"api_name": "dockerup.models.File", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "40418865300", "text": "import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 
'first_project.settings')\nimport django\ndjango.setup()\nimport random\nfrom first_app.models import Webpage,Topic,AccessRecord\nfrom faker import Faker\n\nfakegen = Faker()\n\nsample_topics = ['topic_1','topic_2','topic_3','topic_4','topic_5']\n\ndef add_topic():\n t = Topic.objects.get_or_create(topic_name = random.choice(sample_topics))[0]\n\n t.save()\n return t\n\ndef populate(N=5):\n for entry in range(N):\n print('populating ',entry)\n fake_url = fakegen.url()\n fake_names = fakegen.company()\n fake_date = fakegen.date()\n wbpg = Webpage.objects.get_or_create(topic = add_topic(), name = fake_names, url = fake_url)[0]\n access_record = AccessRecord.objects.get_or_create(name = wbpg, date = fake_date)[0]\n\nif __name__ == \"__main__\":\n print(\"populating...\")\n populate(30)\n print('populated')", "repo_name": "zhangboz/Django-learning-notes", "sub_path": "Django Level 2/first_project/populate_first_app.py", "file_name": "populate_first_app.py", "file_ext": "py", "file_size_in_byte": 902, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.environ.setdefault", "line_number": 2, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 4, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 9, "usage_type": "call"}, {"api_name": "first_app.models.Topic.objects.get_or_create", "line_number": 14, "usage_type": "call"}, {"api_name": "first_app.models.Topic.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "first_app.models.Topic", "line_number": 14, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 14, "usage_type": "call"}, {"api_name": "first_app.models.Webpage.objects.get_or_create", "line_number": 25, "usage_type": "call"}, {"api_name": "first_app.models.Webpage.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "first_app.models.Webpage", "line_number": 25, "usage_type": "name"}, {"api_name": "first_app.models.AccessRecord.objects.get_or_create", "line_number": 26, "usage_type": "call"}, {"api_name": "first_app.models.AccessRecord.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "first_app.models.AccessRecord", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "38953880538", "text": "# --- VTK-PYTHON SCRIPT FOR READING VTP, INTEGRATING VARIABLES OF INTEREST USING TRAPEZOIDAL RULE\n# --- CALCULATES PRESSURE AT OUTLETS OF THE MODEL\n# --- NOTE: TO BE RUN FROM DIRECTORY CONTAINING all_results.vtp FILE AND mesh-complete/ DIRECTORY\n# --- NOTE: THIS SCRIPT ASSUMES A TRIANGULAR SURFACE MESH\n# --- BASED ON A SCRIPT BY JUSTIN TRAN\n# --- UPDATED 8/4/17 BY CASEY FLEETER\n\n\n#---------------------------------------------------------------------#\n# RUN THIS SCRIPT FROM THE SAME DIRECTORY AS all_results.vtp #\n# SCRIPT GENERATES all_results_pressures.dat #\n#---------------------------------------------------------------------#\n\nimport sys\nimport os\nimport vtk\nimport numpy as np\n\n#------------------------------------------------------------#\n# CHANGE THE START AND END TIMES AND TIME STEP INCREMENT #\n#------------------------------------------------------------#\n# SPECIFIY TIME POINTS TO DETERMINE PRESSURE, ALONG WITH INCREMENT IN RESTART FILES\nSTART_TIME = 160\nEND_TIME = 240\nINCREMENT = 10\n\n\n#---------------------------------------------------------------------#\n# CHANGE THE NAMES OF THE CAP FILES FROM THE 
MESH-COMPLETE FOLDER #\n#---------------------------------------------------------------------#\n# NAMES OF CAP GEOMETRY FILES OF MODEL, WITHOUT CAP_ PREFIX\ninput_filenames = ['celiac_trunk', 'celiac_branch', 'superior_mesentaric', \\\n 'renal_right', 'renal_left', 'right_internal_iliac', \\\n 'left_internal_iliac', 'right_iliac', 'aorta_2']\n\n# PATH OF OUTPUT FILE FOR RESULTS\noutput_filename = 'all_results_pressures.dat'\noutput_collection = []\n\nif __name__ == \"__main__\":\n\n for file in input_filenames:\n\n # Load in the mesh file for the outlet you want pressures at. This can be found in the mesh-surfaces folder\n command_string = 'cd mesh-complete/mesh-surfaces'\n print(command_string)\n os.chdir('mesh-complete/mesh-surfaces')\n\n # Read geometry (mesh) information from this cap\n outlet_reader = vtk.vtkXMLPolyDataReader() # Create vtk instance\n outlet_reader.SetFileName('cap_' + file + '.vtp') # Open file\n outlet_reader.Update()\n outlet_model = vtk.vtkPolyData()\n outlet_model = outlet_reader.GetOutput() # Read file into new variable for manipulation\n numPts = outlet_model.GetNumberOfPoints() # Determine number of points in the mesh at this outlet\n outlet_IDs = outlet_model.GetPointData().GetArray(\"GlobalNodeID\") # Extract node IDs to match with full model solution\n \n\n\n # Load in the pressure information from the vtp file\n command_string = 'cd ../..'\n print(command_string)\n os.chdir('../..')\n\n # First, read in the .vtp file containing your quantities of interest\n all_results_reader = vtk.vtkXMLPolyDataReader() # Create vtk instance\n all_results_reader.SetFileName('all_results.vtp') # Open file\n all_results_reader.Update()\n all_results_model = vtk.vtkPolyData() \n all_results_model = all_results_reader.GetOutput() # Read file into new variable for manipulation\n all_results_numPts = all_results_model.GetNumberOfPoints() # Determine number of points in the mesh of the entire model\n all_results_IDs = all_results_model.GetPointData().GetArray('GlobalNodeID') # Extract node IDs of full model solution\n \n\n\n # Find the nodes on all_results that correspond to the outlet of interest\n outlet_nodes = []\n for i_node in xrange(0, numPts):\n this_ID = outlet_IDs.GetTuple1(i_node)\n \n # iterate through all nodes in model\n for i_full in xrange(0, all_results_numPts):\n full_ID = all_results_IDs.GetTuple1(i_full)\n\n if(full_ID == this_ID):\n outlet_nodes.append(i_full)\n break\n \n # Just to make sure we found all the outlet nodes in all_results\n assert(len(outlet_nodes) == numPts)\n \n # Create a Python list to hold the pressure arrays from the all_results.vtp\n pressure_vectors = []\n # keep track of how many timesteps in solution\n timestep_count = 0\n for i_array in xrange(START_TIME, END_TIME+INCREMENT, INCREMENT):\n pressure_vectors.append(vtk.vtkDoubleArray())\n\n if i_array < 10:\n pressure_vectors[timestep_count] = all_results_model.GetPointData().GetArray('pressure_' + '0000' + str(i_array))\n elif i_array < 100:\n pressure_vectors[timestep_count] = all_results_model.GetPointData().GetArray('pressure_' + '000' + str(i_array))\n elif i_array < 1000:\n pressure_vectors[timestep_count] = all_results_model.GetPointData().GetArray('pressure_' + '00' + str(i_array))\n elif i_array < 10000:\n pressure_vectors[timestep_count] = all_results_model.GetPointData().GetArray('pressure_' + '0' + str(i_array))\n else:\n pressure_vectors[timestep_count] = all_results_model.GetPointData().GetArray('pressure_' + str(i_array))\n \n timestep_count = timestep_count + 1\n \n \n 
\n # Integrate pressures over the surface of the outlet face to get the pressure on this face at each time\n temp_press = np.zeros(timestep_count)\n\n for i_time in xrange(0, timestep_count):\n \n # Compute the integral using trapezoidal rule\n total_area = 0.0\n # store pressure information for the entire outlet face at this time step\n curr_press = 0.0\n\n # iterate over all mesh cells on outlet face\n for i_cell in xrange(0, outlet_model.GetNumberOfCells()):\n \n # extract information about cell vertices\n temp_cell = outlet_model.GetCell(i_cell)\n pts_cell = temp_cell.GetPointIds()\n cell_pts = temp_cell.GetPoints()\n p0 = cell_pts.GetPoint(0)\n p1 = cell_pts.GetPoint(1)\n p2 = cell_pts.GetPoint(2)\n \n # compute area of mesh cell (triangular mesh assumed) \n local_area = temp_cell.TriangleArea(p0, p1, p2)\n total_area = total_area + local_area\n \n local_temp_press = 0.0\n # add contributions from each vertex of cell\n for ipt in xrange(0, pts_cell.GetNumberOfIds()):\n \n iid = pts_cell.GetId(ipt) # get node number of this point\n temp_press_vec = float(pressure_vectors[i_time].GetTuple(iid)[0]) # get pressure at this point\n # add pressure contribution of this point to the total cell pressure\n local_temp_press = local_temp_press + temp_press_vec\n \n # Complete the trapezoidal rule integration for this cell by multiplying the sum of\n # the pressures by the local area and dividing by the number of vertices\n # Add the contribution of this cell to the curr_press for the entire outlet face\n curr_press = curr_press + local_temp_press*local_area/3.0\n \n # save pressure information at the outlet face (with pressure normalized by the total area \n # of the outlet face) for the current timestep \n temp_press[i_time] = curr_press/ total_area\n\n # save pressure information for all timesteps for the current outlet face\n output_collection.append(temp_press)\n\n\n\n # Now that we have looped over all our .vtp files of interest and integrated\n # the variables, it is time to save them to the output file.\n outfile = open(output_filename, 'w')\n \n # First print a header that tells what each integrated quantity of interest is\n out_string = 'Time Step '\n for iq in xrange(0, len(input_filenames)):\n out_string = out_string + input_filenames[iq] + ' '\n out_string = out_string + '\\n'\n outfile.write(out_string)\n\n # Now print the data for each quantity of interest at each time step\n for i_time in xrange(0, timestep_count):\n\n # Print time step\n out_string = str(i_time)\n \n # Print each quantity of interest at that timestep\n for i_file in xrange(0,len(input_filenames)): \n out_string = out_string + ' ' + str(output_collection[i_file][i_time])\n \n out_string = out_string + '\\n'\n outfile.write(out_string)\n\n outfile.close()\n ", "repo_name": "melodydong/Post_Solve_Scripts", "sub_path": "Postsolve_Pressures.py", "file_name": "Postsolve_Pressures.py", "file_ext": "py", "file_size_in_byte": 8035, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.chdir", "line_number": 47, "usage_type": "call"}, {"api_name": "vtk.vtkXMLPolyDataReader", "line_number": 50, "usage_type": "call"}, {"api_name": "vtk.vtkPolyData", "line_number": 53, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 63, "usage_type": "call"}, {"api_name": "vtk.vtkXMLPolyDataReader", "line_number": 66, "usage_type": "call"}, {"api_name": "vtk.vtkPolyData", "line_number": 69, "usage_type": "call"}, {"api_name": "vtk.vtkDoubleArray", 
"line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "13677933070", "text": "import pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\n\nyears = []\n\n# read CSVs\nmovies = pd.read_csv('datas/movies.csv')\nratings = pd.read_csv('datas/ratings.csv')\n\n# merge on movieId column\ndata = pd.merge(left=movies, right=ratings, on='movieId')\nfor title in data['title']:\n year_subset = title[-5:-1]\n try: years.append(int(year_subset))\n except: years.append(9999)\n \ndata['moviePubYear'] = years\nprint(len(data[data['moviePubYear'] == 9999]))\n\ngenre_df = pd.DataFrame(data['genres'].str.split('|').tolist(), index=data['movieId']).stack()\ngenre_df = genre_df.reset_index([0, 'movieId'])\ngenre_df.columns = ['movieId', 'Genre']\ntab = data\n\nuseri,frequsers=np.unique(tab.userId,return_counts=True)#useri les id des users, frequsers les freq de chaque user\nitemi,freqitems=np.unique(tab.movieId,return_counts=True)#itemi les id des item, freqitem les freq de chaque item\nn_users=len(useri)\nn_items=len(itemi)\nprint(\"le nombre des utilisateurs est :\"+ str(n_users) + \" Et le nombre des items est: \"+ str(n_items))\n\n\"\"\"\nUn des problèmes que j'ai rencontré était le fait que les ids des users et des items n'était pas ordonnée.\nC'est à dire on peut trouver l'utilisateur 1,2,3,5 et 8 sans trouver les utilisateurs 4 ,6 et 7. \nCeci a posé un problème dans la création de la matrice user-item parce que on risque d'avoir plusieurs lignes et colonnes vides.\nPour ça, j'ai crée un tableau indice_user et un tableau indice_item qui contiennent les anciens id et les nouvelles id \npar expl (1,2,5,6)=>(1,2,3,4) puis j'ai ajouté deux colonnes sur le tableau principale qui contient ces nouveaux IDs.\n(ps) ce traitement est très couteux!\n\"\"\"\n\nindice_user = pd.DataFrame()\nindice_user[\"indice\"]=range(1,len(useri)+1)\nindice_user[\"useri\"]=useri\n\nindice_item = pd.DataFrame()\nindice_item[\"indice\"]=range(1,len(itemi)+1)\nindice_item[\"itemi\"]=itemi\n\n#create user_ID_new and Item_ID_new\nx=[]\ny=[]\nfor i in range(0,len(tab)):\n x.append((indice_user.indice[indice_user.useri==tab.userId[i]].axes[0]+1)[0])\n y.append((indice_item.indice[indice_item.itemi==tab.movieId[i]].axes[0]+1)[0])\n\ntab[\"User_ID_new\"]=x\ntab[\"Item_ID_new\"]=y\n\n#create a train_data and test_data\nfrom sklearn.model_selection import train_test_split\ntrain_data, test_data = train_test_split(tab[[\"User_ID_new\",\"Item_ID_new\",\"rating\"]], test_size=0.25,random_state=123)\n\n\n\"\"\"\nDans cette partie du projet, j'applique le deuxième sous-type du fitrage collaboratif : \"Model-based\". \nIl consiste à appliquer la matrice de factorisation (MF) : c'est une méthode d'apprentissage non supervisé de décomposition\net de réduction de dimensionnalité pour les variables cachées. \n\nLe but de la matrice de factorisation est d'apprendre les préférences cachées des utilisateurs et les attributs cachés des items\ndepuis les ratings connus dans notre jeu de données, pour enfin prédire les ratings inconnus en multipliant les matrices de varibales \ncachées des utilisateurs et des items. \n\nPour le projet, j'ai utilisé l'algorithme:\n- ALS : Alternating Least Squares\n\n\"\"\"\n\n\"\"\" \nPour le faire, j'ai commencé par créer les matrice user-item train et test. 
Ce sont les deux matrices qui vont croisé les notes de utilsiateurs et des items.\nPuis, j'ai créé une fonction pour faire les prédictions\n\n\"\"\"\ntrain_data_matrix = np.zeros((n_users, n_items))#matrice nulle de longuer tous les users et tous les items\nfor line in train_data.itertuples():#parcourire la ligne col par col\n train_data_matrix[line[1]-1, line[2]-1] = line[3] \n\ntest_data_matrix = np.zeros((n_users, n_items))\nfor line in test_data.itertuples():\n test_data_matrix[line[1]-1, line[2]-1] = line[3]\n\n# La fonction prediction permet de prédire les ratings inconnus en multipliant les matrices P et la transposée de Q\ndef prediction(P,Q):\n return np.dot(P.T,Q)\n\n\"\"\" Il existe plusieurs métriques d'évaluation, mais la plus populaire des métriques utilisée pour évaluer l'exactitude des ratings prédits\nest l'erreur quadratique moyenne (RMSE) que j'ai utilisé dans le projet :\nRMSE = RacineCarrée{(1/N) * sum (r_i -estimé{r_i})^2}\n\"\"\"\n\ndef rmse(I,R,Q,P):\n return np.sqrt(np.sum((I * (R - prediction(P,Q)))**2)/len(R[R > 0])) \n\n\n# Script for training model with Alternating Least Squares algorithm\ntrain_errors = []\ntest_errors = []\n\n# Index matrix for training data\nI = train_data_matrix.copy()\nI[I > 0] = 1\nI[I == 0] = 0\n\n# Index matrix for test data\nI2 = test_data_matrix.copy()\nI2[I2 > 0] = 1\nI2[I2 == 0] = 0\n\nlmbda = 0.1 \nk = 20 \nn_epochs = 2 # number of epochs\nm, n = train_data_matrix.shape # Number of users and items\nP = 3 * np.random.rand(k,m) # Latent user feature matrix\nQ = 3 * np.random.rand(k,n) # Latent item feature matrix\nQ[0,:] = train_data_matrix[train_data_matrix != 0].mean(axis=0) # Avg. rating for each movie\nE = np.eye(k) # (k x k)-dimensional idendity matrix\n\n# Repeat until convergence\nfor epoch in range(n_epochs):\n # Fix Q and estimate P\n for i, Ii in enumerate(I):\n nui = np.count_nonzero(Ii) # Number of items user i has rated\n if (nui == 0): nui = 1 # Be aware of zero counts!\n \n # Least squares solution\n Ai = np.dot(Q, np.dot(np.diag(Ii), Q.T)) + lmbda * nui * E\n Vi = np.dot(Q, np.dot(np.diag(Ii), train_data_matrix[i].T))\n P[:,i] = np.linalg.solve(Ai,Vi)\n \n # Fix P and estimate Q\n for j, Ij in enumerate(I.T):\n nmj = np.count_nonzero(Ij) # Number of users that rated item j\n if (nmj == 0): nmj = 1 # Be aware of zero counts!\n \n # Least squares solution\n Aj = np.dot(P, np.dot(np.diag(Ij), P.T)) + lmbda * nmj * E\n Vj = np.dot(P, np.dot(np.diag(Ij), train_data_matrix[:,j]))\n Q[:,j] = np.linalg.solve(Aj,Vj)\n \n train_rmse = rmse(I,train_data_matrix,Q,P)\n test_rmse = rmse(I2,test_data_matrix,Q,P)\n train_errors.append(train_rmse)\n test_errors.append(test_rmse)\n \n print(\"[Epoch %d/%d] train error: %f, test error: %f\"\n % (epoch+1, n_epochs, train_rmse, test_rmse))\n\nprint(\"Algorithm converged\")\n\n\n\n# Maitenant, après avoir obtenus toutes les valeurs de l'erreur à chaque étape,on peut tracer la courbe d'apprentissage.\n# ==> On Vérifie la performance en traçant les erreurs du train et du test\nplt.plot(range(n_epochs), train_errors, marker='o', label='Training Data')\nplt.plot(range(n_epochs), test_errors, marker='v', label='Test Data')\nplt.title('Courbe d apprentissage SGD')\nplt.xlabel('Nombre d etapes')\nplt.ylabel('RMSE')\nplt.legend()\nplt.grid()\nplt.show()", "repo_name": "AimeKoffi/Systeme-de-recommandation-CF-Model-Based", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6540, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "50", "api": [{"api_name": "pandas.read_csv", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.eye", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.count_nonzero", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 147, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}]} +{"seq_id": "25076369598", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Created by Liuxiaozhe on 2020/9/25\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport torch\nfrom torchvision import transforms\nimport numpy as np\nimport cv2\nfrom pfld import PFLDInference, MTCNN\nfrom utils import getDistance, glotPosecube\n\n\nclass FaceAttribute:\n def __init__(self):\n #人脸检测\n self.mtcnn = MTCNN()\n #人脸关键点定位模型\n self.pfldmodel = PFLDInference().cuda()\n self.pfldmodel.load_state_dict(torch.load(\"weights/keypoints.pth\"))\n self.pfldmodel = self.pfldmodel.cuda()\n self.pfldmodel.eval()\n self.transform = transforms.Compose([transforms.ToTensor()])\n\n def main(self,img):\n transform = transforms.Compose([transforms.ToTensor()])\n with torch.no_grad():\n height, width = img.shape[:2]\n img_det = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n det = self.mtcnn.detect_face(img_det)\n # '''\n # [{'box': (160, 48, 322, 270), 'cls': array([0.9666081], dtype=float32),\n # 'pts': {'leye': (214, 136), 'reye': (276, 121), 'nose': (257, 159), 'lmouse': (238, 208),\n # 'rmouse': (288, 197)}}]\n # '''\n for i in range(len(det)): #单张图片人脸数量\n box = det[i]['box'] #人脸框tuple\n #cls = result[i]['cls'] #置信度ndarry\n pts = det[i]['pts'] #五官坐标dict\n x1, y1, x2, y2 = box #左上右下\n dis = y2 - y1\n #cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 25)) #天蓝色人脸框\n w = x2 - x1 + 1\n h = y2 - y1 + 1\n size_w = int(max([w, h])*0.9)\n size_h = int(max([w, h]) * 0.9)\n cx = x1 + w//2\n cy = y1 + h//2\n x1 = cx - size_w//2\n x2 = x1 + size_w\n y1 = cy - int(size_h * 0.4)\n y2 = y1 + size_h\n left = 0\n top = 0\n bottom = 0\n right = 0\n if x1 < 0:\n left = -x1\n if y1 < 0:\n top = -y1\n if x2 >= width:\n right = x2 - width\n if y2 >= height:\n bottom = y2 - height\n\n x1 = max(0, x1)\n y1 = max(0, y1)\n x2 = min(width, x2)\n y2 = min(height, y2)\n cropped = img[y1:y2, x1:x2] #裁剪出的人脸\n # print(cropped.shape)\n # np_img = img[int(y1/1):y2+int(y1/1),int(x1/1):x2+int(x1/1)]\n # cv2.imshow(str(numa),np_img)\n cropped = cv2.copyMakeBorder(cropped, top, bottom, left, right, cv2.BORDER_CONSTANT, 0)\n input = cv2.resize(cropped, (112, 112))\n input = cv2.cvtColor(input, cv2.COLOR_BGR2RGB)\n input = transform(input).unsqueeze(0).cuda()\n pose, landmarks = self.pfldmodel(input)\n #poses = pose.cpu().detach().numpy()[0] * 180 / np.pi\n # 长度3 pitch是围绕X轴旋转,也叫做俯仰角。 yaw是围绕Y轴旋转,也叫偏航角。 roll是围绕Z轴旋转,也叫翻滚角\n pre_landmark = landmarks[0]\n pre_landmark = pre_landmark.cpu().detach().numpy().reshape(-1, 2) * [size_w, size_h] # 长度98\n # cv2.rectangle(img,(x1, y1), (x2, y2),(255,0,0)) #蓝色正方形\n fatigue = []\n for num, (x, y) in enumerate(pre_landmark.astype(np.int32)):\n #cv2.circle(img, (x1 - left + x, y1 - bottom + y), 1, (255, 255, 0), 1)\n #if 59 < num < 76 or num in [96,97]: #眼眶坐标\n #眼镜轮廓坐标\n # 62 70\n # 60 64 68 72\n # 66 74\n\n if num in [60, 62, 64, 66, 68, 70, 72, 74]:\n cv2.circle(img, (x1 - left + x, y1 - bottom + y), 1, (255, 255, 0), 1)\n fatigue.append((x, y))\n # print(fatigue)\n rightrow = getDistance(fatigue[0], fatigue[2])\n rightcol = getDistance(fatigue[1],fatigue[3])\n leftrow = getDistance(fatigue[4],fatigue[6])\n leftcol = getDistance(fatigue[5],fatigue[7])\n numerator = rightcol+leftcol\n denominator = rightrow+leftrow\n distance = numerator/denominator\n print('dis:'+str(distance))\n text ,color= 'eyes closed!' 
if distance < 0.17 else 'eyes opened',\\\n (0, 0, 255) if distance < 0.17 else (0, 255, 0)\n\n\n\n img = cv2.putText(img, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 2)\n # another way 计算\n # eye = rightcol + leftcol\n # if eye / dis < 0.03:\n # print('dis:' + str(distance))\n # print('eyes closed!')\n # else:\n # print('ok')\n\n # plotPosecube(img, poses[0], poses[1], poses[2], tdx=pts['nose'][0], tdy=pts['nose'][1],\n # size=(x2 - x1) // 2)\n cv2.imshow('example', img)\n cv2.waitKey(0)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n import glob\n F = FaceAttribute()\n f = glob.glob('D:\\points/face\\FaceAttributeClassiry-master/test/*')\n for i in f:\n img = cv2.imread(i)\n F.main(img)\n\n\n\n\n", "repo_name": "lxzatwowone1/Driver-Fatigue-Monitoring", "sub_path": "eye_close_detection.py", "file_name": "eye_close_detection.py", "file_ext": "py", "file_size_in_byte": 5679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "50", "api": [{"api_name": "warnings.filterwarnings", "line_number": 5, "usage_type": "call"}, {"api_name": "pfld.MTCNN", "line_number": 18, "usage_type": "call"}, {"api_name": "pfld.PFLDInference", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 21, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 24, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.copyMakeBorder", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 75, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 95, "usage_type": "call"}, {"api_name": "utils.getDistance", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.getDistance", "line_number": 99, "usage_type": "call"}, {"api_name": "utils.getDistance", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.getDistance", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 111, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 123, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 133, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "74224940315", "text": "#!/usr/bin/python3\n\n\"\"\"\nUploads files/firmware to Jolt over YMODEM\n\nIf the file has name \"jolt_os.bin\" or \"jolt_os.bin.gz\", it will start an OS update.\n\nIf the file ends in 
\".patch\" it will be applied as a patch (triggering an OS update).\n\"\"\"\n\nimport argparse\nimport os, sys\nimport serial\nimport time\nimport logging\nfrom xmodem import YMODEM\n\n# Configure logging\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\nlog = logging.getLogger('elf2jelf')\n\ndef parse_args():\n this_path = os.path.abspath(__file__)\n default_elf_fn = os.path.join(this_path, '..', 'build', 'jolt_os.bin')\n\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=str,\n help='File to send over.')\n parser.add_argument('--baudrate', type=int, default=230400,\n help=\"Baudrate\")\n parser.add_argument('--port', type=str, default='/dev/ttyUSB0',\n help=\"Serial Port\")\n parser.add_argument('--verbose', '-v', type=str, default='INFO',\n help='''\n Valid options:\n SILENT\n INFO\n DEBUG\n ''')\n parser.add_argument('--monitor', '-m', action='store_true',\n help=\"Monitor esp32 after transfer.\")\n args = parser.parse_args()\n dargs = vars(args)\n\n global log\n logging_level = args.verbose.upper()\n if logging_level == 'INFO':\n log.setLevel(logging.INFO)\n elif logging_level == 'DEBUG':\n log.setLevel(logging.DEBUG)\n else:\n raise(\"Invalid Logging Verbosity\")\n\n return (args, dargs)\n\ndef consume(ser):\n ser.read(size=100000, )\n\ndef main(args):\n args, dargs = parse_args()\n\n # need to clear DTR, then clear RTS to properly reset device\n ser = serial.Serial(dsrdtr=True)\n ser.baudrate = args.baudrate\n ser.port = args.port\n ser.dtr = 1\n ser.rts = 1\n ser.timeout = 0.5\n\n ser.open()\n\n ser.dtr = 0\n ser.rts = 0\n\n log.info(\"Waiting for device to boot\")\n time.sleep(5) # Wait for device to boot\n\n def getc(size, timeout=5):\n return ser.read(size=size)\n\n def putc(data, timeout=5):\n return ser.write(data)\n\n ymodem = YMODEM(getc, putc)\n\n firmware_names = ['jolt_os.bin', 'jolt_os.bin.gz', 'JoltOS.bin', 'JoltOS.bin.gz']\n\n basename = os.path.basename(args.input)\n log.info(\"Initiating Upload\")\n cmd = []\n if basename in firmware_names:\n cmd.append(\"upload_firmware\")\n elif basename.endswith(\".patch\"):\n cmd.append(\"upload_firmware\")\n cmd.append(\"--patch\")\n else:\n cmd.append(\"upload\")\n cmd.append(basename)\n\n cmd = ' '.join(cmd) + '\\n'\n log.debug('Sending \"%s\" command' % cmd.strip())\n ser.write(cmd.encode('ascii'));\n consume(ser)\n\n log.info(\"Sending File\")\n ymodem.send([args.input,], retry=60)\n\n if args.monitor:\n while(True):\n try:\n n = ser.inWaiting()\n if n:\n line = ser.read(n)\n print(line.strip().decode(\"utf-8\", errors='replace') )\n except IOError:\n raise IOError()\n except:\n pass\n\nif __name__ == '__main__':\n main(sys.argv)\n\n", "repo_name": "joltwallet/jolt_wallet", "sub_path": "pyutils/usb_upload.py", "file_name": "usb_upload.py", "file_ext": "py", "file_size_in_byte": 3169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 53, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.basicConfig", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, 
{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 50, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 76, "usage_type": "call"}, {"api_name": "xmodem.YMODEM", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 121, "usage_type": "attribute"}]} +{"seq_id": "7313067464", "text": "import argparse\nimport ast\nimport json\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nparties = [\"PS\", \"PSD\", \"BE\", \"CDS-PP\", \"PEV\", \"PCP\"]\nall_legs = [\"VII\", \"VIII\", \"IX\", \"X\", \"XI\", \"XII\", \"XIII\", \"XIV\", \"XV\"]\n\n\ndef roman_to_int(roman_str):\n roman_dict = {\"I\": 1, \"V\": 5, \"X\": 10, \"L\": 50, \"C\": 100, \"D\": 500, \"M\": 1000}\n int_val = 0\n for i in range(len(roman_str)):\n if i > 0 and roman_dict[roman_str[i]] > roman_dict[roman_str[i - 1]]:\n int_val += roman_dict[roman_str[i]] - 2 * roman_dict[roman_str[i - 1]]\n else:\n int_val += roman_dict[roman_str[i]]\n return int_val\n\n\ndef scrape_first_page(publicacao):\n pub_url_id = f\"01/{roman_to_int(publicacao['pubLeg']):02}/{int(publicacao['pubSL']):02}/{int(publicacao['pubNr']):03}/{publicacao['pubdt']}\"\n url = f\"https://debates.parlamento.pt/catalogo/r3/dar/{pub_url_id}?org=PLC\"\n print(url)\n\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n\n pagination = soup.find(\"ul\", {\"class\": \"pagination\"})\n children = pagination.findChildren(\"li\", recursive=False)\n first_page = int(children[0].text)\n\n return first_page\n\n\nclass ARValuesMissingException(Exception):\n pass\n\n\ndef process_vote_detail(detail):\n votes = {\"in_favour\": [], \"against\": [], \"abstention\": []}\n detail = detail.split(\"
\")\n if len(detail) == 1: # all in favour\n in_favour = re.findall(\"\\(.*?)\\\", detail[0])\n votes[\"in_favour\"] = list(map(str.strip, in_favour))\n if len(detail) == 2:\n in_favour = re.findall(\"\\(.*?)\\\", detail[0])\n votes[\"in_favour\"] = list(map(str.strip, in_favour))\n other = re.findall(\"\\(.*?)\\\", detail[1])\n if \"Contra:\" in detail[1]:\n votes[\"against\"] = list(map(str.strip, other))\n elif \"Abstenção:\" in detail[1]:\n votes[\"abstention\"] = list(map(str.strip, other))\n if len(detail) == 3:\n in_favour, against, abstention = detail\n in_favour = re.findall(\"\\(.*?)\\\", in_favour)\n against = re.findall(\"\\(.*?)\\\", against)\n abstention = re.findall(\"\\(.*?)\\\", abstention)\n votes[\"in_favour\"] = list(map(str.strip, in_favour))\n votes[\"against\"] = list(map(str.strip, against))\n votes[\"abstention\"] = list(map(str.strip, abstention))\n\n def clean_noisy_party_labels(votes_dict, key):\n parties_regex = rf\"^\\d+-({'|'.join(parties)})$\"\n votes_dict[key] = [i for i in votes[key] if not re.search(parties_regex, i)]\n\n clean_noisy_party_labels(votes, \"in_favour\")\n clean_noisy_party_labels(votes, \"against\")\n clean_noisy_party_labels(votes, \"abstention\")\n\n assert all(\n key in votes for key in [\"in_favour\", \"against\", \"abstention\"]\n ), f\"{votes}---{detail}\"\n\n return votes\n\n\ndef process_voting(votacao):\n row = []\n\n votacao = votacao[0][\"pt_gov_ar_objectos_VotacaoOut\"]\n\n if not all(key in votacao for key in [\"resultado\", \"detalhe\"]):\n raise ARValuesMissingException(\"Some vote required attributes are missing.\")\n\n vot_resultado = votacao[\"resultado\"]\n row.append(vot_resultado)\n vot_dict = process_vote_detail(votacao[\"detalhe\"])\n row.append(vot_dict[\"in_favour\"])\n row.append(vot_dict[\"against\"])\n row.append(vot_dict[\"abstention\"])\n\n return row\n\n\ndef add_ini_attributes(iniciativa):\n row = []\n ini_num = iniciativa.get(\"iniNr\")\n row.append(ini_num)\n ini_leg = iniciativa.get(\"iniLeg\")\n row.append(ini_leg)\n ini_tipo = iniciativa.get(\"iniDescTipo\")\n row.append(ini_tipo)\n ini_titulo = iniciativa.get(\"iniTitulo\")\n row.append(ini_titulo)\n ini_sessao = iniciativa.get(\"iniSel\")\n row.append(ini_sessao)\n dataInicioLeg = iniciativa.get(\"dataInicioleg\")\n row.append(dataInicioLeg)\n dataFimLeg = iniciativa.get(\"dataFimleg\")\n row.append(dataFimLeg)\n\n return row\n\n\ndef process_ini_authors(iniciativa):\n row = []\n\n if \"iniAutorDeputados\" in iniciativa:\n autores_deputados = iniciativa[\"iniAutorDeputados\"][\n \"pt_gov_ar_objectos_iniciativas_AutoresDeputadosOut\"\n ]\n if isinstance(autores_deputados, list):\n autores_deputados = [\n f\"{dep['idCadastro']}-{dep['nome']}-{dep['GP']}\"\n for dep in autores_deputados\n ]\n else:\n autores_deputados = f\"{autores_deputados['idCadastro']}-{autores_deputados['nome']}-{autores_deputados['GP']}\"\n else:\n autores_deputados = iniciativa[\"iniAutorOutros\"][\"nome\"]\n\n row.append(autores_deputados)\n\n return row\n\n\ndef process_publication(publicacao):\n row = []\n\n if not all(\n key in publicacao\n for key in [\"pubNr\", \"pubSL\", \"pubdt\", \"pubLeg\", \"pubTipo\", \"pag\"]\n ):\n raise ARValuesMissingException(\n \"Some publication required attributes are missing\"\n )\n\n pub_num = int(publicacao[\"pubNr\"])\n row.append(pub_num)\n pub_sessao = int(publicacao[\"pubSL\"])\n row.append(pub_sessao)\n row.append(publicacao[\"pubdt\"])\n pub_legislatura = publicacao[\"pubLeg\"]\n pub_serie = publicacao[\"pubTipo\"].split()[1]\n pages = 
publicacao[\"pag\"][\"string\"]\n if isinstance(pages, str):\n row.append([pages])\n else:\n row.append(pages)\n row.append(f\"dar_serie_{pub_serie}_{pub_legislatura}_{pub_sessao}_{pub_num:03}.pdf\")\n if pub_legislatura in [\"VII\", \"VIII\", \"IX\"]:\n row.append(scrape_first_page(publicacao))\n else:\n row.append(1)\n\n return row\n\n\ndef process_speaker(deputado):\n row = []\n\n row.append(deputado[\"idCadastro\"])\n row.append(deputado[\"nome\"])\n row.append(deputado[\"GP\"])\n\n return row\n\n\ndef literal_return(val):\n try:\n return ast.literal_eval(val)\n except (ValueError, SyntaxError) as e:\n return val\n\n\ndef create_vote_from_vote_lists(df):\n # explode all three vote columns\n df[\"vot_in_favour\"] = df[\"vot_in_favour\"].apply(literal_return)\n df[\"vot_against\"] = df[\"vot_against\"].apply(literal_return)\n df[\"vot_abstention\"] = df[\"vot_abstention\"].apply(literal_return)\n exp_df = (\n df.explode(\"vot_in_favour\").explode(\"vot_against\").explode(\"vot_abstention\")\n )\n # compare labels with votes to find matches\n exp_df = exp_df.eq(exp_df.pop(\"dep_parl_group\"), axis=0)\n # remove all False rows and get the matches in each row\n exp_df = exp_df[exp_df.any(1)].idxmax(1)\n # remove duplicated indices\n df[\"vote\"] = exp_df[~exp_df.index.duplicated()]\n\n\ndef get_value_from_key(key, dictionary):\n for k, v in (\n dictionary.items()\n if isinstance(dictionary, dict)\n else enumerate(dictionary)\n if isinstance(dictionary, list)\n else []\n ):\n if k == key:\n yield v\n elif isinstance(v, (dict, list)):\n for result in get_value_from_key(key, v):\n yield result\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Init metadata dataset\")\n parser.add_argument(\n \"--legs\",\n type=str,\n nargs=\"+\",\n default=all_legs,\n choices=all_legs,\n help=\"legislatures\",\n )\n parser.add_argument(\n \"--metadata_dir\",\n type=Path,\n default=\"data/initiatives\",\n help=\"file directory with initiatives metadata\",\n )\n parser.add_argument(\n \"--output_path\",\n type=str,\n default=\"data/initial_corpus_meta.csv\",\n help=\"output file path\",\n )\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n print(f\"Selected legs: {args.legs}\")\n\n rows = []\n ini_count = 0\n for leg_num in args.legs:\n print(f\"Processing Legislatura{leg_num}\")\n with open(args.metadata_dir / f\"Iniciativas{leg_num}.json\") as f:\n json_data = json.load(f)\n\n iniciativas = json_data[\n \"ArrayOfPt_gov_ar_objectos_iniciativas_DetalhePesquisaIniciativasOut\"\n ][\"pt_gov_ar_objectos_iniciativas_DetalhePesquisaIniciativasOut\"]\n\n for i, iniciativa in tqdm(\n enumerate(iniciativas),\n desc=\"Iterating over initiatives\",\n total=len(iniciativas),\n ):\n try:\n # skip joint initiatives\n conjuntas = get_value_from_key(\"iniciativasConjuntas\", iniciativa)\n if list(conjuntas):\n raise ARValuesMissingException(\"Joint initiatives.\")\n\n discursos = list(\n get_value_from_key(\n \"pt_gov_ar_objectos_peticoes_OradoresOut\", iniciativa\n )\n )\n if not discursos:\n raise ARValuesMissingException(\"No speeches.\")\n\n eventos = iniciativa[\"iniEventos\"][\n \"pt_gov_ar_objectos_iniciativas_EventosOut\"\n ]\n\n oradores, votacao = None, None\n # print(f\"{len(eventos)} events\")\n for evento in eventos:\n if evento[\"fase\"] == \"Discussão generalidade\":\n oradores = list(get_value_from_key(\"oradores\", evento))\n\n if evento[\"fase\"] == \"Votação na generalidade\":\n votacao = list(get_value_from_key(\"votacao\", 
evento))\n\n if not oradores or not votacao:\n print(\"Missing speaker or vote information.\")\n continue\n\n speakers = oradores[0][\"pt_gov_ar_objectos_peticoes_OradoresOut\"]\n if isinstance(speakers, dict):\n speakers = [speakers]\n\n # print(f\"{len(speakers)} speakers\")\n ini_count += 1\n\n for orador in speakers:\n row = []\n publicacao = list(get_value_from_key(\"publicacao\", orador))\n publicacao = publicacao[0][\"pt_gov_ar_objectos_PublicacoesOut\"]\n\n row.extend(add_ini_attributes(iniciativa))\n row.extend(process_ini_authors(iniciativa))\n row.extend(process_voting(votacao))\n row.extend(process_publication(publicacao))\n\n deputado = list(get_value_from_key(\"deputados\", orador))\n if not deputado:\n continue\n deputado = deputado[0]\n row.extend(process_speaker(deputado))\n\n # print(\"Adding row...\")\n rows.append(row)\n\n # print(f\"Initiative #{i + 1} done\")\n\n except ARValuesMissingException as e:\n print(f\"Skipping initiative #{i + 1}. {e}\")\n\n print(f\"Total initiatives processed: {ini_count}\")\n\n df = pd.DataFrame(\n rows,\n columns=[\n \"ini_num\",\n \"ini_leg\",\n \"ini_type\",\n \"ini_title\",\n \"ini_session\",\n \"leg_begin_date\",\n \"leg_end_date\",\n \"authors\",\n \"vot_results\",\n \"vot_in_favour\",\n \"vot_against\",\n \"vot_abstention\",\n \"pub_num\",\n \"pub_session\",\n \"pub_date\",\n \"pages\",\n \"pdf_file_path\",\n \"doc_first_page\",\n \"dep_id\",\n \"dep_name\",\n \"dep_parl_group\",\n ],\n )\n\n # process votes\n create_vote_from_vote_lists(df)\n\n df.to_csv(args.output_path, index=False)\n", "repo_name": "afonso-sousa/pt_parliamentary_minutes", "sub_path": "init_corpus_meta.py", "file_name": "init_corpus_meta.py", "file_ext": "py", "file_size_in_byte": 11545, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 32, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 53, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 55, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 62, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 63, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 64, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 188, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 225, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 236, "usage_type": "name"}, {"api_name": "json.load", "line_number": 261, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 267, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 336, "usage_type": "call"}]} +{"seq_id": "31264617840", "text": "import pychrono as chrono\nimport pychrono.fea as fea\nimport pychrono.irrlicht as chronoirr\nimport pychrono.pardisomkl as mkl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nchrono.SetChronoDataPath('./chrono_data/')\n\n# Unit conversion\nM_TO_L = 1e0\nKG_TO_W = 1e0\nS_TO_T = 1e0\nPI = np.pi\n\n# Constants\nbeam_thickness = 0.003*M_TO_L\nbeam_length = 0.50*M_TO_L\nbeam_height = 0.04*M_TO_L\n\ng = -9.81*M_TO_L/S_TO_T**2 # m/s^2\n\nrho = 1220*KG_TO_W/(M_TO_L**3) # kg/m^3\nE = 26e7*KG_TO_W/M_TO_L/S_TO_T**2 # kg*m/s^2/m^2\nnu = 0.38\nmb = 0.01\n\nfreq = 1.0\namplitude 
= PI/2/S_TO_T\n\nstep = 1e-3*S_TO_T\ntfinal = 5*S_TO_T\n\n# Helper functions\ndef cast_node(nb):\n feaNB = fea.CastToChNodeFEAbase(nb)\n nodeFead = fea.CastToChNodeFEAxyzD(feaNB)\n return nodeFead\n\n# Chrono Simulation\nmysystem = chrono.ChSystemSMC()\nmysystem.Set_G_acc(chrono.ChVectorD(0,0,g))\n\nground = chrono.ChBodyEasyBox(0.1, 0.1, 0.1, 0, False)\nground.SetPos(chrono.ChVectorD(0,0,0))\nground.SetBodyFixed(True)\nmysystem.Add(ground)\n\nslider = chrono.ChBodyEasyBox(0.01, 0.01, beam_height, 0, True)\nslider.SetPos(chrono.ChVectorD(-beam_length/2,0,0))\n# mslider.SetBodyFixed(True)\nmysystem.Add(slider)\n\nclass MotorSpeed(chrono.ChFunction) :\n def __init__(self):\n super().__init__()\n\n def Get_y(self, x) :\n period = 1/freq\n t = np.mod(x, period)\n\n if t < period/4 or t > period*3/4:\n return amplitude\n else:\n return -amplitude\n\nmotor = chrono.ChLinkMotorRotationSpeed()\nmotor.Initialize(\n slider,\n ground,\n chrono.ChFrameD(chrono.ChVectorD(-beam_length/2,0,0),chrono.QUNIT)\n)\nmotor_speed = chrono.ChFunction_Sine(0,freq,amplitude)\n# motor_speed = MotorSpeed()\nmotor.SetMotorFunction(motor_speed)\nmysystem.Add(motor)\n\nbody = fea.ChMesh()\n\nnum_div_x = 25\nnum_div_z = 2\nnum_node_x = num_div_x+1\nnum_node_z = num_div_z+1\n\nnum_elements = num_div_x*num_div_z\nnum_nodes = num_node_x*num_node_z\n\ndx = beam_length/num_div_x\ndy = beam_thickness\ndz = beam_height/num_div_z # rad\n\n# Nodes\nfor k in range(num_node_z):\n for i in range(num_node_x):\n # Position of node\n x = i*dx-beam_length/2\n y = 0\n z = k*dz\n\n dirX = 0\n dirY = -1\n dirZ = 0\n\n # If nodes added to element in CCW then -y\n node = fea.ChNodeFEAxyzD(\n chrono.ChVectorD(x,y,z),\n chrono.ChVectorD(dirX,dirY,dirZ),\n )\n node.SetMass(0)\n\n if i <= 1:\n joint = fea.ChLinkPointFrameGeneric(True,True,True)\n joint.Initialize(node,slider)\n mysystem.Add(joint)\n\n body.AddNode(node)\n\n\n# Elements\nmat = fea.ChMaterialShellANCF(rho, E, nu)\nfor k in range(num_div_z):\n for i in range(num_div_x):\n nodeA = i+k*num_node_x\n nodeB = i+k*num_node_x+1\n nodeC = i+(k+1)*num_node_x+1\n nodeD = i+(k+1)*num_node_x\n\n element = fea.ChElementShellANCF()\n element.SetNodes(\n cast_node(body.GetNode(nodeA)),\n cast_node(body.GetNode(nodeB)),\n cast_node(body.GetNode(nodeC)),\n cast_node(body.GetNode(nodeD))\n )\n element.SetDimensions(dx, dz)\n element.AddLayer(dy, 0*chrono.CH_C_DEG_TO_RAD, mat)\n\n element.SetAlphaDamp(mb)\n element.SetGravityOn(False)\n\n body.AddElement(element)\n\nmysystem.Add(body)\n\n# Visuals\nvbody = fea.ChVisualizationFEAmesh(body)\nvbody.SetFEMdataType(fea.ChVisualizationFEAmesh.E_PLOT_SURFACE)\nvbody.SetWireframe(True)\nbody.AddAsset(vbody)\n\n# Solver and stepper\nmkl_solver = mkl.ChSolverPardisoMKL()\nmysystem.SetSolver(mkl_solver)\n\nhht_stepper = chrono.ChTimestepperHHT(mysystem)\nhht_stepper.SetStepControl(False)\nmysystem.SetTimestepper(hht_stepper)\n\napplication = chronoirr.ChIrrApp(mysystem, \"Curve beam\", chronoirr.dimension2du(1024, 768), chronoirr.VerticalDir_Z)\napplication.AddTypicalSky()\napplication.AddTypicalLights()\napplication.AddTypicalCamera(chronoirr.vector3df(0, 0.5, 0.5),chronoirr.vector3df(0, 0, 0))\napplication.AssetBindAll()\napplication.AssetUpdateAll()\n# application.SetShowInfos(True)\n# application.SetVideoframeSaveInterval(int(1/step/25)) # N frame per unit time\n# application.SetVideoframeSave(True)\n\napplication.SetTimestep(step)\n\nt = []\nf = []\nwhile application.GetDevice().run():\n print('time: {:.4f}'.format(mysystem.GetChTime()))\n 
t.append(mysystem.GetChTime())\n motor_react_force = motor.Get_react_force()\n f.append([motor_react_force.x,motor_react_force.y,motor_react_force.z])\n\n application.BeginScene()\n application.DrawAll()\n\n # Draw axis for scale and orientation\n chronoirr.drawSegment(application.GetVideoDriver(),chrono.ChVectorD(0,0,0),chrono.ChVectorD(1,0,0),chronoirr.SColor(1,255,0,0))\n chronoirr.drawSegment(application.GetVideoDriver(),chrono.ChVectorD(0,0,0),chrono.ChVectorD(0,1,0),chronoirr.SColor(1,0,255,0))\n chronoirr.drawSegment(application.GetVideoDriver(),chrono.ChVectorD(0,0,0),chrono.ChVectorD(0,0,1),chronoirr.SColor(1,0,0,255))\n\n application.DoStep()\n application.EndScene()\n\n if mysystem.GetChTime() > tfinal: # in system seconds\n application.GetDevice().closeDevice()\n\nt = np.array(t)\nf = np.array(f)\n\nplt.figure()\nplt.plot(t,f[:,0])\n", "repo_name": "idealabasu/code_pychrono_simulation", "sub_path": "beam_ANCF.py", "file_name": "beam_ANCF.py", "file_ext": "py", "file_size_in_byte": 5170, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pychrono.SetChronoDataPath", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pychrono.fea.CastToChNodeFEAbase", "line_number": 36, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 36, "usage_type": "name"}, {"api_name": "pychrono.fea.CastToChNodeFEAxyzD", "line_number": 37, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 37, "usage_type": "name"}, {"api_name": "pychrono.ChSystemSMC", "line_number": 41, "usage_type": "call"}, {"api_name": "pychrono.ChVectorD", "line_number": 42, "usage_type": "call"}, {"api_name": "pychrono.ChBodyEasyBox", "line_number": 44, "usage_type": "call"}, {"api_name": "pychrono.ChVectorD", "line_number": 45, "usage_type": "call"}, {"api_name": "pychrono.ChBodyEasyBox", "line_number": 49, "usage_type": "call"}, {"api_name": "pychrono.ChVectorD", "line_number": 50, "usage_type": "call"}, {"api_name": "pychrono.ChFunction", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.mod", "line_number": 60, "usage_type": "call"}, {"api_name": "pychrono.ChLinkMotorRotationSpeed", "line_number": 67, "usage_type": "call"}, {"api_name": "pychrono.ChFrameD", "line_number": 71, "usage_type": "call"}, {"api_name": "pychrono.ChVectorD", "line_number": 71, "usage_type": "call"}, {"api_name": "pychrono.QUNIT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pychrono.ChFunction_Sine", "line_number": 73, "usage_type": "call"}, {"api_name": "pychrono.fea.ChMesh", "line_number": 78, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 78, "usage_type": "name"}, {"api_name": "pychrono.fea.ChNodeFEAxyzD", "line_number": 105, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 105, "usage_type": "name"}, {"api_name": "pychrono.ChVectorD", "line_number": 106, "usage_type": "call"}, {"api_name": "pychrono.ChVectorD", "line_number": 107, "usage_type": "call"}, {"api_name": "pychrono.fea.ChLinkPointFrameGeneric", "line_number": 112, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 112, "usage_type": "name"}, {"api_name": "pychrono.fea.ChMaterialShellANCF", "line_number": 120, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 120, "usage_type": "name"}, {"api_name": "pychrono.fea.ChElementShellANCF", "line_number": 128, "usage_type": "call"}, {"api_name": 
"pychrono.fea", "line_number": 128, "usage_type": "name"}, {"api_name": "pychrono.CH_C_DEG_TO_RAD", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pychrono.fea.ChVisualizationFEAmesh", "line_number": 146, "usage_type": "call"}, {"api_name": "pychrono.fea", "line_number": 146, "usage_type": "name"}, {"api_name": "pychrono.fea.ChVisualizationFEAmesh", "line_number": 147, "usage_type": "attribute"}, {"api_name": "pychrono.fea", "line_number": 147, "usage_type": "name"}, {"api_name": "pychrono.pardisomkl.ChSolverPardisoMKL", "line_number": 152, "usage_type": "call"}, {"api_name": "pychrono.pardisomkl", "line_number": 152, "usage_type": "name"}, {"api_name": "pychrono.ChTimestepperHHT", "line_number": 155, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.ChIrrApp", "line_number": 159, "usage_type": "call"}, {"api_name": "pychrono.irrlicht", "line_number": 159, "usage_type": "name"}, {"api_name": "pychrono.irrlicht.dimension2du", "line_number": 159, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.VerticalDir_Z", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pychrono.irrlicht.vector3df", "line_number": 162, "usage_type": "call"}, {"api_name": "pychrono.irrlicht", "line_number": 162, "usage_type": "name"}, {"api_name": "pychrono.irrlicht.drawSegment", "line_number": 183, "usage_type": "call"}, {"api_name": "pychrono.irrlicht", "line_number": 183, "usage_type": "name"}, {"api_name": "pychrono.ChVectorD", "line_number": 183, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.SColor", "line_number": 183, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.drawSegment", "line_number": 184, "usage_type": "call"}, {"api_name": "pychrono.irrlicht", "line_number": 184, "usage_type": "name"}, {"api_name": "pychrono.ChVectorD", "line_number": 184, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.SColor", "line_number": 184, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.drawSegment", "line_number": 185, "usage_type": "call"}, {"api_name": "pychrono.irrlicht", "line_number": 185, "usage_type": "name"}, {"api_name": "pychrono.ChVectorD", "line_number": 185, "usage_type": "call"}, {"api_name": "pychrono.irrlicht.SColor", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}]} +{"seq_id": "30242928739", "text": "import json\nimport requests\ndef post():\n dict_data={\"ename\":\"loo\",\"email\":\"p@mail.com\",\"eaddress\":\"ppm\",\"ephone_number\":123456,\n \"esalary\":50}\n dump_data=json.dumps(dict_data)\n response=requests.post(\"http://127.0.0.1:8000/register/singleurl/\",data=dump_data)\n print(response.status_code)\n print(response.json())\ndef getWith_id_or_none(id=None):\n data={}\n if id is not None:\n data={\"id\":id}\n response=requests.get(\"http://127.0.0.1:8000/register/singleurl/\",data=json.dumps(data))\n print(response.json())\n print(response.status_code)\ndef put(id):\n data={\"id\":id,\"ename\":\"ravi\",\"email\":\"h@hg.com\",\"eaddress\":\"tyj\",\"ephone_number\":123456}\n jason_dump=json.dumps(data)\n 
response=requests.put(\"http://127.0.0.1:8000/register/singleurl/\",data=jason_dump)\n print(response.json())\n print(response.status_code) \ndef delete(id):\n data={\"id\":id}\n response=requests.delete(\"http://127.0.0.1:8000/register/singleurl/\",data=json.dumps(data))\n print(response.json())\n print(response.status_code)\ndelete(1)", "repo_name": "sunilpedada/djangoAPI-normal-", "sub_path": "testpro/toTESTcrud.py", "file_name": "toTESTcrud.py", "file_ext": "py", "file_size_in_byte": 1058, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "json.dumps", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 25, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "8511548619", "text": "import psycopg2\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\ndef get_db_connection() -> psycopg2.extensions.connection:\n \"\"\"establishing connection with database\"\"\"\n conn = psycopg2.connect(\n database=\"test_database\",\n user=\"test_user\",\n password=\"test@123\",\n host=\"127.0.0.1\",\n port=\"5432\",\n )\n return conn\n\n\nSQL1 =\"\"\"\nPREPARE SQL (text) AS\nSELECT * FROM netbsd.feedbacks f WHERE f.confirmation_no=$1;\nEXECUTE SQL(%s);\n\"\"\"\n\n\nSQL2 = \"\"\"\nPREPARE SQL (text, bool, text, bool, text, bool, text) AS\nINSERT INTO netbsd.feedbacks VALUES($1, $2, $3, $4, $5, $6, $7);\nEXECUTE SQL(%s, %s, %s, %s, %s, %s, %s);\n\"\"\"\n\n\n@app.route('/')\ndef index() -> str:\n return render_template('index.html')\n\n\n@app.route('/validate', methods=['POST'])\ndef validate() -> str:\n fid=request.form['feed']\n conn = get_db_connection()\n cur = conn.cursor()\n cur.execute(SQL1,fid) ### type error\n identifier = cur.fetchall()\n cur.close()\n conn.close()\n if not identifier:\n return render_template('valid.html',fid=fid)\n return render_template('invalid.html',identifier=identifier)\n\n\n@app.route('/store/', methods=['POST'])\ndef store(fid: str) -> str:\n answer1=request.form[\"answer1\"]\n name=request.form[\"name\"]\n answer2=request.form[\"answer2\"]\n email=request.form[\"email\"]\n answer3=request.form[\"answer3\"]\n notification_email=request.form[\"email\"]\n conn = get_db_connection()\n cur = conn.cursor()\n cur.execute(SQL2,\n (\n fid,\n answer1,\n name,\n answer2,\n email,\n answer3,\n notification_email\n )\n )\n cur.close()\n conn.commit()\n conn.close()\n return render_template('thank_you.html')\n", "repo_name": "VivekKSah/NetBSD_project_2022", "sub_path": "feedback_site/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "psycopg2.extensions", "line_number": 6, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 39, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "23135946169", "text": "import requests\nfrom lxml import etree\nfrom hashlib import md5\n\nurl = 'http://www.biquge.info/32_32050/'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'}\n\nhtml = requests.get(url=url, headers=headers).content.decode('utf-8', 'ignore')\n\np = etree.HTML(html)\nr_list = p.xpath('//div[@id=\"list\"]/dl//dd[1]/a/@href')\nprint(r_list)\nfor href in r_list:\n two_url = 'http://www.biquge.info/32_32050/' + href\n print(two_url)\n two_html=requests.get(url=two_url, headers=headers).content.decode('utf-8', 'ignore')\n new_p = etree.HTML(two_html)\n story_info = new_p.xpath('//div[@id=\"content\"]/text()')\n for item in story_info:\n print(item)\n", "repo_name": "sjk052026/test2020", "sub_path": "spider/day05/XiaoshuoSpider.py", "file_name": "XiaoshuoSpider.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 11, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 11, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 18, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "5375266859", "text": "import os, re, json, sys\n\nfile_pattern = re.compile(r'^sw\\_([0-9]+?)\\_([0-9]+?)\\.utt\\.txt$')\nline_pattern = re.compile(r'^(.*?)\\t(.*?)\\t(.*?)$')\n\ntmp_da = {'A': None, 'B': None}\n\ndef preprocess(dir_path, filename):\n with open(os.path.join(dir_path, filename), 'r') as f, \\\n open(os.path.join('./data/corpus/', filename[:-7] + 'jsonlines'), 'w') as out_f:\n data = f.read().split('\\n')\n prev_caller = None\n das = []\n sentences = []\n\n for line in data:\n m = line_pattern.search(line)\n if not m is None:\n current_caller = m.group(1)\n if m.group(2) == '+':\n da = tmp_da[current_caller]\n else:\n da = m.group(2)\n tmp_da[current_caller] = da\n assert da is not None, filename\n if current_caller == prev_caller:\n das.append(da)\n sentences.append(m.group(3))\n else:\n if len(das) > 0 and 
len(sentences) > 0:\n out_f.write(json.dumps({'caller': prev_caller,\n 'DA': das,\n 'sentence': sentences}))\n out_f.write('\\n')\n das = [da]\n sentences = [m.group(3)]\n prev_caller = current_caller\n\ndef FileIter():\n for i in range(14):\n dir_path = os.path.join('./data/swda', 'sw{:02}utt'.format(i))\n print('preprocessing in {}'.format(dir_path))\n files = [f for f in os.listdir(dir_path) if file_pattern.match(f)]\n for i, filename in enumerate(files, 1):\n preprocess(dir_path, filename)\n print('\\rFinish preprocess {}/{} files'.format(i, len(files)), end='')\n print()\n\ndef test1file():\n preprocess('./data/swda/sw00utt', 'sw_0002_4330.utt.txt')\n\nif __name__ == '__main__':\n FileIter()", "repo_name": "ananyaganesh/ftmp", "sub_path": "preprocess.py", "file_name": "preprocess.py", "file_ext": "py", "file_size_in_byte": 1990, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.compile", "line_number": 3, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "36901816613", "text": "import argparse\nimport logging\nimport os\nimport sys\nimport textwrap\n\nfrom . import eval, install\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n\n # Add the current working directory to sys.path, similar to python's\n # unittesting frameworks.\n sys.path.insert(0, os.getcwd())\n\n if args is None:\n args = sys.argv[1:]\n\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n \"--verbose\",\n \"-v\",\n default=False,\n action=\"store_true\",\n help=\"Include additional details, including full stack traces on errors.\",\n )\n\n parser = argparse.ArgumentParser(\n description=textwrap.dedent(\n \"\"\"braintrust is a cli tool to work with Braintrust.\n To see help for a specific subcommand, run `braintrust --help`,\n e.g. 
`braintrust eval --help`\"\"\"\n )\n )\n subparsers = parser.add_subparsers(help=\"sub-command help\", dest=\"subcommand\", required=True)\n\n for module in [eval, install]:\n module.build_parser(subparsers, parent_parser)\n\n args = parser.parse_args(args=args)\n level = logging.DEBUG if args.verbose else logging.INFO\n logging.basicConfig(format=\"%(asctime)s %(levelname)s [%(name)s]: %(message)s\", level=level)\n\n return args.func(args)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "repo_name": "braintrustdata/braintrust-sdk", "sub_path": "py/src/braintrust/cli/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sys.path.insert", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.INFO", "line_number": 42, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "42786474139", "text": "from django.shortcuts import render\nfrom .parsing import record_db\nimport sqlite3\nfrom datetime import datetime\n\n\n#@cache_page(100)\ndef currencies(request):\n #record_db()\n content = get_currencies()\n last = len(content) - 26\n return render(request, 'base.html', {'content': content[last:]})\n\n\ndef get_currencies():\n date_now = str(datetime.now())[:10]\n conn = sqlite3.connect('db.sqlite3')\n cursor = conn.cursor()\n cursor.execute(f'SELECT country, unit, course, date FROM courses WHERE date LIKE \"{date_now}%\"')\n content = cursor.fetchall()\n conn.close()\n return content\n", "repo_name": "Andrei-Red/parsing-currencies", "sub_path": "parsing_currencies/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 604, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "13175215069", "text": "# Import relevant Python modules\nimport operator\nimport math\nimport random\nimport numpy as np\nfrom matplotlib import pyplot\n\n# Import DEAP modules\nfrom deap import algorithms\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom deap import gp\n\n# ====================================================================\n# PARAMETERS - change the parameters in this section\n# Population size: 500 vs 2000\n# Tournament Size: 2 vs 5\n# repeat at least 10 times per setting\n\nno_generations = 75 # number of generations\nno_population = 1000 # population size\nno_tournaments = 5 # tournament size\n\n# other parameters that you can change and 
explore\np_xo = 0.7 # XO rate\np_m = 0.3 # Mutation rate\nUseSqError = True # use Least Squares approach \n\n# ====================================================================\n# Define your Problems/Target Functions and create sample data\n# Assume you make measurments at specific test points:\n\n# test_points = np.linspace(-math.pi,math.pi, 65).tolist()\ntest_points = [2, 3, 4, 5, 6, 7]\n## Defining the functions. ##\n#measurement = lambda x: x**6-2*x**4-13*x**2 # Function 1\n#measurement = lambda x: math.sin(math.pi/4 + 3*x) # Function 2\n\n# ====================================================================\n\ntarget = [1606, 2398, 3399, 4277, 4956, 5752]\nmeasurement = lambda idx, x: target[idx] - x\n#target = np.empty(len(test_points))\n#for i in range(len(test_points)): target[i] = measurement(test_points[i])\n\nfig, ax = pyplot.subplots(figsize=(15,4))\nax.scatter(test_points, target)\nax.set_xlabel('Test points')\nax.set_ylabel('Measurements')\nax.set_title('Data set')\npyplot.show()\n\n# Define new functions\ndef protectedDiv(left, right):\n return left / right if right else 1 \n \n# create Primitive set & classes \nif \"pset\" not in globals():\n pset = gp.PrimitiveSet(\"MAIN\", 1)\n pset.addPrimitive(operator.add, 2)\n pset.addPrimitive(operator.sub, 2)\n pset.addPrimitive(operator.mul, 2)\n pset.addPrimitive(protectedDiv, 2)\n pset.addPrimitive(operator.neg, 1)\n pset.addPrimitive(math.cos, 1)\n pset.addPrimitive(math.sin, 1)\n pset.addTerminal(1)\n pset.addTerminal(-1) \n pset.addEphemeralConstant(\"rand101\", lambda: random.randint(-1,1))\n pset.renameArguments(ARG0='x')\n\n creator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,))\n creator.create(\"Individual\", gp.PrimitiveTree, fitness=creator.FitnessMin)\n \ntoolbox = base.Toolbox()\ntoolbox.register(\"expr\", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)\ntoolbox.register(\"individual\", tools.initIterate, creator.Individual, toolbox.expr)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\ntoolbox.register(\"compile\", gp.compile, pset=pset)\n\n\ndef evalSymbReg(individual):\n # Transform the tree expression in a callable function\n func = toolbox.compile(expr=individual)\n\n if UseSqError:\n # squared error\n error = (abs(func(x) - measurement(idx, x))**2 for idx, x in enumerate(test_points)) \n else:\n # Absolute distance between target curve and solution\n error = (abs(func(x) - measurement(idx, x)) for idx, x in enumerate(test_points)) \n\n return math.fsum(error)/len(test_points),\n\n\ntoolbox.register(\"evaluate\", evalSymbReg)\ntoolbox.register(\"select\", tools.selTournament, tournsize=no_tournaments)\ntoolbox.register(\"mate\", gp.cxOnePoint)\ntoolbox.register(\"expr_mut\", gp.genFull, min_=0, max_=2)\ntoolbox.register(\"mutate\", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)\n\ntoolbox.decorate(\"mate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=64))\ntoolbox.decorate(\"mutate\", gp.staticLimit(key=operator.attrgetter(\"height\"), max_value=64))\n\nrandom.seed()\n\npop = toolbox.population(n=no_population)\nhof = tools.HallOfFame(1)\n\nstats_fit = tools.Statistics(lambda ind: ind.fitness.values)\nstats_size = tools.Statistics(len)\nmstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)\nmstats.register(\"mdn\", np.median)\nmstats.register(\"avg\", np.mean)\nmstats.register(\"std\", np.std)\nmstats.register(\"min\", np.min)\nmstats.register(\"max\", np.max)\n\npop, log = algorithms.eaSimple(pop, toolbox, p_xo, p_m, no_generations, 
stats=mstats, halloffame=hof, verbose=True)\n\n# Plot Fitness and Size\nx = np.arange(0, no_generations+1)\ns = log.chapters['size'].select(\"mdn\")\nf = log.chapters['fitness'].select(\"mdn\")\n\nfig, ax = pyplot.subplots()\nax.plot(x, f/max(f), 'k--', label='Fitness')\nax.plot(x, s/max(s), 'k:', label='Size')\nax.set_xlabel('Generations')\nax.set_ylabel('Normalised Fitness/Size')\nax.set_title('Median')\nlegend = ax.legend(shadow=True, fontsize='x-large')\nprint('Fitness: [' + str(min(f))+', '+str(max(f))+']')\nprint('Size: [' + str(min(s))+', '+str(max(s))+']')\nprint('Evaluations: ' +str(sum(log.select(\"nevals\"))))\n\npyplot.show()\n\n# Best individual \nprint(hof[0])\n\n# Plot comparison Target vs. evolved solution\n\nx = test_points + [8]\nf = toolbox.compile(expr=hof[0])\n\ny = np.empty(len(x))\nfor i in range(len(x)): y[i] = f(x[i])\n\nprint(x)\nprint(y)\n\ntarget2 = target + [0]\n\nfig, ax = pyplot.subplots()\nax.plot(x, y, 'r-', label='Best Solution')\nax.plot(x, target2, 'k-', label='Target func')\n#legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')\nlegend = ax.legend(shadow=True, fontsize='x-large')\n\npyplot.show()\n\nprint(\"8 Users\")\nprint(f(8))\nprint(\"200 Users\")\nprint(f(200))\n", "repo_name": "EB1811/randomPythonScripts", "sub_path": "GeneticRegression.py", "file_name": "GeneticRegression.py", "file_ext": "py", "file_size_in_byte": 5350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "deap.gp.PrimitiveSet", "line_number": 60, "usage_type": "call"}, {"api_name": "deap.gp", "line_number": 60, "usage_type": "name"}, {"api_name": "operator.add", "line_number": 61, "usage_type": "attribute"}, {"api_name": "operator.sub", "line_number": 62, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 63, "usage_type": "attribute"}, {"api_name": "operator.neg", "line_number": 65, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 66, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 67, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 70, "usage_type": "call"}, {"api_name": "deap.creator.create", "line_number": 73, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 73, "usage_type": "name"}, {"api_name": "deap.base.Fitness", "line_number": 73, "usage_type": "attribute"}, {"api_name": "deap.base", "line_number": 73, "usage_type": "name"}, {"api_name": "deap.creator.create", "line_number": 74, "usage_type": "call"}, {"api_name": "deap.creator", "line_number": 74, "usage_type": "name"}, {"api_name": "deap.gp.PrimitiveTree", "line_number": 74, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 74, "usage_type": "name"}, {"api_name": "deap.creator.FitnessMin", "line_number": 74, "usage_type": "attribute"}, {"api_name": "deap.base.Toolbox", "line_number": 76, "usage_type": "call"}, {"api_name": "deap.base", "line_number": 76, "usage_type": "name"}, {"api_name": "deap.gp.genHalfAndHalf", "line_number": 77, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 77, "usage_type": "name"}, {"api_name": "deap.tools.initIterate", "line_number": 78, 
"usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 78, "usage_type": "name"}, {"api_name": "deap.creator.Individual", "line_number": 78, "usage_type": "attribute"}, {"api_name": "deap.creator", "line_number": 78, "usage_type": "name"}, {"api_name": "deap.tools.initRepeat", "line_number": 79, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 79, "usage_type": "name"}, {"api_name": "deap.gp.compile", "line_number": 80, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 80, "usage_type": "name"}, {"api_name": "math.fsum", "line_number": 94, "usage_type": "call"}, {"api_name": "deap.tools.selTournament", "line_number": 98, "usage_type": "attribute"}, {"api_name": "deap.tools", "line_number": 98, "usage_type": "name"}, {"api_name": "deap.gp.cxOnePoint", "line_number": 99, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 99, "usage_type": "name"}, {"api_name": "deap.gp.genFull", "line_number": 100, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 100, "usage_type": "name"}, {"api_name": "deap.gp.mutUniform", "line_number": 101, "usage_type": "attribute"}, {"api_name": "deap.gp", "line_number": 101, "usage_type": "name"}, {"api_name": "deap.gp.staticLimit", "line_number": 103, "usage_type": "call"}, {"api_name": "deap.gp", "line_number": 103, "usage_type": "name"}, {"api_name": "operator.attrgetter", "line_number": 103, "usage_type": "call"}, {"api_name": "deap.gp.staticLimit", "line_number": 104, "usage_type": "call"}, {"api_name": "deap.gp", "line_number": 104, "usage_type": "name"}, {"api_name": "operator.attrgetter", "line_number": 104, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 106, "usage_type": "call"}, {"api_name": "deap.tools.HallOfFame", "line_number": 109, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 109, "usage_type": "name"}, {"api_name": "deap.tools.Statistics", "line_number": 111, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 111, "usage_type": "name"}, {"api_name": "deap.tools.Statistics", "line_number": 112, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 112, "usage_type": "name"}, {"api_name": "deap.tools.MultiStatistics", "line_number": 113, "usage_type": "call"}, {"api_name": "deap.tools", "line_number": 113, "usage_type": "name"}, {"api_name": "numpy.median", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.std", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 118, "usage_type": "attribute"}, {"api_name": "deap.algorithms.eaSimple", "line_number": 120, "usage_type": "call"}, {"api_name": "deap.algorithms", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", 
"line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "23375592890", "text": "from Project.Classes.MotorChain import MotorChain\nfrom Project.Domain import MaterialsDomain\nimport math\nfrom scipy import optimize\n\nACCEPTABLE_ERROR = 0.0001\n\nclass ThickVessel (MotorChain):\n def calculateCircumferentialStress (self):\n p = self.workPressure\n ri = self.internalRadius\n re = self.internalRadius + self.thickness\n self.circumferentialStress = p * ((ri ** 2) / (re ** 2 - ri ** 2 )) \\\n * (1 + (re ** 2)/(ri ** 2))\n\n def calculateHeatCircumferentialStress (self):\n E = self.material['elasticityModule']\n a = self.material['thermalExpansioCoeficient']\n v = self.material['poissonRatio']\n ri = self.internalRadius\n re = ri + self.thickness\n dT = self.additionalHeatStress.temperatureVariation\n self.additionalHeatStress.circumferentialStress = (E*a*dT)/(2*(1-v)) \\\n * ((2*(re/ri) ** 2)/(-1 + (re/ri) ** 2) - (1/math.log(re/ri)))\n\n def calculateMaxRadialStress (self):\n p = self.workPressure\n ri = self.internalRadius\n re = self.internalRadius + self.thickness\n self.radialStress = p * ((ri ** 2) / (re ** 2 - ri ** 2 )) \\\n * (1 - (re ** 2)/(ri ** 2))\n\n def calculateHeatMaxRadialStress (self):\n self.additionalHeatStress.radialStress = 0\n\n\n def calculateLongitudinalStress (self):\n p = self.workPressure\n ri = self.internalRadius\n re = self.internalRadius + self.thickness\n self.longitudinalStress = (p * ri ** 2)/((re ** 2)- (ri ** 2))\n\n def calculateHeatLongitudinalStress (self):\n E = self.material['elasticityModule']\n a = self.material['thermalExpansioCoeficient']\n v = self.material['poissonRatio']\n ri = self.internalRadius\n re = ri + self.thickness\n dT = self.additionalHeatStress.temperatureVariation\n self.additionalHeatStress.longitudinalStress = (E*a*dT)/(2*(1-v)) \\\n * ((2*(re/ri) ** 2)/(-1 + (re/ri) ** 2) - (1/math.log(re/ri)))\n\n def calculateThickness (self):\n radiusRatio = optimize.newton(self.calculateVonMissesByRadiusRatio,1.1)\n self.thickness = (radiusRatio * self.internalRadius) - self.internalRadius\n\n def calculateThicknessWithHeatAddition (self):\n radiusRatio = optimize.newton(self.calculateVonMissesByRadiusRatioWithHeatAddition, 1.1)\n self.thickness = (radiusRatio * self.internalRadius) - self.internalRadius\n\n def calculatePrincipalStresses (self):\n self.calculateCircumferentialStress()\n self.calculateLongitudinalStress()\n self.calculateMaxRadialStress()\n self.calculateNozzleReinforcementThickness()\n if self.hasAditionalHeatStress:\n self.calculateAdditionHeatStress()\n self.calculateVonMisesStress()\n \n def calculateVonMissesByRadiusRatio(self, x):\n p = self.workPressure \n sigma_e = self.material['yeldStrength']/1.5\n return ((math.sqrt(3) * p * (x**2))/((x**2) -1)) - sigma_e\n\n def calculateVonMissesByRadiusRatioWithHeatAddition(self, x):\n p = self.workPressure \n sigma_e = self.material['yeldStrength']/1.5\n S = self.getThermicalS()\n f1 = ((x**4)*(3*p**2 + 6*p*S + 4*S**2))/((x**2 -1)**2)\n f2 = (S*(x**2)*(4*S + 3*p))/((x**2 -1)*math.log(x))\n f3 = (S**2)/(math.log(x)**2)\n return math.sqrt(f1 - f2 + f3) - sigma_e\n\n\n @classmethod\n def motorChainSMCalculation(cls, motorChain):\n newMotorChain = cls(motorChain)\n material = material = \\\n MaterialsDomain.Materials.getMaterialById(newMotorChain.materialId)\n newMotorChain.material = material\n newMotorChain.calculatePrincipalStresses()\n newMotorChain.calculateSM()\n return newMotorChain\n \n\n 
@classmethod\n def motorChainThicknessCalculation(cls, motorChain):\n newMotorChain = cls(motorChain)\n material = material = \\\n MaterialsDomain.Materials.getMaterialById(newMotorChain.materialId)\n newMotorChain.material = material\n if newMotorChain.hasAditionalHeatStress:\n newMotorChain.calculateThicknessWithHeatAddition()\n else:\n newMotorChain.calculateThickness()\n return newMotorChain", "repo_name": "samuelrcm2/POCmf", "sub_path": "POCmf/Project/Classes/ThickVessel.py", "file_name": "ThickVessel.py", "file_ext": "py", "file_size_in_byte": 4241, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "Project.Classes.MotorChain.MotorChain", "line_number": 8, "usage_type": "name"}, {"api_name": "math.log", "line_number": 24, "usage_type": "call"}, {"api_name": "math.log", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.optimize.newton", "line_number": 54, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 54, "usage_type": "name"}, {"api_name": "scipy.optimize.newton", "line_number": 58, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 58, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 73, "usage_type": "call"}, {"api_name": "math.log", "line_number": 80, "usage_type": "call"}, {"api_name": "math.log", "line_number": 81, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 82, "usage_type": "call"}, {"api_name": "Project.Domain.MaterialsDomain.Materials.getMaterialById", "line_number": 89, "usage_type": "call"}, {"api_name": "Project.Domain.MaterialsDomain.Materials", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Project.Domain.MaterialsDomain", "line_number": 89, "usage_type": "name"}, {"api_name": "Project.Domain.MaterialsDomain.Materials.getMaterialById", "line_number": 100, "usage_type": "call"}, {"api_name": "Project.Domain.MaterialsDomain.Materials", "line_number": 100, "usage_type": "attribute"}, {"api_name": "Project.Domain.MaterialsDomain", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "28046750987", "text": "from enum import unique\r\nfrom turtle import update\r\nfrom LoginPage import *\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtGui import *\r\nfrom PySide2.QtWidgets import *\r\nfrom PySide2.QtCore import QTimer\r\nfrom PySide2.QtWidgets import QLCDNumber\r\nimport pandas as pd\r\nimport json\r\n\r\n\r\nclass Ui_widget_game(object):\r\n def setupUi(self, widget_game):\r\n if not widget_game.objectName():\r\n widget_game.setObjectName(u\"widget_game\")\r\n widget_game.resize(483, 550)\r\n self.frame_game = QFrame(widget_game)\r\n self.frame_game.setObjectName(u\"frame_game\")\r\n self.frame_game.setGeometry(QRect(10, 10, 461, 531))\r\n self.frame_game.setStyleSheet(u\"background-color: rgb(35, 35, 35);\\n\"\r\n\"font: 75 10pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"border-radius:20px;\")\r\n self.frame_game.setFrameShape(QFrame.StyledPanel)\r\n self.frame_game.setFrameShadow(QFrame.Raised)\r\n self.label_card = QLabel(self.frame_game)\r\n self.label_card.setObjectName(u\"label_card\")\r\n\r\n self.label_card.setGeometry(QRect(50, 120, 361, 251))\r\n self.label_card.setStyleSheet(u\"color:black;\\n\"\r\n\"background-color: rgb(83, 83, 83);\\n\"\r\n\"font: 75 20pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"\\n\"\r\n\"\")\r\n self.label_language = QLabel(self.frame_game)\r\n self.label_language.setObjectName(u\"label_language\")\r\n self.label_language.setGeometry(QRect(190, 70, 61, 41))\r\n 
self.label_language.setStyleSheet(u\"color:white;\\n\"\r\n\"font: 75 11pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"\\n\"\r\n\"\")\r\n self.button_ok = QPushButton(self.frame_game)\r\n self.button_ok.setObjectName(u\"button_ok\")\r\n self.button_ok.setGeometry(QRect(60, 390, 161, 81))\r\n self.button_ok.setStyleSheet(u\"background-color: white;\\n\"\r\n\"color: rgb(0, 0, 0);\\n\"\r\n\"font: 75 12pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"border:none;\\n\"\r\n\"border-radius:20px;\\n\"\r\n\"\")\r\n self.button_cancel = QPushButton(self.frame_game)\r\n self.button_cancel.setObjectName(u\"button_cancel\")\r\n self.button_cancel.setGeometry(QRect(240, 390, 161, 81))\r\n self.button_cancel.setStyleSheet(u\"background-color:white;\\n\"\r\n\"font: 75 12pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"color: rgb(0, 0, 0);\\n\"\r\n\"border:none;\\n\"\r\n\"border-radius:20px;\\n\"\r\n\"\")\r\n self.label_time = QLabel(self.frame_game)\r\n self.label_time.setObjectName(u\"label_language\")\r\n self.label_time.setGeometry(QRect(367, 20, 71, 22))\r\n self.label_time.setStyleSheet(u\"color:white;\\n\"\r\n\"font: 75 11pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"\\n\"\r\n\"\")\r\n self.button_back = QPushButton(self.frame_game)\r\n self.button_back.setObjectName(u\"button_back\")\r\n self.button_back.setGeometry(QRect(50, 20, 111, 41))\r\n self.button_back.setStyleSheet(u\"QPushButton{background-color: rgb(58, 58, 58);\\n\"\r\n\"color:white;\\n\"\r\n\"border:none;\\n\"\r\n\"border-radius:20px;}\\n\"\r\n\"QPushButton::hover{\\n\"\r\n\"color:red;}\")\r\n self.label_check = QLabel(self.frame_game)\r\n self.label_check.setObjectName(u\"label_check\")\r\n self.label_check.setGeometry(QRect(350, 130,60, 21))\r\n self.label_check.setStyleSheet(u\"background-color: rgb(83, 83, 83);\\n\"\r\n\"font: 75 12pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"border:none;\\n\"\r\n\"border-radius:20px;\")\r\n\r\n self.label_word = QLabel(self.frame_game)\r\n self.label_word.setObjectName(u\"label_word\")\r\n self.label_word.setGeometry(QRect(190, 230, 70, 30))\r\n self.label_word.setStyleSheet(u\"background-color: rgb(83, 83, 83);\\n\"\r\n\"font: 75 12pt \\\"Yu Gothic UI\\\";\\n\"\r\n\"border:none;\\n\"\r\n\"border-radius:20px;\")\r\n\r\n self.retranslateUi(widget_game)\r\n\r\n QMetaObject.connectSlotsByName(widget_game)\r\n # setupUi\r\n\r\n def retranslateUi(self, widget_game):\r\n widget_game.setWindowTitle(QCoreApplication.translate(\"widget_game\", u\"FLASHCARDS FOR DUTCH LEARNING\", None))\r\n self.label_card.setText(\"\")\r\n self.label_language.setText(QCoreApplication.translate(\"widget_game\", u\"DUTCH\", None))\r\n self.button_ok.setText(QCoreApplication.translate(\"widget_game\", u\"OK\", None))\r\n self.button_cancel.setText(QCoreApplication.translate(\"widget_game\", u\"CANCEL\", None))\r\n #self.timeEdit.setDisplayFormat(QCoreApplication.translate(\"widget_game\", u\"mm:ss\", None))\r\n self.label_time.setText(QCoreApplication.translate(\"widget_game\", u\"TIME\", None))\r\n self.button_back.setText(QCoreApplication.translate(\"widget_game\", u\"BACK\", None))\r\n#if QT_CONFIG(whatsthis)\r\n self.label_check.setWhatsThis(QCoreApplication.translate(\"widget_game\", u\"


\", None))\r\n#endif // QT_CONFIG(whatsthis)\r\n self.label_check.setText(QCoreApplication.translate(\"widget_game\", u\"0/20\", None))\r\n self.label_word.setText(QCoreApplication.translate(\"widget_game\", u\"Vocabulary\", None))\r\n # retranslateUi \r\n self.label_word.setText(\"START!\")\r\n \r\n \r\nclass Get_GamePage(QWidget, Ui_widget_game):\r\n def __init__(self):\r\n super(Get_GamePage,self).__init__()\r\n self.setupUi(self)\r\n #buttons functions\r\n self.button_ok.clicked.connect(self.my_word)\r\n self.button_cancel.clicked.connect(self.my_canceled_word)\r\n #dataframe of the cards\r\n self.df=pd.read_json(\"C:/Users/conko/OneDrive/Desktop/FlashCards/FlashCards_Team1/translatedCards.json\")\r\n\r\n #get the current username\r\n my_login_object=Login_Page()\r\n self.username=my_login_object.username\r\n\r\n #get the current level of player\r\n url=\"C:/Users/conko/OneDrive/Desktop/FlashCards/FlashCards_Team1/Loginsystem.json\"\r\n with open(url, 'r+') as f:\r\n data = json.load(f)\r\n res=list(filter(lambda x: x[\"username\"] == self.username, data))\r\n self.df_level=res[0]['level']\r\n \r\n #create vocabulary lists\r\n self.woorden_lijst=self.df[self.df['Unnamed: 0']==f'Level{self.df_level}']['Nederlands']\r\n self.vocab_list=self.df[self.df['Unnamed: 0']==f'Level{self.df_level}']['English']\r\n print(self.woorden_lijst )\r\n\r\n #it adjust the counter according to length of the list \r\n c=self.df[self.df['Unnamed: 0']==f'Level{self.df_level}']['English'].index \r\n self.counter=c[0]\r\n\r\n #unknown vocaabulary\r\n self.unknown_words=[]\r\n\r\n #white button sign\r\n self.flag=True\r\n\r\n #other functions \r\n def update_background(self):\r\n #it makes white buttons colorful\r\n if self.flag:\r\n\r\n self.button_ok.setStyleSheet(\"background-color: white;\")\r\n self.button_cancel.setStyleSheet(\"background-color: white;\")\r\n else: \r\n self.button_ok.setStyleSheet(\"background-color: green ;\")\r\n self.button_cancel.setStyleSheet(\"background-color: red ;\")\r\n \r\n self.flag = not self.flag \r\n\r\n \r\n def configureButton(self, button, begin, duration):\r\n\r\n end = begin.addSecs(duration)#duration: configuration total duration\r\n \r\n now = QTime.currentTime()\r\n button.setEnabled(begin <= now <= end)\r\n\r\n #This part works after 3 secs are finished\r\n if now < begin: \r\n self.update_background()\r\n\r\n #Set enable button_ok and button_cancel\r\n QTimer.singleShot(\r\n now.msecsTo(begin), lambda: button.setEnabled(True))\r\n\r\n #activate Engels \r\n my_word=self.df[self.df['Unnamed: 0']==f'Level{self.df_level}']['English'][self.counter] \r\n QTimer.singleShot(\r\n now.msecsTo(begin),lambda: self.label_word.setText(my_word))\r\n\r\n #change Language Title to English\r\n change_title=\"ENGLISH\"\r\n QTimer.singleShot(\r\n now.msecsTo(begin),lambda: self.label_language.setText(change_title)) \r\n\r\n #This part works when 3 sec begins again / deactivate butons \r\n if now < end:\r\n self.update_background()\r\n QTimer.singleShot(\r\n now.msecsTo(end), lambda: button.setEnabled(False))\r\n \r\n\r\n def activateButton(self):\r\n #set 3 seconds to activate button_ok \r\n begin = QTime.currentTime().addSecs(1)#delay the begin time 3 sec\r\n self.configureButton(self.button_ok, begin, 120)#every activating has 120 sec total duration to keep in activate\r\n #set 3 seconds to activate button_cancel\r\n begin = QTime.currentTime().addSecs(1)\r\n self.configureButton(self.button_cancel, begin, 120)\r\n #change language title to Dutch\r\n change_title=\"DUTCH\"\r\n 
self.label_language.setText(change_title)\r\n        print(\"activateButton ran\")\r\n    \r\n    def flashcard(self): \r\n\r\n        try: \r\n            #activate the buttons with timer\r\n            self.activateButton()\r\n            \r\n            #test \r\n            print(self.woorden_lijst[self.counter]) \r\n            \r\n            #run the game/print word on the screen\r\n            my_word=self.woorden_lijst[self.counter]\r\n            self.label_word.setText(my_word)\r\n\r\n            #update the check label\r\n            check_lbl=f\"{self.counter}\"+f\"/{self.woorden_lijst.index[19]}\"\r\n            self.label_check.setText(check_lbl)\r\n\r\n            #update counter\r\n            self.counter +=1\r\n\r\n        except KeyError:\r\n            #Later we can add a back button\r\n            print(\"Key Error occurred\")\r\n            self.label_word.setText(\"KLAAR\")\r\n            print(self.unknown_words)\r\n            #self.update_level()\r\n\r\n    #create a new list \r\n    def my_canceled_word(self):\r\n        print(\"CANCEL CLICKED\")\r\n        self.flashcard() \r\n        self.unknown_words.append(self.df[self.df['Unnamed: 0']==f'Level{self.df_level}']['Nederlands'][self.counter-2])\r\n        \r\n    #update level in the json file\r\n    def update_level(self):\r\n        #set the current level to the json file\r\n        url=\"C:/Users/conko/OneDrive/Desktop/FlashCards/FlashCards_Team1/Loginsystem.json\"\r\n        with open(url, 'r+') as f:\r\n            data = json.load(f)\r\n            data[0]['level'] = self.df_level+1 \r\n            f.seek(0) \r\n            json.dump(data, f )\r\n            f.truncate() \r\n\r\n    #game works with this method\r\n    def my_word(self):\r\n        #call the flashcard function\r\n        self.flashcard() \r\n\r\n    #update list(unknown vocabulary list)\r\n    def last_list(self):\r\n        pass\r\n    #update counter \r\n    def last_counter(self):\r\n        pass \r\n\r\n\r\ndef main():\r\n    import sys\r\n    app = QApplication(sys.argv)\r\n    get_GamePage= Get_GamePage()\r\n    get_GamePage.show()\r\n    sys.exit(app.exec_())\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n    \r\n    ", "repo_name": "emretmz/FlashCards", "sub_path": "FlashCards_Team1/src/GamePage.py", "file_name": "GamePage.py", "file_ext": "py", "file_size_in_byte": 11922, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.read_json", "line_number": 123, "usage_type": "call"}, {"api_name": "json.load", "line_number": 132, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QTimer.singleShot", "line_number": 177, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 177, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QTimer.singleShot", "line_number": 182, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 182, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QTimer.singleShot", "line_number": 187, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 187, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QTimer.singleShot", "line_number": 193, "usage_type": "call"}, {"api_name": "PySide2.QtCore.QTimer", "line_number": 193, "usage_type": "name"}, {"api_name": "json.load", "line_number": 247, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 250, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 268, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "13991963631", "text": "from django.conf import settings\n\nfrom .compat import get_model\n\ntry:\n    from django.contrib.auth import get_user_model\n    UserModel = get_user_model\nexcept ImportError:\n    UserModel = lambda: get_model('auth', 'User')\n\n\ndef UserModelString():\n    try:\n        return settings.AUTH_USER_MODEL\n    except AttributeError:\n        
return 'auth.User'\n\n\ndef UsernameField():\n    return getattr(UserModel(), 'USERNAME_FIELD', 'username')\n", "repo_name": "ejakait/bitkoin", "sub_path": "bitkoin/django-registration-master/registration/users.py", "file_name": "users.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 7, "usage_type": "name"}, {"api_name": "compat.get_model", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "26208222068", "text": "\"\"\"\nAbstract classes and wrappers for LLMs, chatbots, and prompts.\n\"\"\"\n\n# TODO Make all prompts to text completion be strings, so people can fill prompts in. Or support both if need be...\n# TODO Add tests, etc... Make sure everything still works.\n\nimport requests\nimport json\nimport re\n\n# Imports for external APIs\nimport openai\nimport cohere\n\n# Hugging Face and PyTorch imports\nfrom transformers import pipeline\nimport torch\n\ndef _clean_messages_to_prompt(messages):\n    \"\"\"\n    Converts an array of messages in the form {\"role\": <str>, \"content\":<str>} into a String.\n\n    This is influenced by the OpenAI chat completion API.\n    \"\"\"\n    out_text = \"\\n\".join([f\"{str(m['role'])}: {str(m['content'])}\" for m in messages])\n    return out_text\n\ndef _get_stop_sequences_from_messages(messages):\n    \"\"\"\n    Generates a list of strings of stop sequences from an array of messages in the form {\"role\": <str>, \"content\":<str>}.\n    \"\"\"\n    roles = set()\n    for m in messages:\n        roles.add(m[\"role\"])\n    stop_sequences = [f\"\\n{r}:\" for r in roles]\n    return stop_sequences\n\nclass LanguageModelWrapper():\n    \"\"\"\n    Abstract wrapper for large language models.\n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    def __repr__(self):\n        pass\n\n    def complete_chat(self, messages):\n        \"\"\"\n        Takes an array of messages in the form {\"role\": <str>, \"content\":<str>} and generates a response.\n\n        This is influenced by the OpenAI chat completion API.\n        \"\"\"\n        pass\n\n    def text_completion(self, prompt, stop_sequences=[]):\n        \"\"\"\n        Standardizes text completion for large language models.\n        \"\"\"\n        pass\n\nclass Prompt():\n    \"\"\"\n    Prompts are used to generate text completions. Prompts can be simple Strings. They can also include variables surrounded by curly braces. For example:\n    Hello {name}!\n    In this case, 'name' can be filled using the fill_prompts() function. This makes it easier to loop through prompts that follow a specific pattern or structure.\n    \"\"\"\n\n    def __init__(self, prompt):\n        self.prompt = prompt\n\n    def __repr__(self):\n        return self.prompt\n\n    def get_prompt(self):\n        \"\"\"\n        Return the raw prompt command (i.e., does not fill in variables.)\n        \"\"\"\n        return self.prompt\n    \n    def fill_prompts(self, **kwargs):\n        \"\"\"\n        Return a prompt with variables filled in.\n        \"\"\"\n        pattern = r'\\{\\s*[a-zA-Z0-9_]+\\s*\\}'\n        matches = re.findall(pattern, self.prompt)\n        new_prompt = self.prompt\n        for m in matches:\n            keyword = m.replace(\"{\", \"\").replace(\"}\", \"\").strip()\n            if keyword in kwargs:\n                new_prompt = new_prompt.replace(m, kwargs[keyword])\n        return new_prompt\n\nclass BloomWrapper():\n    \"\"\"\n    Wrapper for Hugging Face's BLOOM model. 
\nclass BloomWrapper():\n    \"\"\"\n    Wrapper for Hugging Face's BLOOM model. Requires access to Hugging Face's inference API.\n    \"\"\"\n    def __init__(self, apikey):\n        self.apikey = apikey\n\n    def __repr__(self):\n        return f\"BloomWrapper()\"\n\n    def complete_chat(self, messages, append_role=None):\n        \"\"\"\n        Mimics a chat scenario with BLOOM, via a list of {\"role\": ..., \"content\": ...} objects.\n        \"\"\"\n\n        prompt_preamble = \"You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\\n\"\n        prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)\n        if append_role is not None and len(append_role) > 0:\n            prompt_text += f\"\\n{append_role}:\"\n\n        API_URL = \"https://api-inference.huggingface.co/models/bigscience/bloom\"\n        headers = {\"Authorization\": f\"Bearer {self.apikey}\"}\n\n        response = requests.post(API_URL, headers=headers, json={\"inputs\": prompt_text}).json()\n\n        all_text = response[0]['generated_text']\n        new_text = all_text[len(prompt_text):]\n\n        # We only return the first line of text.\n        newline_location = new_text.find(\"\\n\") \n        if newline_location > 0:\n            new_text = new_text[:newline_location]\n\n        return new_text\n\n    def text_completion(self, prompt):\n        \"\"\"\n        Completes text via BLOOM (Hugging Face).\n        \"\"\"\n        API_URL = \"https://api-inference.huggingface.co/models/bigscience/bloom\"\n        headers = {\"Authorization\": f\"Bearer {self.apikey}\"}\n\n        response = requests.post(API_URL, headers=headers, json={\"inputs\": prompt}).json()\n        all_text = response[0]['generated_text']\n        new_text = all_text[len(prompt):]\n        return new_text\n\nclass OpenAIGPTWrapper():\n    \"\"\"\n    Wrapper for the OpenAI API. Supports all major text and chat completion models by OpenAI.\n    \"\"\"\n\n    def __init__(self, apikey, model=\"gpt-3.5-turbo\"):\n        openai.api_key = apikey\n        self.model = model\n\n    def __repr__(self):\n        return f\"OpenAIGPTWrapper(model={self.model})\"\n    \n    def complete_chat(self, messages, append_role=None):\n        \"\"\"\n        Completes chat with OpenAI. If using GPT 3.5 or 4, will simply send the list of {\"role\": ..., \"content\": ...} objects to the API.\n\n        If using an older model, it will structure the messages list into a prompt first.\n        \"\"\"\n\n        if self.model.find('gpt-4') >= 0 or self.model.find('gpt-3.5') >= 0:\n\n            response = openai.ChatCompletion.create(\n                model=self.model,\n                messages=messages\n            )\n            top_response_content = response['choices'][0]['message']['content']\n            return top_response_content\n\n        else:\n\n            prompt_text = _clean_messages_to_prompt(messages)\n            if append_role is not None and len(append_role) > 0:\n                prompt_text += f\"\\n{append_role}: \"\n            prompt_text = prompt_text.strip()\n\n            response = openai.Completion.create(\n                model=self.model,\n                prompt=prompt_text,\n                stop=_get_stop_sequences_from_messages(messages)\n            )\n\n            top_response_content = response['choices'][0]['text']\n            return top_response_content\n
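\n    # Illustrative usage (the key and prompt here are placeholders, not from this file):\n    #   llm = OpenAIGPTWrapper(\"sk-...\", model=\"gpt-3.5-turbo\")\n    #   reply = llm.complete_chat([{\"role\": \"user\", \"content\": \"Hello!\"}])\n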
    \n    # Note that this currently will error out with GPT 3.5 or above as they are chat models.\n    # TODO Add error catching.\n    def text_completion(self, prompt, stop_sequences=[]):\n        \"\"\"\n        Completes text via OpenAI. Note that this doesn't support GPT 3.5 or later.\n        \"\"\"\n\n        if len(stop_sequences) == 0:\n            response = openai.Completion.create(\n                model=self.model,\n                prompt=prompt\n            )\n        else:\n            response = openai.Completion.create(\n                model=self.model,\n                prompt=prompt,\n                stop = stop_sequences\n            )\n        top_response_content = response['choices'][0]['text']\n        return top_response_content\n\nclass ClaudeWrapper():\n    \"\"\"\n    Wrapper for Anthropic's Claude large language model.\n\n    We've opted to call Anthropic's API directly rather than using their Python offering.\n    \"\"\"\n\n    def __init__(self, apikey, model=\"claude-v1\"):\n        self.apikey = apikey\n        self.model = model\n\n    def __repr__(self):\n        return f\"ClaudeWrapper(model={self.model})\"\n    \n    def complete_chat(self, messages, append_role=None):\n        \"\"\"\n        Completes chat with Claude. Since Claude doesn't support a chat interface via API, we mimic the chat via a prompt.\n        \"\"\"\n\n        r_headers = {\"X-API-Key\":self.apikey, \"Accept\":\"application/json\"}\n\n        prompt_text = _clean_messages_to_prompt(messages)\n        if append_role is not None and len(append_role) > 0:\n            prompt_text += f\"\\n{append_role}: \"\n\n        r_data = {\"prompt\": prompt_text,\n                  \"model\": self.model,\n                  \"max_tokens_to_sample\": 500,\n                  \"stop_sequences\": _get_stop_sequences_from_messages(messages)\n                }\n\n        resp = requests.post(\"https://api.anthropic.com/v1/complete\", headers=r_headers, json=r_data)\n        completion = json.loads(resp.text)[\"completion\"].strip()\n\n        return completion\n    \n    def text_completion(self, prompt, stop_sequences=[]):\n        \"\"\"\n        Completes text based on provided prompt.\n        \"\"\"\n\n        r_headers = {\"X-API-Key\":self.apikey, \"Accept\":\"application/json\"}\n        r_data = {\"prompt\": prompt,\n                  \"model\": self.model,\n                  \"max_tokens_to_sample\": 500,\n                  \"stop_sequences\": stop_sequences\n                }\n\n        resp = requests.post(\"https://api.anthropic.com/v1/complete\", headers=r_headers, json=r_data)\n        completion = json.loads(resp.text)[\"completion\"].strip()\n        return completion\n
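\n# For orientation: _clean_messages_to_prompt([{\"role\": \"user\", \"content\": \"Hi\"}]) returns\n# the single line \"user: Hi\"; that flattened transcript is what the wrappers above send upstream.\n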
\n# TODO Might want to add stop sequences (new lines, roles) to make this better.\nclass GPT2Wrapper(LanguageModelWrapper):\n    \"\"\"\n    Wrapper for GPT-2 implementation (via Hugging Face). \n    \"\"\"\n\n    def __init__(self):\n        self.model_name = \"GPT-2\"\n\n    def __repr__(self):\n        return f\"GPT2Wrapper()\"\n    \n    def complete_chat(self, messages, append_role=None, max_length=300):\n        \"\"\"\n        Mimics a chat scenario via a list of {\"role\": ..., \"content\": ...} objects. \n        \"\"\"\n\n        prompt_preamble = \"You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\\n\"\n        prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)\n        if append_role is not None and len(append_role) > 0:\n            prompt_text += f\"\\n{append_role}:\"\n\n        generator = pipeline('text-generation', model='gpt2')\n        resps = generator(prompt_text, max_length=max_length, num_return_sequences=1)\n        resp = resps[0]['generated_text']\n        resp = resp[len(prompt_text):] # Strip out the original text.\n        return resp\n\n    def text_completion(self, prompt, max_length=200):\n        \"\"\"\n        Completes text via GPT-2.\n        \"\"\"\n        generator = pipeline('text-generation', model='gpt2')\n        resps = generator(prompt, max_length=max_length, num_return_sequences=1)\n        resp = resps[0]['generated_text']\n        resp = resp[len(prompt):] # Strip out the original text.\n        return resp\n\nclass DollyWrapper():\n    \"\"\"\n    Implementation of Dolly 2.0 (via Hugging Face).\n    \"\"\"\n\n    def __init__(self):\n        self.model_name = 'dolly-v2-12b'\n        self.generate_text = pipeline(model=\"databricks/dolly-v2-12b\", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map=\"auto\")\n\n    def __repr__(self):\n        return f\"DollyWrapper(model={self.model_name})\"\n\n    def complete_chat(self, messages, append_role=None):\n        \"\"\"\n        Mimics a chat scenario via a list of {\"role\": ..., \"content\": ...} objects. \n        \"\"\"\n\n        prompt_preamble = \"You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\\n\"\n\n        prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)\n        if append_role is not None and len(append_role) > 0:\n            prompt_text += f\"\\n{append_role}:\"\n\n        resp = self.generate_text(prompt_text)\n\n        return resp\n\n    def text_completion(self, prompt):\n        \"\"\"\n        Completes text via Dolly.\n        \"\"\"\n        resp = self.generate_text(prompt)\n        return resp\n\nclass CohereWrapper():\n    \"\"\"\n    Wrapper for Cohere's API. Defaults to their 'xlarge' model.\n    \"\"\"\n\n    def __init__(self, apikey, model=\"xlarge\"):\n        self.apikey = apikey\n        self.model = model\n\n    def __repr__(self):\n        return f\"CohereWrapper(model={self.model})\"\n\n    def complete_chat(self, messages, append_role=None):\n        \"\"\"\n        Mimics a chat scenario via a list of {\"role\": ..., \"content\": ...} objects. \n        \"\"\"\n\n        prompt_text = _clean_messages_to_prompt(messages)\n        if append_role is not None and len(append_role) > 0:\n            prompt_text += f\"\\n{append_role}:\"\n\n        co = cohere.Client(self.apikey)\n        response = co.generate(\n            prompt=prompt_text,\n            max_tokens=300, \n            stop_sequences=_get_stop_sequences_from_messages(messages)\n        )\n\n        resp = response.generations[0].text\n\n        for s in _get_stop_sequences_from_messages(messages):\n            resp = resp.replace(s, \"\").strip()\n\n        return resp\n\n    def text_completion(self, prompt, stop_sequences=[]):\n        \"\"\"\n        Completes text.\n        \"\"\"\n        co = cohere.Client(self.apikey)\n        response = co.generate(\n            prompt=prompt,\n            max_tokens=300, \n            stop_sequences=stop_sequences\n        )\n        resp = response.generations[0].text\n        return resp\n\nclass ChatBot():\n    \"\"\"\n    Allows you to have a chat conversation with an LLM wrapper.\n\n    In short, it manages the list of {\"role\": ..., \"content\": ...} objects for you, so you don't have to figure this out. It also interacts directly with the model.\n    \"\"\"\n\n    def __init__(self, llm, initial_system_prompt=\"You are a friendly chatbot assistant.\"):\n        \"\"\"\n        Initializes a ChatBot. 
Provide an initial_system_prompt value to request the type of chatbot you will be dealing with.\n \n Warning: not all LLMs are trained to use instructions provided in a system prompt.\n \"\"\"\n self.llm = llm \n self.messages = []\n self._append_message('system', initial_system_prompt)\n\n def _append_message(self, role, message):\n \"\"\"\n Saves a message to the chatbot's message queue.\n \"\"\"\n self.messages.append({\"role\":role, \"content\":message})\n\n def chat(self, message):\n \"\"\"\n Chats with the chatbot.\n \"\"\"\n self._append_message('user', message)\n response = self.llm.complete_chat(self.messages, \"assistant\")\n self._append_message('assistant', response)\n return response\n \n", "repo_name": "kpister/prompt-linter", "sub_path": "data/scraping/repos/DSamuelHodge~phasellm/phasellm~llms~llms.py", "file_name": "phasellm~llms~llms.py", "file_ext": "py", "file_size_in_byte": 13829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.findall", "line_number": 88, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 119, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 138, "usage_type": "call"}, {"api_name": "openai.api_key", "line_number": 149, "usage_type": "attribute"}, {"api_name": "openai.ChatCompletion.create", "line_number": 164, "usage_type": "call"}, {"api_name": "openai.ChatCompletion", "line_number": 164, "usage_type": "attribute"}, {"api_name": "openai.Completion.create", "line_number": 178, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 178, "usage_type": "attribute"}, {"api_name": "openai.Completion.create", "line_number": 195, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 195, "usage_type": "attribute"}, {"api_name": "openai.Completion.create", "line_number": 200, "usage_type": "call"}, {"api_name": "openai.Completion", "line_number": 200, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 239, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 240, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 256, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 257, "usage_type": "call"}, {"api_name": "transformers.pipeline", "line_number": 282, "usage_type": "call"}, {"api_name": "transformers.pipeline", "line_number": 292, "usage_type": "call"}, {"api_name": "transformers.pipeline", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.bfloat16", "line_number": 305, "usage_type": "attribute"}, {"api_name": "cohere.Client", "line_number": 353, "usage_type": "call"}, {"api_name": "cohere.Client", "line_number": 371, "usage_type": "call"}]} +{"seq_id": "26301982018", "text": "#!/usr/bin/env python # pylint: disable=missing-module-docstring\n# -*- coding: utf-8 -*-\n# ***************************************************************************80*************************************120\n#\n# python ./adversarial_supervision\\\n# /scripts\\\n# /1_attack\\\n# /adversarial_attack.py # pylint: disable=invalid-name\n#\n# *********************************************************************************************************************\n\n# standard import\nimport uuid\nimport os\nfrom os.path import join, exists\nfrom datetime import datetime\nfrom multiprocessing import Pool\n\n# 3rd party imports\nimport click\nimport pandas\nimport openai\nimport requests\nimport numpy\n\nclass UnrecognisedEnvironmentException(Exception):\n 
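    # The exception classes in this module mark distinct failure modes; most of them are\n    # caught by the broad except blocks in the send_text_* helpers below, which retry the request.\n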
\"\"\"This happens when entered environmenis neither dev nor prod\"\"\"\n\nclass UnscuccessfulAPIConnectionException(Exception):\n \"\"\"This happens when an API connection goes unsuccessful\"\"\"\n\nclass EmptyResponseException(Exception):\n \"\"\"This happens when a response generated is empty\"\"\"\n\nclass ServerOverloadException(Exception):\n \"\"\"This happens when Openai server is overloaded\"\"\"\n\nclass ResponseGeneratorMismatchException(Exception):\n \"\"\"This happens when response generator is neither openai nor charlie\"\"\"\n\nclass UnscuccessfulAPICallException(Exception):\n \"\"\"This happens when an API call goes unsuccessful\"\"\"\n\n@click.command()\n@click.option('-a', '--openai_api_key', type=str, required=True, help='OpenAI API key')\n@click.option('-f', '--file_path', type=str,\n default='./adversarial_supervision/dataset/transfer_expriment_behaviors.csv',\n help='Input CSV file path')\n@click.option('-b', '--adversarial_suffix_file_path', type=str,\n default='./adversarial_supervision/dataset/adv_suffix.txt',\n help=\"File path of adversarial Suffix\")\n@click.option('-d', '--dan_attack_file_path', type=str,\n default=\"./adversarial_supervision/dataset/jailbreak_dan_prefix.txt\",\n help=\"File path of DAN attack\")\n@click.option('-r', '--reply_folder_path', type=str, required=True,\n help='folder where all the replies are stored')\n@click.option('-u','--username' ,type=str,default=\"\",help='username of HTTP endpoint in Node-RED')\n@click.option('-p','--password' ,type=str,default=\"\",help='password of HTTP endpoint in Node-RED')\n@click.option('-s','--sample',type=int,default=4,help='n text to sample from dataset')\n@click.option('-n', '--num_cores', type=int, default=2, help='Number of cores for parallelisation')\n@click.option('-v', '--use_adv_suffix', is_flag=True, default=False, help='Flag for adversarial suffix usage')\n@click.option('-k', '--use_dan_attack_prefix', is_flag=True, default=False, help='Flag for DAN attack usage as prefix')\n@click.option('-e','--env' ,type=click.Choice(['dev', 'prod']),default='dev',help='Dev or prod to update')\n@click.option('-g','--get_response_from' ,\n type=click.Choice(['openai', 'charlie']),\n default='openai',\n help='Get response from OpenAI or Charlie')\n@click.option('-m', '--model', type=str, required=True, help='model name - gpt-3.5-turbo-0301 or gpt-3.5-turbo-0613')\ndef main(openai_api_key: str, file_path: str, adversarial_suffix_file_path: str,\n dan_attack_file_path: str, reply_folder_path: str, sample: int, num_cores: int,\n use_adv_suffix: bool, use_dan_attack_prefix: bool, get_response_from: str, env: str,\n username: str, password: str, model: str) -> None:\n '''Main Function'''\n\n process(openai_api_key,\n file_path,\n adversarial_suffix_file_path,\n dan_attack_file_path,\n reply_folder_path,\n sample,\n num_cores,\n use_adv_suffix,\n use_dan_attack_prefix,\n get_response_from,\n env,\n username,\n password,\n model)\n\n\ndef process(openai_api_key: str,\n file_path: str,\n adversarial_suffix_file_path: str,\n dan_attack_file_path: str,\n reply_folder_path: str,\n sample: int,\n num_cores,\n use_adv_suffix: bool,\n use_dan_attack_prefix: bool,\n get_response_from: str,\n env: str,\n username: str,\n password: str,\n model: str) -> None:\n '''calls_openai for the attack'''\n\n openai.api_key = openai_api_key\n\n # read dataset\n df = pandas.read_csv(file_path,encoding=\"utf8\",header=None)\n df.rename(columns={0:\"prompt_text\"},inplace=True)\n\n # set the GUID\n # GUIDs are read from a text file if it 
already exists; otherwise they are created and\n    # written to a text file. This makes it easier to compare the results of utterances\n    # across multiple runs\n    guid_filepath = file_path.replace(\".csv\",\"_guid.txt\")\n\n    if exists(guid_filepath):\n        with open(guid_filepath,mode=\"r\",encoding=\"utf8\") as f:\n            guid_list = f.read()\n            guid_list = guid_list.strip().split(\"\\n\")\n            if df.shape[0] != len(guid_list):\n                for _ in range(abs(df.shape[0] - len(guid_list))):\n                    guid_list.append(f\"{uuid.uuid4()}\")\n            df[\"id\"] = guid_list\n    else:\n        df[\"id\"] = df.apply(set_guid,axis=1)\n\n    guid_list = df[\"id\"].unique().tolist()\n    with open(guid_filepath, mode=\"w\", encoding=\"utf8\") as f:\n        f.write(\"\\n\".join(guid_list))\n\n    # strip any leading or trailing whitespace from the text\n    df[\"prompt_text\"] = df[\"prompt_text\"].apply(lambda x: x.strip())\n\n    # reply path\n    df[\"reply_path\"] = df[\"id\"].apply(lambda x: join(reply_folder_path,f\"{x}.txt\"))\n\n    df[\"final_prompt\"] = df['prompt_text']\n\n    if use_dan_attack_prefix:\n        # read DAN attack prefix\n        with open(dan_attack_file_path, mode=\"r\", encoding=\"utf8\") as f:\n            dan_attack = f.read()\n\n        df[\"dan_attack_prefix\"] = dan_attack\n        df[\"final_prompt\"] = df[\"dan_attack_prefix\"] + df[\"final_prompt\"]\n\n    if use_adv_suffix:\n        # read adversarial suffix\n        with open(adversarial_suffix_file_path, mode=\"r\", encoding=\"utf8\") as f:\n            adv_suffix = f.read()\n\n        df[\"adv_suffix\"] = adv_suffix\n        df[\"final_prompt\"] = df[\"final_prompt\"] + df[\"adv_suffix\"]\n\n\n    # get all the ids and corresponding texts that already have a generated response\n    completed_text_ids, completed_texts = get_completed_text_ids(reply_folder_path)\n    uncompleted_text_ids = list(set(guid_list) - set(completed_text_ids))\n\n    df.set_index(\"id\",inplace=True, drop=True)\n\n    uncompleted_df = df.loc[uncompleted_text_ids]\n    uncompleted_df[\"response\"] = \"\"\n    uncompleted_df[\"completed\"] = False\n\n    completed_df = df.loc[completed_text_ids]\n    completed_df[\"response\"] = completed_texts\n    completed_df[\"completed\"] = True\n\n    df = pandas.concat([completed_df,uncompleted_df])\n\n    # set where to get the response from\n    df[\"get_response_from\"] = get_response_from\n\n    # set the model name\n    df[\"model\"] = model\n\n    if get_response_from == \"charlie\":\n\n        # set username and password\n        df[\"username\"] = username\n        df[\"password\"] = password\n\n        # set the url depending on which system is being used - dev | prod\n        if env == 'dev':\n            url = \"https://elb.devvending.com/api/predict\"\n        elif env == 'prod':\n            url = \"https://elb.cwrtvending.com/api/predict\"\n        else:\n            raise UnrecognisedEnvironmentException('Unrecognised environment')\n\n        # set the api\n        df[\"url\"] = url\n\n\n    print(df[[\"final_prompt\",\"completed\"]])\n\n    # sample n rows from the dataset\n    df = df if sample >= df.shape[0] else df.sample(sample)\n\n    print(df[\"prompt_text\"])\n\n    # parallelization\n    pool = Pool(num_cores)\n    dfs = numpy.array_split(df, num_cores)\n    pool_results = pool.map(parallelise_calls, dfs)\n    pool.close()\n    pool.join()\n    df = pandas.concat(pool_results)\n\n    if get_response_from == \"charlie\":\n        df.drop(columns=[\"username\",\"password\"],inplace=True)\n\n    print(df[[\"response\",\"get_response_from\",\"completed\"]])\n\n\n    # write the final result with the timestamp\n    df.to_csv(join(reply_folder_path,\n                   f\"final_result_{datetime.now().isoformat()}.csv\"),\n              sep=\",\",\n              index=True,\n              encoding=\"utf8\")\n    print(f\"Results are stored in {reply_folder_path}\")\n\n\ndef 
set_guid(_: pandas.Series) -> str:\n    \"\"\"Sets the GUID for a text\"\"\"\n\n    return str(uuid.uuid4())\n\ndef get_completed_text_ids(output_file_path: str) -> tuple:\n    '''Find ids that have already been created'''\n\n    file_names = os.listdir(output_file_path)\n    completed_texts = []\n    completed_ids = []\n    for file_name in file_names:\n        if file_name.endswith(\".txt\"):\n            with open(join(output_file_path,file_name), mode=\"r\", encoding=\"utf8\") as f:\n                text = f.read().strip()\n                if text != \"\":\n                    completed_ids.append(file_name[0:-4])\n                    completed_texts.append(text)\n    return completed_ids, completed_texts\n\n\ndef parallelise_calls(df: pandas.DataFrame) -> pandas.DataFrame:\n    '''Parallelise dataframe processing'''\n\n    return df.apply(send_text, axis=1)\n\n\ndef send_text(row: pandas.Series) -> pandas.Series:\n    \"\"\"Send text\"\"\"\n\n    if row[\"get_response_from\"] == \"openai\":\n        return send_text_to_openai(row)\n    elif row[\"get_response_from\"] == \"charlie\":\n        return send_text_to_charlie(row)\n    else:\n        raise ResponseGeneratorMismatchException(\n            f\"The provided response generator - {row['get_response_from']} is neither openai nor charlie\")\n\n\ndef send_text_to_openai(row: pandas.Series) -> pandas.Series:\n    '''Send text to OpenAI'''\n\n    if not row[\"completed\"]:\n\n        try:\n            response = call_openai(row[\"final_prompt\"], row[\"model\"])\n            if not response.choices[0].message.content:\n                raise ServerOverloadException(\"Unsuccessful API Call - may be due to server overload\")\n\n            text = response.choices[0].message.content\n\n            text = text.strip()\n            if text == \"\":\n                raise EmptyResponseException(f\"Empty response generated for the text - {row.text}\")\n\n            # Writing to text file\n            with open(row[\"reply_path\"],mode=\"w\",encoding=\"utf-8\") as f:\n                f.write(text)\n            row[\"response\"] = text\n            row[\"completed\"] = True\n\n        except (Exception, ServerOverloadException, EmptyResponseException) as e: # pylint: disable=broad-exception-caught\n            print(f\"Rerunning {row.name} due to {e}\")\n            row = send_text_to_openai(row) # rerun the text\n\n    return row\n\ndef call_openai(text: str, model: str) -> str:\n    '''Calling OpenAI'''\n\n    response = openai.ChatCompletion.create(\n        model=model,\n        messages=[\n            {\"role\": \"user\", \"content\": text}\n        ],\n        temperature=0.0,\n        max_tokens=500,\n        top_p=1, # default value\n        frequency_penalty=0.0, # default value\n        presence_penalty=0.0 # default value\n    )\n\n    return response\n\ndef send_text_to_charlie(row: pandas.Series) -> pandas.Series:\n    \"\"\"Send text to Charlie\"\"\"\n\n    if not row[\"completed\"]:\n        data = {\n            \"id\": row.name,\n            \"text\": row[\"final_prompt\"]\n        }\n\n        response = requests.post(url=row.url, # pylint: disable=missing-timeout\n                                 auth=(row.username,row.password),\n                                 json=data)\n\n        try:\n            if response.status_code != 200:\n                raise UnscuccessfulAPICallException(\n                    f\"Status Code :{response.status_code} \\n\\nResponse:\\n\\n{response.json()}\")\n\n            text = response.text.strip()\n            if text == \"\":\n                raise EmptyResponseException(f\"Empty response generated for the text - {row.text}\")\n\n            # Writing to text file\n            with open(row[\"reply_path\"],mode=\"w\",encoding=\"utf-8\") as f:\n                f.write(text)\n            row[\"response\"] = text\n            row[\"completed\"] = True\n\n        except (Exception, UnscuccessfulAPICallException, EmptyResponseException) as e: # pylint: disable=broad-exception-caught\n            print(f\"Rerunning {row.name} due to {e}\")\n            row = send_text_to_charlie(row) # rerun the text\n\n    return row\n\nif __name__==\"__main__\":\n    main() # pylint: disable=no-value-for-parameter\n", 
"repo_name": "kpister/prompt-linter", "sub_path": "data/scraping/repos/zia-ai~academy/adversarial_supervision~scripts~1_attack~adversarial_attack.py", "file_name": "adversarial_supervision~scripts~1_attack~adversarial_attack.py", "file_ext": "py", "file_size_in_byte": 12590, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "click.command", "line_number": 44, "usage_type": "call"}, {"api_name": "click.option", "line_number": 45, "usage_type": "call"}, {"api_name": "click.option", "line_number": 46, "usage_type": "call"}, {"api_name": "click.option", "line_number": 49, "usage_type": "call"}, {"api_name": "click.option", "line_number": 52, "usage_type": "call"}, {"api_name": "click.option", "line_number": 55, "usage_type": "call"}, {"api_name": "click.option", "line_number": 57, "usage_type": "call"}, {"api_name": "click.option", "line_number": 58, "usage_type": "call"}, {"api_name": "click.option", "line_number": 59, "usage_type": "call"}, {"api_name": "click.option", "line_number": 60, "usage_type": "call"}, {"api_name": "click.option", "line_number": 61, "usage_type": "call"}, {"api_name": "click.option", "line_number": 62, "usage_type": "call"}, {"api_name": "click.option", "line_number": 63, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 63, "usage_type": "call"}, {"api_name": "click.option", "line_number": 64, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 65, "usage_type": "call"}, {"api_name": "click.option", "line_number": 68, "usage_type": "call"}, {"api_name": "openai.api_key", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 173, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.array_split", "line_number": 208, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 212, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 221, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 222, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 229, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 232, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 242, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 268, "usage_type": "attribute"}, {"api_name": "openai.ChatCompletion.create", "line_number": 299, "usage_type": "call"}, {"api_name": "openai.ChatCompletion", "line_number": 299, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 313, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 322, "usage_type": "call"}]} +{"seq_id": "71734570714", "text": "import json, webbrowser\nfrom tile.tile import Tile, RotateableTile\nfrom utils import utils\nimport math, pygame, os\n\nwith open(\"mod.json\") as file:\n data 
= json.load(file)\n namespace = data[\"namespace\"]\n _mod = data[\"mod\"]\n\nclass Mover(RotateableTile):\n mod = _mod\n id = namespace + \":mover\"\n texture_name = \"tiles/mover.png\"\n tags = RotateableTile.tags + [\"movable\"]\n def tick(self, world):\n super().tick(world)\n self.t_x = self.x\n self.t_y = self.y\n\n cm, sm = Mover.can_move(self.x, self.y, self.r, world)\n if not cm:\n return\n \n for o in sm:\n o.update_queue[\"x\"], o.update_queue[\"y\"] = utils.move(o.x, o.y, self.r)\n self.update_queue[\"x\"], self.update_queue[\"y\"] = utils.move(self.x, self.y, self.r)\n\n def can_move(_x, _y, _r, world):\n x, y = utils.move(_x, _y, _r)\n tx, ty = utils.move(_x, _y, _r)\n sm = []\n while world.exist(tx, ty):\n obj = world.get(tx, ty) \n if isinstance(obj, AxisMovableBlock):\n if obj.r == 0 and (_r == 0 or _r == 180):\n pass\n elif obj.r == 180 and (_r == 90 or _r == 270):\n pass\n else:\n return False, []\n if \"movable\" in obj.tags:\n obj2 = world.get(*utils.move(obj.x, obj.y, _r))\n if (isinstance(obj2, Mover)):\n if obj2.r == _r:\n c, m = Mover.can_move(obj2.x, obj2.y, obj2.r, world)\n if not c:\n return False, []\n sm += m\n return True, sm\n if obj2 == None:\n sm.append(obj)\n return True, sm\n elif \"movable\" in obj2.tags:\n sm.append(obj)\n else:\n return False, []\n else:\n return False, []\n tx, ty = utils.move(tx, ty, _r)\n return True, sm\n\nclass RickRoller(RotateableTile):\n mod = _mod\n id = namespace + \":rickroller\"\n texture_name = \"tiles/rickroller.png\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._die = False\n def tick(self, world):\n if self._die:\n self.kill()\n super().tick(world)\n return\n self.t_x = self.x\n self.t_y = self.y\n self.update_queue[\"x\"], self.update_queue[\"y\"] = utils.move(self.x, self.y, self.r)\n super().tick(world)\n def onOverlay(self, tile):\n if tile == self:\n return\n webbrowser.open(\"https://www.youtube.com/watch?v=dQw4w9WgXcQ\")\n self._die = True\n\nclass SolidBlock(Tile):\n mod = _mod\n id = namespace + \":solidblock\"\n texture_name = \"tiles/solidblock.png\"\n\nclass MoveableBlock(Tile):\n mod = _mod\n id = namespace + \":moveableblock\"\n texture_name = \"tiles/moveableblock.png\"\n tags = [\"movable\", \"solid\"]\n\nclass LevelFinish(Tile):\n mod = _mod\n id = namespace + \":levelfinish\"\n texture_name = \"tiles/levelfinish.png\"\n def tick(self, world):\n super().tick(world)\n self.world = world\n def onHit(self, tile, side):\n try:\n self.world.objects.clear()\n except:\n pass\n\nclass Rotater(Tile):\n tags = RotateableTile.tags\n\n mod = _mod\n id = namespace + \":rotater\"\n texture_name = \"tiles/rotater.png\"\n\n textures = [pygame.surface.Surface((32, 32)) for _ in range(0, 2)]\n def __init__(self, x: int, y: int, r: float):\n super().__init__(x, y)\n self.r = r\n texture = pygame.image.load(os.path.join(\"mods\", self.__class__.mod, self.__class__.texture_name))\n self.textures[0] = texture.copy()\n self.textures[1] = pygame.transform.flip(texture, True, False)\n\n def onHit(self, tile, side):\n if side % 2:\n return\n if not \"rotatable\" in tile.tags:\n return\n if tile.id == self.id:\n return\n if self.r // 180:\n tile.update_queue[\"r\"] = (tile.r + 90) % 360\n else:\n tile.update_queue[\"r\"] = (tile.r - 90) % 360\n\n def get_texture(self):\n return self.textures[self.r // 180 > 0]\n def empty_instance(cl):\n return cl(0, 0, 0)\n\nclass Cloner(RotateableTile):\n mod = _mod\n id = namespace + \":cloner\"\n texture_name = \"tiles/cloner.png\"\n\n tags = 
RotateableTile.tags + [\"movable\"]\n\n def tick(self, world):\n super().tick(world)\n bx, by = utils.move(self.x, self.y, (self.r - 180) % 360)\n tx, ty = utils.move(self.x, self.y, self.r)\n if world.exist(bx, by) and not world.exist(tx, ty):\n obj = world.get(bx, by)\n obj2 = obj.copy()\n obj2.x, obj2.y = tx, ty\n obj2.should_tick = False\n world.objects.append(obj2)\n\nclass AxisMovableBlock(Tile):\n mod = _mod\n id = namespace + \":axismovableblock\"\n texture_name = \"tiles/axismovableblock.png\"\n\n tags = RotateableTile.tags + [\"movable\"]\n\n def __init__(self, x, y, r):\n super().__init__(x, y)\n self.r = r\n if self.r == 270:\n self.r = 180\n if self.r == 90:\n self.r = 0\n self.textures = [None, None]\n texture = pygame.image.load(os.path.join(\"mods\", self.__class__.mod, self.__class__.texture_name))\n self.textures[0] = texture.copy()\n self.textures[1] = pygame.transform.rotate(texture, 90)\n def get_texture(self):\n return self.textures[self.r // 180 > 0]\n def empty_instance(cl):\n return cl(0, 0, 0)\n\ndef get_tiles():\n return [Mover, RickRoller, SolidBlock, MoveableBlock, LevelFinish, Rotater, Cloner, AxisMovableBlock]\n", "repo_name": "jakiki6/justastupidgame", "sub_path": "mods/main/tiles.py", "file_name": "tiles.py", "file_ext": "py", "file_size_in_byte": 5811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "json.load", "line_number": 7, "usage_type": "call"}, {"api_name": "tile.tile.RotateableTile", "line_number": 11, "usage_type": "name"}, {"api_name": "tile.tile.RotateableTile.tags", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tile.tile.RotateableTile", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 43, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 43, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 60, "usage_type": "name"}, {"api_name": "tile.tile.RotateableTile", "line_number": 63, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 77, "usage_type": "name"}, {"api_name": "tile.tile", "line_number": 80, "usage_type": "name"}, {"api_name": "webbrowser.open", "line_number": 82, "usage_type": "call"}, {"api_name": "tile.tile.Tile", "line_number": 85, "usage_type": "name"}, {"api_name": "tile.tile.Tile", "line_number": 90, "usage_type": "name"}, {"api_name": "tile.tile.Tile", "line_number": 96, "usage_type": "name"}, {"api_name": "tile.tile.Tile", "line_number": 109, "usage_type": "name"}, {"api_name": "tile.tile.RotateableTile.tags", "line_number": 110, "usage_type": "attribute"}, {"api_name": "tile.tile.RotateableTile", "line_number": 110, "usage_type": "name"}, {"api_name": "pygame.surface.Surface", "line_number": 116, "usage_type": "call"}, 
{"api_name": "pygame.surface", "line_number": 116, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 122, "usage_type": "attribute"}, {"api_name": "tile.tile.tags", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tile.tile", "line_number": 127, "usage_type": "name"}, {"api_name": "tile.tile.id", "line_number": 129, "usage_type": "attribute"}, {"api_name": "tile.tile", "line_number": 129, "usage_type": "name"}, {"api_name": "tile.tile.update_queue", "line_number": 132, "usage_type": "attribute"}, {"api_name": "tile.tile", "line_number": 132, "usage_type": "name"}, {"api_name": "tile.tile.r", "line_number": 132, "usage_type": "attribute"}, {"api_name": "tile.tile.update_queue", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tile.tile", "line_number": 134, "usage_type": "name"}, {"api_name": "tile.tile.r", "line_number": 134, "usage_type": "attribute"}, {"api_name": "tile.tile.RotateableTile", "line_number": 141, "usage_type": "name"}, {"api_name": "tile.tile.RotateableTile.tags", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tile.tile.RotateableTile", "line_number": 146, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 150, "usage_type": "name"}, {"api_name": "utils.utils.move", "line_number": 151, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 151, "usage_type": "name"}, {"api_name": "tile.tile.Tile", "line_number": 159, "usage_type": "name"}, {"api_name": "tile.tile.RotateableTile.tags", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tile.tile.RotateableTile", "line_number": 164, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 176, "usage_type": "attribute"}]} +{"seq_id": "15061198306", "text": "import torch\nimport datetime\nimport time\nimport sys\nimport logging\n#import argparse\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.utils import save_image, make_grid\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.cuda.amp import autocast\n\nfrom tqdm import tqdm\nimport numpy as np\nimport pandas as pd\nfrom sklearn import manifold\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport PIL\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom argparse import ArgumentParser, Namespace\n\n#from models_resnet_pl import *\nfrom models_pl import *\nfrom datasets_resnet import *\n\nimport torch.distributed as dist\n#from parallel import DataParallelModel as DPM \n#from parallel import DataParallelCriterion as DPC\nfrom apex import *\nimport pytorch_ssim\nimport options\nimport save_im as 
sv\nimport post_process as pp\nimport pandas as pd\nimport csv\nimport cv2 as cv\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.trainer import Trainer\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom collections import OrderedDict\nfrom pytorch_lightning.utilities import _OMEGACONF_AVAILABLE, rank_zero_only, rank_zero_warn\nimport os\n#os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n\ncuda = True if torch.cuda.is_available() else False\n#device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n#print(\"GPU status: %d\"%torch.cuda.device_count())\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\n#dataroot = \"../data/ICT_10_26_mix_dataset\"\n\nclass UnNormalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.mul_(s).add_(m)\n # The normalize code -> t.sub_(m).div_(s)\n return tensor\n\ndef clean_dir(folder):\n for filename in os.listdir(folder):\n file_path = os.path.join(folder, filename)\n os.remove(file_path)\n\nclass WaeGAN(LightningModule):\n\n def __init__(self, args):\n super().__init__()\n self.save_hyperparameters()\n self.latent_dim = args.n_z\n self.lr = args.lr\n self.n_critic = args.n_critic\n self.args = args\n self.smth = 0.45#args.smth\n #self.automatic_optimization=False\n #self.b1 = b1\n #self.b2 = b2\n self.batch_size = args.batch_size\n self.one = torch.tensor(1,dtype=torch.float)#.to(self.device)\n # if args.precision==16:\n # self.one = self.one.half()\n # else:\n # pass\n # self.mone = -1*self.one\n \n \n self.df_csv = f\"./csv/{args.date}_{args.dataset}.csv\"\n self.tmp_csv = f\"./tmp/{args.date}_{args.dataset}_result.csv\"\n self.tmp_pred = f\"./tmp/{args.date}_{args.dataset}_predict.csv\"\n # networks\n self.generator_unet = ResNetUNet(args)#.to(self.device)\n self.discriminator_unet = MultiDiscriminator(args)#.to(self.device)\n self.mse_loss = nn.MSELoss()#.to(self.device)\n self.adv_loss = torch.nn.BCEWithLogitsLoss()#.to(self.device)\n self.aux_loss = LabelSmoothingCrossEntropy(0.1)#LabelSmoothing(self.smth)#torch.nn.CrossEntropyLoss(label_smoothing=self.smth)# \n self.criterion = pytorch_ssim.SSIM()#.to(self.device)\n\n self.generator_unet.apply(weights_init_normal)\n self.discriminator_unet.apply(weights_init_normal)\n\n self.k_enc = args.gp_lambda*args.k_wass\n self.no_sample = 0\n self.sum_test = 0\n self.json_dir = f\"./tmp/{args.date}_{args.dataset}_json\"\n self.jpg_dir = f\"./tmp/{args.date}_{args.dataset}_jpg\"\n self.png_dir = f\"./tmp/{args.date}_{args.dataset}_png\"\n self.org_dir = f\"./tmp/{args.date}_{args.dataset}_org\"\n self.gt_dir = f\"./tmp/{args.date}_{args.dataset}_gt\"\n self.pred_dir = f\"./tmp/{args.date}_{args.dataset}_pred\"\n os.makedirs(self.json_dir, exist_ok=True)\n os.makedirs(self.jpg_dir, exist_ok=True)\n os.makedirs(self.png_dir, exist_ok=True)\n os.makedirs(self.org_dir, exist_ok=True)\n os.makedirs(self.gt_dir, exist_ok=True)\n os.makedirs(self.pred_dir, exist_ok=True)\n #clean_dir(self.json_dir)\n #clean_dir(self.jpg_dir)\n self.result = []\n self.inv_transform = transforms.Compose(\n [\n 
UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),\n transforms.ToPILImage(),\n ]\n )\n\n def forward(self, z):\n return self.generator_unet(z)\n\n def compute_gradient_penalty(self, real_samples, fake_samples):\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n alpha = torch.Tensor(np.random.random((real_samples.size(0), 1, 1, 1))).to(self.device)\n # Get random interpolation between real and fake samples\n interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n interpolates = interpolates.to(self.device)\n d_interpolates = self.discriminator_unet.compute_out(interpolates)\n fake = torch.Tensor(real_samples.shape[0], 1).fill_(1.0).to(self.device)\n # Get gradient w.r.t. interpolates\n gradients = torch.autograd.grad(\n outputs=d_interpolates,\n inputs=interpolates,\n grad_outputs=fake,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.size(0), -1).to(self.device)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n return gradient_penalty\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n \n lambda_gp = self.args.gp_lambda\n real_A = Variable(batch[\"A\"],requires_grad=True)#.to(self.device)\n real_B = Variable(batch[\"B\"],requires_grad=True)#.to(self.device)\n aug_A = Variable(batch[\"aug_A\"],requires_grad=True)#.to(self.device)\n labels = Variable(LongTensor(batch[\"label\"]), requires_grad=False)\n \n gen_labels = Variable(LongTensor(np.random.randint(0, self.args.n_classes,real_A.shape[0])),requires_grad=False)\n\n\n if self.args.noise_in:\n noisy = sv.gaussian(real_A,mean=0,stddev=self.args.sigma)#.to(self.device)\n else:\n noisy = aug_A\n \n if optimizer_idx == 0:\n frozen_params(self.discriminator_unet)\n free_params(self.generator_unet)\n\n generated, encoded_, e1_, _ = self.generator_unet(real_A)\n _, z_, z1_, _ = self(noisy)\n \n self.target, _ = torch.mode(torch.argmax(e1_, dim=1))\n match = (torch.argmax(e1_, dim=1) == self.target).type(Tensor)\n label_loss = self.aux_loss(z1_,e1_)\n self.log(\"matching\",match.mean().item())\n self.log(\"label loss\",label_loss)\n m_loss = self.mse_loss(real_B, generated)\n if self.args.descending:\n s_r = (1-self.current_epoch/self.args.train_max)*self.args.style_ratio\n else:\n s_r = (self.current_epoch/self.args.train_max)*self.args.style_ratio\n\n style_loss = (s_r)*(1 - self.criterion(real_B, generated))\\\n + (1-s_r)* m_loss\n \n \n \n enc_loss =(self.mse_loss(encoded_ , z_)) \n self.log(\"enc loss\",enc_loss) \n enc_loss = self.args.k_wass*enc_loss \n\n h_loss = self.args.k_wass*self.discriminator_unet(generated)\n wass_loss = -torch.mean(h_loss)\n g_loss = (style_loss + wass_loss + enc_loss) \n \n self.log(\"style loss\",style_loss)\n self.log(\"mse loss\",m_loss)\n self.log(\"g_loss\",g_loss, sync_dist=True)\n self.log(\"wass loss\",wass_loss)\n g_loss += self.args.k_wass*label_loss\n g_loss = g_loss.float()\n tqdm_dict = {'g_loss': g_loss}\n output = OrderedDict({\n 'loss': g_loss,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n return output\n\n elif optimizer_idx == 1:\n free_params(self.discriminator_unet)\n free_params(self.generator_unet)\n valid = Variable(Tensor(real_A.shape[0], 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(real_A.shape[0], 1).fill_(0.0), requires_grad=False)\n\n noisy = sv.gaussian(real_A,mean=0,stddev=self.args.sigma)\n generated, encoded_, e1_, e2_ = self(real_A.detach())\n 
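# encoded_ comes from the clean image and z_ from its noise-augmented copy; their MSE\r\n            # (enc_loss) is optimized in the generator step above and only monitored here.\r\n            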
_, z_, z1_, z2_ = self.generator_unet(noisy)\n\n real_aux, real_adv = e1_, e2_\n labels_onehot= torch.nn.functional.one_hot( labels, num_classes=self.args.n_classes)\n real_loss = self.adv_loss(real_adv,valid) + self.aux_loss(real_aux, labels_onehot)\n \n fake_aux, fake_adv = z1_, z2_\n gen_labels_onehot= torch.nn.functional.one_hot(gen_labels, num_classes=self.args.n_classes)\n fake_loss = self.adv_loss(fake_adv,fake) + self.aux_loss(fake_aux, gen_labels_onehot)\n \n \n enc_loss =(self.mse_loss(encoded_ , z_))\n self.log(\"enc loss\",enc_loss) # just monitor\n gen_loss = self.args.k_wass*(real_loss + fake_loss)/4.0\n self.log(\"genenc loss\",gen_loss) \n f_loss = self.args.k_wass*self.discriminator_unet(real_B)\n h_loss = self.args.k_wass*self.discriminator_unet(generated)\n d_loss = (torch.mean(f_loss) - torch.mean(h_loss))#wasserstein loss\n \n if self.args.clip_weight:\n d_loss -= gen_loss#enc_loss# if self.args.gram else 0\n for p in self.discriminator_unet.parameters():\n p.data.clamp_(-self.args.clip_value, self.args.clip_value)\n else:\n gradient_penalty = self.compute_gradient_penalty(real_B.data, generated.data)\n d_loss -= gen_loss#enc_loss# if self.args.gram else 0\n d_loss -= self.args.gp_lambda* self.args.k_wass* gradient_penalty\n \n \n d_loss = -d_loss.float()\n self.log(\"discriminator loss\",d_loss, sync_dist=True)\n \n tqdm_dict = {'d_loss': d_loss}\n \n output = OrderedDict({\n 'loss': d_loss,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n return output\n\n # def backward(self, loss, optimizer, optimizer_idx):\n # # do a custom way of backward\n # with autocast():\n # one = torch.tensor(1, dtype=torch.float) \n # if optimizer_idx == 0:\n # with autocast():\n # loss.backward(one,retain_graph=True)#.to(self.device)\n # else:\n # with autocast():\n # loss.backward(-1*one,retain_graph=True)#.to(self.device)\n\n\n def configure_optimizers(self):\n \n lr = self.lr\n #b1 = self.b1\n #b2 = self.b2\n opt_g = torch.optim.Adam(self.generator_unet.parameters(), lr=lr)#, betas=(b1, b2))\n opt_d = torch.optim.Adam(self.discriminator_unet.parameters(), lr=lr)#, betas=(b1, b2))\n \n return (\n {'optimizer': opt_g, 'frequency': 1},\n {'optimizer': opt_d, 'frequency': self.n_critic},\n )\n\n def train_dataloader(self):\n input_shape = (self.args.n_channel, self.args.img_height, self.args.img_width)\n dataset = ImageDataset(\"%s/%s\" % (self.args.dataroot,self.args.dataset) , input_shape, mode='train')\n return DataLoader(dataset, batch_size=self.args.batch_size, num_workers=24, pin_memory=True)\n\n def test_dataloader(self):\n input_shape = (self.args.n_channel, self.args.img_height, self.args.img_width)\n mode = self.args.val_target\n dataset = ImageDataset(\"%s/%s\" % (self.args.dataroot,self.args.dataset), input_shape, mode=mode )\n return DataLoader(dataset, batch_size= self.args.test_batch_size, shuffle=False, num_workers=24)\n\n def predict_dataloader(self):\n input_shape = (self.args.n_channel, self.args.img_height, self.args.img_width)\n #dataset = UserDataset(\"../data/%s\" % self.args.dataset, input_shape, mode=\"test\")\n dataset = UserDataset(\"../data/temporary\", input_shape, mode=\"test\")\n #dataset = UserDataset(\"../data/SNUH_test0\", input_shape, mode=\"test\")\n return DataLoader(dataset, batch_size=self.args.test_batch_size, num_workers=16)\n \n def on_epoch_end(self):\n pass\n \n def on_fit_start(self) -> None:\n pl.seed_everything(42)\n return super().on_fit_start()\n\n def test_step(self, batch, batch_idx):\n for img_A, img_B, aug_A, pathA, pathB in 
zip(batch[\"A\"], batch[\"B\"], batch[\"aug_A\"], batch[\"pathA\"], batch[\"pathB\"]):\n self.no_sample += 1\n real_A = Variable(img_A).type(Tensor)#.cuda()\n real_A = real_A.unsqueeze(0)\n real_B = Variable(img_B).type(Tensor)#.cuda()\n real_B = real_B.unsqueeze(0)\n aug_A = Variable(aug_A).type(Tensor)#.cuda()\n aug_A = aug_A.unsqueeze(0)\n fake_B, e, e1, e2 = self(real_A)\n _, z, z1, z2 = self(aug_A)\n nz_f = self.mse_loss(e, z) #+ self.mse_loss(e1, z1) + self.mse_loss(e2, z2)\n nz_f = nz_f.item()\n val = e2.squeeze().item()\n test_loss = self.mse_loss(fake_B, real_B)\n values, indexes = torch.topk(e1, k=3, dim=-1)\n indexes = indexes.data.view(-1)\n mean_v = values.mean()\n values = values - mean_v\n values = values.data.view(-1)\n \n real_A = real_A.data[0]\n fake_B = fake_B.data[0]\n real_B = real_B.data[0] #torch.cat([x for x in real_B.data.cpu()], -1)\n \n img_org = self.inv_transform(real_A.detach().clone())\n img_org = np.asarray(img_org, dtype='uint8')\n \n img_seg = self.inv_transform(fake_B.detach().clone())\n img_seg = np.asarray(img_seg, dtype='uint8')\n \n img_gt = self.inv_transform(real_B.detach().clone())#image_B\n img_gt = np.asarray(img_gt, dtype='uint8')\n \n target, t_area, area_p, _, _ =pp.critic_segmentation(img_seg)\n tp = target if area_p > 0.05 else 0\n #area_pred = t_area*area_p\n \n #tp = self.args.n_class\n iou, iou_bb, dice, unc, area_int, area_pred, area_gt, cnts = pp.critic_segmentation_by_class(tp, img_seg, img_gt, self.args)\n pngpath = os.path.relpath(self.org_dir+f\"/{batch_idx}_{self.no_sample}.png\")\n pp.save_pic(img_org,pngpath,0)\n pngpath = os.path.relpath(self.gt_dir+f\"/{batch_idx}_{self.no_sample}.png\")\n pp.save_pic(img_gt,pngpath,0)\n pngpath = os.path.relpath(self.pred_dir+f\"/{batch_idx}_{self.no_sample}.png\")\n pp.save_pic(img_seg,pngpath,0) \n self.result.append([nz_f,tp, indexes[0].item(), indexes[1].item(), indexes[2].item(),values[0].item(), test_loss.item(), val, unc, area_int, area_pred, area_gt, iou_bb, dice, iou, area_p,pathA, pathB])\n self.sum_test += test_loss.item()\n \n #del real_A, real_B, fake_B, pathA, pathB, test_loss, z, aug_A, cnts, nz_f, e, e1, e2, z1, z2\n #torch.cuda.empty_cache()\n self.log(\"test loss\",self.sum_test/self.no_sample, sync_dist=True)\n \n\n tqdm_dict = {'test_loss': test_loss}\n \n output = OrderedDict({\n 'loss': test_loss,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n return output\n \n def predict_step(self, batch, batch_idx):\n for img_A, aug_A, pathA in zip(batch[\"A\"], batch[\"aug_A\"], batch[\"pathA\"]):\n self.no_sample += 1\n real_A = Variable(img_A).type(Tensor)#.cuda()\n real_A = real_A.unsqueeze(0)\n aug_A = Variable(aug_A).type(Tensor)#.cuda()\n aug_A = aug_A.unsqueeze(0)\n fake_B, e, e1, e2 = self(real_A)\n _, z, z1, z2 = self(aug_A)\n nz_f = self.mse_loss(e, z) #+ self.mse_loss(e1, z1) + self.mse_loss(e2, z2)\n nz_f = nz_f.item()\n \n fake_B = fake_B.data[0] \n real_A = real_A.data[0] \n img_seg = self.inv_transform(fake_B.detach().clone())\n img_seg = np.asarray(img_seg, dtype='uint8')\n img_org = self.inv_transform(real_A.detach().clone())\n img_org = np.asarray(img_org, dtype='uint8') \n #img_cat = cv.vconcat([img_seg, img_gt])\n \n #tp = self.args.n_class\n #tp = int(torch.argmax(e1.squeeze()).item())\n val = e2.squeeze().item()\n values, indexes = torch.topk(e1, k=3, dim=-1)\n indexes = indexes.data.view(-1)\n mean_v = values.mean()\n values = values - mean_v\n values = values.data.view(-1)\n target, area, area_p, cnts, clist =pp.critic_segmentation(img_seg) \n base = 
os.path.splitext(pathA)[0]\n _, basename = os.path.split(base)\n jsonpath = os.path.relpath(self.json_dir+\"/\"+basename+\".json\")\n picpath = os.path.relpath(self.jpg_dir+\"/\"+basename+\".jpg\")\n pngpath = os.path.relpath(self.png_dir+\"/\"+basename+\".png\")\n #pp.save_pic(img_seg,picpath,0)\n pp.save_pic(img_org+img_seg,picpath,0)\n if cnts is not None: #len(cnts) > 0:\n M = cv.moments(cnts[0])\n M0 = M['m00']\n cX = int(M['m10'] / M0) if M0 > 0 else 0\n cY = int(M['m01'] / M0) if M0 > 0 else 0\n polygon = pp.save_contour(cnts,area_p,target,jsonpath,self.args)\n img_cv = pp.draw_pic(target,polygon,self.args)\n #img_cat = cv.vconcat([img_cat, img_cv])\n pp.save_pic(img_cv,pngpath,0)\n else:\n cX = cY = 0\n self.result.append([target, indexes[0].item(), indexes[1].item(), indexes[2].item(),values[0].item(),val, nz_f, clist, area, area_p, cX, cY, pathA])\n\n tqdm_dict = {'predict_loss': nz_f}\n \n output = OrderedDict({\n 'loss': nz_f,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n return output\n\n @rank_zero_only\n def on_predict_end(self):\n #dist.all_gather(self.result, Tensor)\n #output = [None for _ in self.result]\n \n self.result.sort(reverse=False, key=lambda list: list[5])\n sys.stdout.write(\"Sorted!!\\n\")\n logging.info(\"Sorted!!\\n\")\n\n with open(self.tmp_pred,\"w\",newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.result)\n\n @rank_zero_only\n def on_test_end(self):\n mean_test = self.sum_test / self.no_sample\n str = \"mean mse: {}\\n\".format(mean_test)\n sys.stdout.write(str)\n logging.info(str)\n\n self.result.sort(reverse=True, key=lambda list: list[0])\n sys.stdout.write(\"Sorted!!\\n\")\n logging.info(\"Sorted!!\\n\")\n\n with open(self.tmp_csv,\"w\",newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(self.result)\n\n world = args.img_width * args.img_height\n #[nz_f,tp, indexes[0].item(), indexes[1].item(), indexes[2].item(),values[0].item(), test_loss.item(), pathA, pathB, unc, area_int, area_pred, area_gt, iou_bb, dice, iou]\n df = pd.read_csv(self.tmp_csv, names=['nz_f', 'class', 'pclass', '2nd','3rd', 'diff','loss','validity','uncertainty',\\\n 'intersection','pred','gt','iou bb','f1','iou','area_p','pathA','pathB'], header=None) \n\n df['intersection'].astype(float)\n df['pred'].astype(float)\n df['gt'].astype(float)\n \n df['FP'] = df['pred'].sub(df['intersection']) #Predict an event when there was no event.\n df['FN'] = df['gt'].sub(df['intersection']) #Predict no event when in fact there was an event.\n df['TP'] = df['intersection']\n df['TN'] = df['intersection'].add(world - df['pred'].add(df['gt']))\n\n df['FP'].astype(float)\n df['FN'].astype(float)\n df['TP'].astype(float)\n df['TN'].astype(float)\n\n # Sensitivity, hit rate, recall, or true positive rate\n df['TPR'] = df['TP'].div(df['TP'].add(df['FN']))\n # Specificity or true negative rate\n df['TNR'] = df['TN'].div(df['TN'].add(df['FP']))\n # Precision or positive predictive value\n df['PPV'] = df['TP'].div(df['TP'].add(df['FP']))\n # Negative predictive value\n df['NPV'] = df['TN'].div(df['TN'].add(df['FN']))\n # Fall out or false positive rate\n df['FPR'] = df['FP'].div(df['FP'].add(df['TN']))\n # False negative rate\n df['FNR'] = df['FN'].div(df['TP'].add(df['FN']))\n # False discovery rate\n df['FDR'] = df['FP'].div(df['TP'].add(df['FP']))\n # Overall accuracy for each class\n df['ACC'] = (df['TP'].add(df['TN'])).div((df['TP'].add(df['FP'].add(df['FN'].add(df['TN'])))))\n # Approx AUC\n df['mAUC'] = 0.5*(df['TPR'].add(df['FPR']))\n str = \"Count={}, 
F1={},TP={},TN={},FP={},FN={},\\nACC={},IoU={},bbIoU={},mAUC={}, nz_f={} {}, validity={} {}, area_p={}\\n\".format(len(df), df['f1'].mean(), df['TP'].mean(),\\\n df['TN'].mean(),df['FP'].mean(),df['FN'].mean(),df['ACC'].mean(),df['iou'].mean(),df['iou bb'].mean(),df['mAUC'].mean(),df['nz_f'].quantile(0.5),df['nz_f'].std(),df['validity'].mean(),df['validity'].std(),df['area_p'].mean())\n sys.stdout.write(str)\n logging.info(str)\n \n top5 = df['pclass'].value_counts().head()\n str = f\"1st top5: index counts\\n{top5}\\n\"\n sys.stdout.write(str)\n logging.info(str)\n top5 = df['2nd'].value_counts().head()\n str = f\"2nd top5: index counts\\n{top5}\\n\"\n sys.stdout.write(str)\n logging.info(str)\n top5 = df['3rd'].value_counts().head()\n str = f\"3rd top5: index counts\\n{top5}\\n\"\n sys.stdout.write(str)\n logging.info(str)\n\n df.to_csv(self.df_csv,index=False)\n\nclass SaveImage(Callback):\n def __init__(self,args) -> None:\n self.args = args\n input_shape = (self.args.n_channel, self.args.img_height, self.args.img_width)\n self.dataset = ImageDataset(\"%s/%s\" % (self.args.dataroot,self.args.dataset), input_shape, mode='test')\n self.test_loader = DataLoader(self.dataset, batch_size= self.args.test_batch_size, shuffle=True, num_workers=16)\n \n #@rank_zero_only\n def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:\n batches_done = pl_module.current_epoch\n pl_module.generator_unet.eval()\n val_loss, validity = sv.sample_images(batches_done, self.test_loader, self.args, pl_module.generator_unet, pl_module.mse_loss, Tensor)\n self.log(\"validation loss\",val_loss)\n self.log(\"validity\",validity)\n pl_module.generator_unet.train()\n\n\ndef main(args: Namespace) -> None:\n input_shape = (args.n_channel, args.img_height, args.img_width)\n if args.precision == 16:\n Tensor = torch.cuda.HalfTensor if cuda else torch.FloatTensor\n o_level = 'O0'\n else:\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n o_level = 'O1'\n amp_back = 'apex'\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = WaeGAN(args)\n if amp_back == 'apex':\n model = amp.initialize(model.cuda(), opt_level=o_level,loss_scale=1.0)\n amp.state_dict()\n dataset = args.dataset\n date = args.date\n save_path = \"./save/{dataset}_{date}\".format(dataset=dataset,date=date)\n checkpoint_callback = ModelCheckpoint(monitor=\"mse loss\", dirpath=save_path,\n filename=\"waegan-{epoch:02d}\",\n save_top_k=3,\n mode=\"min\",\n save_last=True)\n saveim_callback = SaveImage(args)\n precision = args.precision\n accel = \"ddp\" if args.DDP else None\n callbacks = [checkpoint_callback,saveim_callback]\n logging.basicConfig(filename=\"./%s.log\" % args.date ,format='%(asctime)s %(levelname)-8s %(message)s',level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')\n logging.info(args)\n logging.getLogger('PIL').setLevel(logging.WARNING)\n\n if args.epoch !=0:\n # Load pretrained models\n start_epoch = args.epoch - 1\n if args.last:\n ckpt = ModelCheckpoint(dirpath=save_path,filename=\"last\")\n else:\n ckpt = ModelCheckpoint(dirpath=save_path,filename=\"waegan-{epoch:02d}\")\n \n #ckpt = ModelCheckpoint(dirpath=save_path,filename=\"waegan-{epoch:02d}\")\n base = os.path.basename(ckpt.format_checkpoint_name(dict(epoch=start_epoch)))\n ckpt_path = os.path.join(save_path,base)\n trainer = Trainer(gpus=args.gpu,accelerator=accel,callbacks=callbacks,\\\n precision=precision, amp_level= o_level, amp_backend=amp_back,\\\n log_every_n_steps=10, auto_select_gpus=True, 
max_epochs= args.train_max,\\\n auto_scale_batch_size=\"binsearch\", accumulate_grad_batches=1,\n sync_batchnorm=True)#gradient_clip_val=args.gp_lambda,\n \n if args.train:\n #trainer.tune(model)\n if args.epoch !=0:\n model = model.load_from_checkpoint(ckpt_path)\n model.train()\n trainer.fit(model, ckpt_path=ckpt_path)\n else:\n trainer.fit(model)\n else:\n \n if args.val_target=='train':\n model = model.load_from_checkpoint(ckpt_path)\n model.eval()\n input_shape = (args.n_channel, args.img_height, args.img_width)\n dataset = ImageDataset(\"%s/%s\" % (args.dataroot,args.dataset) , input_shape, mode='train')\n train_dataloader = DataLoader(dataset, batch_size=args.batch_size, num_workers=24)\n trainer.test(model,dataloaders=train_dataloader)\n \n \n elif args.val_target=='test':\n\n model = model.load_from_checkpoint(ckpt_path)\n model.eval()\n \n trainer.test(model)\n \n elif args.val_target=='user':\n\n model = model.load_from_checkpoint(ckpt_path)\n model.eval()\n \n trainer.predict(model)\n \n else:\n print(\"Nothing to be done !\")\n\nif __name__ == '__main__':\n \n args = options.Options()\n args = options.Options.parse(args)\n sv.init_imdirs(args)\n main(args)\n\n\n\n\n", "repo_name": "choib/waegan_pl", "sub_path": "waegan_pl/waegan_pl.py", "file_name": "waegan_pl.py", "file_ext": "py", "file_size_in_byte": 27223, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cuda.is_available", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 81, "usage_type": "call"}, {"api_name": "pytorch_lightning.core.LightningModule", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.BCEWithLogitsLoss", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pytorch_ssim.SSIM", "line_number": 114, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 128, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 129, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 130, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 131, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 132, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 133, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 137, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 
137, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 140, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 150, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 157, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 177, "usage_type": "attribute"}, {"api_name": "save_im.gaussian", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.mode", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 213, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 234, "usage_type": "call"}, {"api_name": "save_im.gaussian", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 241, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 255, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 272, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 296, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 297, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 307, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 320, "usage_type": "call"}, {"api_name": "pytorch_lightning.seed_everything", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 332, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 334, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.asarray", 
"line_number": 361, "usage_type": "call"}, {"api_name": "post_process.critic_segmentation", "line_number": 363, "usage_type": "call"}, {"api_name": "post_process.critic_segmentation_by_class", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 369, "usage_type": "call"}, {"api_name": "os.path", "line_number": 369, "usage_type": "attribute"}, {"api_name": "post_process.save_pic", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 371, "usage_type": "call"}, {"api_name": "os.path", "line_number": 371, "usage_type": "attribute"}, {"api_name": "post_process.save_pic", "line_number": 372, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 373, "usage_type": "call"}, {"api_name": "os.path", "line_number": 373, "usage_type": "attribute"}, {"api_name": "post_process.save_pic", "line_number": 374, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 385, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 395, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 409, "usage_type": "call"}, {"api_name": "torch.topk", "line_number": 415, "usage_type": "call"}, {"api_name": "post_process.critic_segmentation", "line_number": 420, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 421, "usage_type": "call"}, {"api_name": "os.path", "line_number": 421, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 422, "usage_type": "call"}, {"api_name": "os.path", "line_number": 422, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 423, "usage_type": "call"}, {"api_name": "os.path", "line_number": 423, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 424, "usage_type": "call"}, {"api_name": "os.path", "line_number": 424, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path", "line_number": 425, "usage_type": "attribute"}, {"api_name": "post_process.save_pic", "line_number": 427, "usage_type": "call"}, {"api_name": "cv2.moments", "line_number": 429, "usage_type": "call"}, {"api_name": "post_process.save_contour", "line_number": 433, "usage_type": "call"}, {"api_name": "post_process.draw_pic", "line_number": 434, "usage_type": "call"}, {"api_name": "post_process.save_pic", "line_number": 436, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 443, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 456, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 456, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 457, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 460, "usage_type": "call"}, {"api_name": "pytorch_lightning.utilities.rank_zero_only", "line_number": 450, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 467, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 467, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 468, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 471, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 471, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 472, "usage_type": "call"}, {"api_name": 
"csv.writer", "line_number": 475, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 480, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 517, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 517, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 518, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 522, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 522, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 523, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 526, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 526, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 527, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 530, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 530, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 531, "usage_type": "call"}, {"api_name": "pytorch_lightning.utilities.rank_zero_only", "line_number": 463, "usage_type": "name"}, {"api_name": "pytorch_lightning.callbacks.Callback", "line_number": 535, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 540, "usage_type": "call"}, {"api_name": "pytorch_lightning.trainer.Trainer", "line_number": 543, "usage_type": "name"}, {"api_name": "pytorch_lightning.core.LightningModule", "line_number": 543, "usage_type": "name"}, {"api_name": "save_im.sample_images", "line_number": 546, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 552, "usage_type": "name"}, {"api_name": "torch.cuda", "line_number": 555, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 555, "usage_type": "attribute"}, {"api_name": "torch.cuda", "line_number": 558, "usage_type": "attribute"}, {"api_name": "torch.FloatTensor", "line_number": 558, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 571, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 580, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 580, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 581, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 582, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 582, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 588, "usage_type": "call"}, {"api_name": "pytorch_lightning.callbacks.ModelCheckpoint", "line_number": 590, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 593, "usage_type": "call"}, {"api_name": "os.path", "line_number": 593, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 594, "usage_type": "call"}, {"api_name": "os.path", "line_number": 594, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.trainer.Trainer", "line_number": 595, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 616, "usage_type": "call"}, {"api_name": "options.Options", "line_number": 639, "usage_type": "call"}, {"api_name": "options.Options.parse", "line_number": 640, "usage_type": "call"}, {"api_name": "options.Options", "line_number": 640, "usage_type": "attribute"}, {"api_name": "save_im.init_imdirs", "line_number": 641, "usage_type": "call"}]} +{"seq_id": "13013521864", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 27 
23:11:05 2017\r\n\r\n@author: ATruong1\r\n\"\"\"\r\n#%%\r\n# -*- coding: utf-8 -*-\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\nfrom sklearn.preprocessing import Imputer\r\nfrom pandas import get_dummies\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\nfrom sklearn import decomposition\r\nfrom scipy.spatial.distance import pdist, squareform\r\nimport matplotlib.pyplot as plt\r\n#%%\r\ndef cleanAndSelect(data, var_selected):\r\n #selectionne les variables listées dans car_selected ['var1', 'var2', 'var3'], \r\n \r\n data = data[['movie_title','title_year']+var_selected] \r\n \r\n data = data.dropna()\r\n data = data.drop_duplicates(['movie_title','title_year'])\r\n data.index.name = 'film_id'\r\n data = data.reset_index()\r\n \r\n \r\n \r\n return data, data[var_selected].copy()\r\n#%%\r\ndef feat_to_drop(data, threshold):\r\n # renvoie une liste des variables à garder après condition de fréquence avec 'threshold'\r\n filtered = data.sum(axis = 0) < threshold \r\n return filtered.index[filtered].tolist()\r\ndef addColumnForEachWord(data,variable, threshold):\r\n #va créer une colonne pour chaque élément presént la colonne 'variable' (pour les listes séparées par '|')\r\n def getWords(data):\r\n # obtiends une liste de tous les genres présents dans la colonne 'Genres' (unique)\r\n serie = data[variable].str.split('|').dropna()\r\n serie_ = serie.agg(['sum'])\r\n return list(set(serie_.values[0]))\r\n def add_column_eachWord(data, words):\r\n # rajoute une colonne pour chaque genre de film\r\n for word in words:\r\n data[word] = 0\r\n return data\r\n def add_column_wordsSplit(data):\r\n word_split = data[data[variable].isnull()==False][variable].apply(split_)\r\n data['words_split'] = word_split\r\n return data\r\n def fill_column_eachWord(row):\r\n #remplis les colonnes avec des 1 et des 0 si le genre correspond au film\r\n if pd.isnull(row[variable]) == False:\r\n words = row.words_split\r\n for word in words:\r\n row[word] = 1\r\n return row\r\n def split(string , separator):\r\n # parse un string avec des séparateurs\r\n return string.split(separator)\r\n def split_(string):\r\n return split(string,'|')\r\n data_ = add_column_eachWord(data,getWords(data))\r\n data_ = add_column_wordsSplit(data)\r\n data_ = data_.apply(fill_column_eachWord, axis = 1)\r\n data_ = data_.drop(['words_split',variable], axis=1)\r\n data_ = data_.drop(feat_to_drop(data_[getWords(data)], threshold), axis = 1)\r\n return data_\r\ndef addColumnForEachContent(data,variable, threshold):\r\n # renvoie une colonne pour chque élément dans la colonne 'variable'\r\n data_sup = get_dummies(data[variable])\r\n data_ = pd.concat([data, data_sup], axis=1)\r\n data_ = data_.drop(variable, axis=1)\r\n data_ = data_.drop(feat_to_drop(data_sup, threshold), axis=1)\r\n return data_\r\ndef plotSilhouette(data, start = 200, end = 1000, step = 100):\r\n #plot silhouettes pour nb de centroids allant de 'start' à 'end'\r\n range_ = range(start, end, step)\r\n res =[]\r\n for k in range_:\r\n kmeans = KMeans(n_clusters=k).fit(data)\r\n res.append(metrics.silhouette_score(data_,kmeans.labels_))\r\n plt.plot(range_,res,marker='o')\r\n plt.xlabel('Score de Silhouette')\r\n plt.savefig('scoreSilhouette_{}_{}.png'.format(start,end))\r\n plt.show()\r\n return\r\ndef distance_matrix(data):\r\n #Calcule une matrice de distance euclidienne entre toutes les lignes de 'data'\r\n dist_ = pdist(data, 'euclidean')\r\n dist_ = pd.DataFrame(squareform(dist_))\r\n return dist_\r\ndef getRecommendation(index_bis , 
info, distanceMatrix):\r\n # renvoie les 5 éléments les plus proches de 'index_bis' dans 'distance_matrix'\r\n nsmallest_list = distanceMatrix.nsmallest(6, index_bis).index.values.tolist()\r\n del nsmallest_list[0] \r\n res = info.iloc[nsmallest_list]\r\n return res\r\ndef recommend(data, info, film_id, d_matrix):\r\n #Renvoie les films recommandés par la fonction 'getRecommend'\r\n index = info.index[info['film_id'] == film_id].tolist()\r\n if len(index) == 0:\r\n return None, None\r\n else:\r\n index_ = index[0]\r\n return info.iloc[[index_]] , getRecommendation(index_,info,d_matrix)\r\ndef print_( string ):\r\n # Format de print\r\n separator = \"---------------------------\"\r\n print(separator + \" \" + string + \" \" + separator)\r\n return\r\n#%% Chargement\r\npd.set_option('display.width', 1000)\r\ndata = pd.read_csv('movie_metadata.csv', sep=\",\")\r\n#%% Clean et Observation\r\ninfo, data_ = cleanAndSelect(data, ['num_voted_users','actor_1_name','actor_2_name','actor_3_name','imdb_score','genres','duration','gross','director_name','budget'])\r\ndf1 = info[['movie_title','film_id','genres','director_name','title_year']].head(10)\r\ndf2 = info[['movie_title','film_id','genres','director_name','title_year']].sample(10)\r\nprint(df1.append(df2))\r\n\r\n\r\n#%% Ajout des genres\r\nthreshold = 0\r\ndata_ = addColumnForEachWord(data_,'genres', threshold)\r\n#%% Ajout des mots clés\r\n#data_ = addColumnForEachWord(data_,'plot_keywords', threshold)\r\n#%% Ajout des réalisateurs\r\ndata_ = addColumnForEachContent(data_,'director_name', threshold)\r\n#%% Ajout des Acteurs (présence de l'acteur dans le film 1 ou 0, peu importe Acteur1, Acteur 2, Acteur3)\r\ndata_['actors']= data_['actor_1_name']+'|'+data_['actor_2_name']+'|'+data_['actor_3_name']\r\n\r\ndata_ = addColumnForEachWord(data_,'actors', threshold)\r\ndata_ = data_.drop(['actor_1_name','actor_3_name','actor_2_name'], axis=1) \r\n#%% nouveau score qui sublime les haut score avec beaucoup de votes et pénalise les score faibles avec beaucoup de vote\r\nscore_ = data['imdb_score'].divide(10)\r\nnum_voter_ = (data['num_voted_users']-data['num_voted_users'].mean()).divide(data['num_voted_users'].max())\r\ndata_['new_score'] = score_.multiply(num_voter_)\r\ndata_ = data_.drop(['num_voted_users','imdb_score'], axis = 1)\r\n#%% succès commmercial ou pas\r\ndata_['profitability'] = data_['gross'].divide(data_['budget'])\r\ndata_=data_.drop(data_['gross'])\r\n\r\n\r\n#%% rescaling des données et calcul dela matrice de distance\r\ndata_scaled = preprocessing.scale(data_)\r\n\r\ndmatrix = distance_matrix(data_scaled)\r\n\r\n#%% Recommandation avec la matrice de distance\r\n'''id du film'''\r\n# film_id = 9 Harry Potter\r\n# film_id = 3 The Dark Knight Rises\r\n# film_id = 2607 The King's Speech\r\n# 283 Gladiator\r\nfilm_id = 2607\r\n\r\nmovie, recommendations = recommend(data_, info, film_id, dmatrix)\r\nif movie is None or recommendations is None:\r\n print('Sorry, we are not able to recommend you a movie based on the selected movie')\r\nelse:\r\n selected_columns_display = ['movie_title', 'genres','director_name','title_year']\r\n print_(\"Selected Movie:\")\r\n print(movie[selected_columns_display].to_string(index=False,header=False))\r\n print_(\"Recommendations:\")\r\n print(recommendations[selected_columns_display].to_string(index=False,header=False))\r\n#%%\r\n#%%\r\nlis=[]\r\nfor k in range(2,1004,250):\r\n pca = decomposition.PCA(n_components = k)\r\n pca.fit(data_scaled)\r\n 
lis.append(pca.explained_variance_ratio_.sum())\r\nplt.plot(range(2,1004,250),lis, marker='o')\r\nplt.xlabel('Ratio de var expliquée vs nombre de dimensions')\r\n\r\n\r\n#%%\r\npca = decomposition.PCA(n_components = 1500) #70% expliqué\r\npca.fit(data_scaled)\r\ndata_trans = pca.transform(data_scaled)\r\npca.explained_variance_ratio_.sum()\r\n\r\n#%%\r\nplotSilhouette(data_trans, 2, 1003, 200)\r\n", "repo_name": "eric-snapcar/projet4", "sub_path": "Main_woNaN_v0.py", "file_name": "Main_woNaN_v0.py", "file_ext": "py", "file_size_in_byte": 7645, "program_lang": "python", "lang": "fr", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pandas.isnull", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.silhouette_score", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.pdist", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.set_option", "line_number": 115, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.scale", "line_number": 147, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 147, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 172, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 180, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 180, "usage_type": "name"}]} +{"seq_id": "22142381106", "text": "import argparse\r\nimport logging\r\nfrom train import *\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser(description='train mnist: classification of handwritten digits')\r\n parser.add_argument('--lr', type=float, required=False, default=0.01)\r\n parser.add_argument('--type', choices=['CNN', 'MLP'], required=False, default='CNN')\r\n parser.add_argument('--loglevel', choices=['INFO', 'DEBUG', 'ERROR'], required=False, default='DEBUG')\r\n args = parser.parse_args()\r\n 
logging.basicConfig(filename=\"file.log\", level=getattr(logging, args.loglevel))\r\n train(args)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "repo_name": "JuliaRS/MNIST-simple-classifier", "sub_path": "run_train_MNIST.py", "file_name": "run_train_MNIST.py", "file_ext": "py", "file_size_in_byte": 621, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "7166613179", "text": "import os\n\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import datetime, timedelta\n\nfrom bot.ent.user import User\nfrom bot.ent.user_habit import UserHabit\n\n\nclass FormHabit(StatesGroup):\n habit = State() # состояние для ожидания ввода привычки\n description = State()\n for_time = State()\n\n\nasync def new_habit(message: types.Message):\n await message.answer(\"Пожалуйста, введите название привычки.\")\n await FormHabit.habit.set()\n\n\nasync def create_habit(message: types.Message, state: FSMContext):\n engine = create_engine(os.getenv(\"path_to_database\"))\n session_maker = sessionmaker(bind=engine)\n session = session_maker()\n user = session.query(User).filter_by(id=message.from_user.id).first()\n if user is None or user.email is None:\n await message.answer(\"Пожалуйста, сначала зарегистрируйте свою электронную почту.\")\n await state.reset_state()\n return\n # Когда пользователь вводит количество дней\n days = int(message.text)\n completion_date = datetime.now() + timedelta(days=days)\n async with state.proxy() as data:\n new_habit = UserHabit(\n id=message.from_user.id,\n email=user.email,\n name=data['habit'],\n desc=data['description'],\n for_time=completion_date\n )\n print(\"s\")\n session.add(new_habit)\n session.commit()\n await message.answer(f\"Привычка '{data['habit']}' была успешно добавлена!\")\n await state.finish()\n\n\nasync def add_desc_habit(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['habit'] = message.text\n await message.answer(\"Введите описание привычки.\")\n await FormHabit.description.set() # Переходим к состоянию описания задачи\n\n\nasync def add_deadline_habit(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['description'] = message.text\n await message.answer(\"Введите количество дней до соблюдения привычки.\")\n await FormHabit.for_time.set()\n", "repo_name": "mkdemkov/HealthApp-habits_tracker", "sub_path": "bot/functions/habit/add_new_habit.py", "file_name": "add_new_habit.py", "file_ext": "py", "file_size_in_byte": 2479, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "aiogram.dispatcher.filters.state.StatesGroup", "line_number": 14, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 15, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 16, "usage_type": "call"}, {"api_name": "aiogram.dispatcher.filters.state.State", "line_number": 17, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 20, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 20, "usage_type": 
"name"}, {"api_name": "aiogram.types.Message", "line_number": 25, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 25, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.create_engine", "line_number": 26, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 27, "usage_type": "call"}, {"api_name": "bot.ent.user.User", "line_number": 29, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 36, "usage_type": "call"}, {"api_name": "bot.ent.user_habit.UserHabit", "line_number": 38, "usage_type": "call"}, {"api_name": "aiogram.types.Message", "line_number": 52, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 52, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 52, "usage_type": "name"}, {"api_name": "aiogram.types.Message", "line_number": 59, "usage_type": "attribute"}, {"api_name": "aiogram.types", "line_number": 59, "usage_type": "name"}, {"api_name": "aiogram.dispatcher.FSMContext", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "24896677480", "text": "#!/usr/bin/env python3\n\nimport sys\nfrom utils import timing\nimport string\n\nfilename = sys.argv[1]\nwith open(filename, 'r') as fin:\n lines = [line.strip() for line in fin.readlines()]\n\ndef get_priority(letter):\n return string.ascii_letters.index(letter) + 1\n\n\n@timing\ndef main():\n part1_score = 0\n part2_score = 0\n\n # part 1\n for line in lines:\n first, second = line[:len(line)//2], line[len(line)//2:]\n part1_score += get_priority(\n list(\n set(first).intersection(set(second))\n )[0]\n )\n\n print('part1 score', part1_score)\n\n # part 2\n groups = list(zip(*(iter(lines),) * 3))\n\n for a,b,c in groups:\n part2_score += get_priority(\n list(\n set(a).intersection(set(b)).intersection(set(c))\n )[0]\n )\n\n print('part2 score', part2_score)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "kishvanchee/advent-of-code", "sub_path": "2022/python/day03.py", "file_name": "day03.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "string.ascii_letters.index", "line_number": 12, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 12, "usage_type": "attribute"}, {"api_name": "utils.timing", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "17358630443", "text": "import numpy as np\nimport math\nimport torch\nimport logging\nlogger = logging.getLogger()\n\nclass Iterator():\n \"\"\"\n Iterator for list of tensors whose first dimension match.\n \"\"\"\n\n def __init__(self, tensors, batch_size, \n allow_smaller=True, \n shuffle=True,\n sampling_weights=None,\n continuous=True):\n self.tensors = tensors\n self.batch_size = batch_size\n self.allow_smaller = allow_smaller\n self.shuffle = shuffle and sampling_weights is None \n self.sampling_weights = sampling_weights\n self.continuous = continuous\n self.device = tensors[0].device\n\n # number of elements in each tensor should be equal and a positive number\n n_elems = [len(t) for t in tensors]\n assert 
np.all(np.equal(n_elems, n_elems[0]))\n self.n_sample = n_elems[0]\n assert self.n_sample > 0\n\n while self.n_sample < self.batch_size:\n self.tensors = [t.repeat(2, *([1] * (len(t.shape) - 1))) for t in self.tensors]\n self.n_sample *= 2\n logger.info('Tensors are repeated, new sizes:{}'.format([t.shape for t in self.tensors]))\n\n self._s_ix = 0 # index of sample that will be fetched as the first sample in next_batch\n self._order = torch.zeros(self.n_sample, dtype=torch.long, device=self.device) # order of samples fetched in an epoch\n self.reset_batch_order()\n\n def __len__(self):\n return math.ceil(self.n_sample / self.batch_size)\n\n def __iter__(self):\n return self\n\n def _check_new_epoch(self):\n if self.allow_smaller:\n # check whether there is no not-fetched sample left\n return self._s_ix >= self.n_sample\n else:\n # check whether number of remaining not-fetched samples less than the batch size\n return self.n_sample - self._s_ix < self.batch_size\n\n def reset_batch_order(self):\n self._s_ix = 0\n if self.sampling_weights is not None:\n torch.multinomial(self.sampling_weights, self.n_sample, replacement=True, out=self._order)\n elif self.shuffle:\n torch.randperm(self.n_sample, out=self._order)\n else:\n torch.arange(self.n_sample, out=self._order)\n \n def __next__(self):\n new_epoch = self._check_new_epoch()\n if new_epoch:\n self.reset_batch_order()\n if not self.continuous:\n raise StopIteration\n\n inds = self._order[self._s_ix : self._s_ix + self.batch_size]\n self._s_ix += self.batch_size\n batch = [t[inds] for t in self.tensors]\n return batch\n \n\ndef compute_sampling_weights(labels):\n classes = np.unique(labels)\n assert classes.max() == (classes.shape[0] - 1)\n n_samples_per_class = np.array([len(np.where(labels == c)[0]) for c in classes])\n class_weights = 1. 
/ n_samples_per_class\n sample_weights = np.array([class_weights[l] for l in labels])\n sample_weights = torch.from_numpy(sample_weights).float()\n return sample_weights\n", "repo_name": "mbsariyildiz/gmn-zsl", "sub_path": "src/data/iterator.py", "file_name": "iterator.py", "file_ext": "py", "file_size_in_byte": 3111, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 22, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 37, "usage_type": "attribute"}, {"api_name": "math.ceil", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.randperm", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "31483597288", "text": "from fastapi import APIRouter, Response, UploadFile, File, Form\nfrom json import dumps\n\nfrom app import main, config\nfrom app.s3 import upload_fileobj\nfrom app.logger import logger\n\nrouter = APIRouter()\n\n@router.post(\"/\", response_class=Response)\ndef object_image_input(image: UploadFile = File(...), # ... 
= required\n image_name: str = Form(...),\n time: int = Form(...)): # epoch (seconds)\n\n # Upload image to S3\n upload_fileobj(image.file, image_name, config.S3_BUCKET_OBJECT_IMAGE)\n image_s3_uri = f\"s3://{config.S3_BUCKET_OBJECT_IMAGE}/{image_name}\"\n logger.info(f\"Image is uploaded to {image_s3_uri}\")\n\n # Send data to Kafka\n message = {'image_path': image_s3_uri}\n main.kafka_producer.send(config.KAFKA_TOPIC_OBJECT_IMAGE,\n value=dumps(message).encode(encoding='UTF-8'))\n logger.info(f\"Message is published to Kafka: {dumps(message)}\")", "repo_name": "senior-project-spai/image-input-api", "sub_path": "app/app/routes/object.py", "file_name": "object.py", "file_ext": "py", "file_size_in_byte": 934, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.APIRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "fastapi.UploadFile", "line_number": 11, "usage_type": "name"}, {"api_name": "fastapi.File", "line_number": 11, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 12, "usage_type": "call"}, {"api_name": "fastapi.Form", "line_number": 13, "usage_type": "call"}, {"api_name": "app.s3.upload_fileobj", "line_number": 16, "usage_type": "call"}, {"api_name": "app.config.S3_BUCKET_OBJECT_IMAGE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "app.config", "line_number": 16, "usage_type": "name"}, {"api_name": "app.config.S3_BUCKET_OBJECT_IMAGE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.config", "line_number": 17, "usage_type": "name"}, {"api_name": "app.logger.logger.info", "line_number": 18, "usage_type": "call"}, {"api_name": "app.logger.logger", "line_number": 18, "usage_type": "name"}, {"api_name": "app.main.kafka_producer.send", "line_number": 22, "usage_type": "call"}, {"api_name": "app.main.kafka_producer", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.main", "line_number": 22, "usage_type": "name"}, {"api_name": "app.config.KAFKA_TOPIC_OBJECT_IMAGE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.config", "line_number": 22, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "app.logger.logger.info", "line_number": 24, "usage_type": "call"}, {"api_name": "app.logger.logger", "line_number": 24, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "fastapi.Response", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "39897527233", "text": "#coding:utf-8\r\n\r\nimport psutil\r\n\r\n'''\r\n获取电脑后台正在运行的Process信息\r\n'''\r\n\r\n#定义进程列表\r\nProcessIds=psutil.pids()\r\n#获取进程的ID列表\r\ndef getProcessIdList():\r\n return ProcessIds\r\ndef getgetProcessNameList():\r\n ProcessNames=[]\r\n for id in ProcessIds:\r\n process=psutil.Process(id)\r\n proName=process.name()\r\n ProcessNames.append(proName)\r\n return ProcessNames\r\n\r\n", "repo_name": "GIS90/python_base_use", "sub_path": "WinNagois/processInfo.py", "file_name": "processInfo.py", "file_ext": "py", "file_size_in_byte": 426, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "psutil.pids", "line_number": 10, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "4516839475", "text": "\"\"\" helpers for importing and managing program modules\n\"\"\"\n\nimport importlib\nfrom elstruct import par\nfrom elstruct 
import pclass\n\n\n# Functions to import and call the appropriate writer function\ndef call_module_function(prog, function, *args, **kwargs):\n \"\"\" call the module implementation of a given function\n\n :param prog: the program\n :type prog: str\n :param function_template: a function with the desired signature\n :type function_template: function\n \"\"\"\n\n def _rename_prog(prog):\n \"\"\" Rename a program if number does not match module name \"\"\"\n if prog in ('molpro2021', 'molpro2021_mppx'):\n prog = 'molpro2015'\n elif prog == 'gaussian16':\n prog = 'gaussian09'\n return prog\n\n new_name = _rename_prog(prog)\n assert new_name in pclass.values(par.Program)\n assert new_name in program_modules_with_function(function)\n\n name = f'_{_rename_prog(prog)}'\n module = importlib.import_module(f'elstruct.writer.{name:s}')\n writer = getattr(module, 'write_input')\n\n return writer(function, *args, **kwargs)\n\n\ndef program_modules_with_function(function):\n \"\"\"\n :param function: a function with the desired signature\n :type function: function\n \"\"\"\n\n progs = []\n for prog in pclass.values(par.Program):\n if function in WRITER_MODULE_DCT[prog]:\n progs.append(prog)\n\n return progs\n\n\n# Information on what writers have been implemented\nclass Job():\n \"\"\" Names of electronic structure jobs to ne written\n \"\"\"\n ENERGY = 'energy'\n GRADIENT = 'gradient'\n HESSIAN = 'hessian'\n VPT2 = 'vpt2'\n IRC = 'irc'\n MOLPROP = 'molecular_properties'\n OPTIMIZATION = 'optimization'\n\n\n# Dictionaries that dictate what writer/reader functionality\nWRITER_MODULE_DCT = {\n par.Program.CFOUR2: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION),\n par.Program.GAUSSIAN09: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION,\n Job.MOLPROP, Job.IRC, Job.VPT2),\n par.Program.GAUSSIAN16: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION,\n Job.MOLPROP, Job.IRC, Job.VPT2),\n par.Program.MOLPRO2015: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION,\n Job.MOLPROP, Job.IRC, Job.VPT2),\n par.Program.MOLPRO2021: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION,\n Job.MOLPROP, Job.IRC, Job.VPT2),\n par.Program.MRCC2018: (\n Job.ENERGY, Job.HESSIAN, Job.OPTIMIZATION),\n par.Program.NWCHEM6: (),\n # par.Program.NWCHEM6: (\n # Job.ENERGY, Job.OPTIMIZATION),\n par.Program.ORCA4: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION),\n par.Program.PSI4: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION,\n Job.MOLPROP, Job.IRC),\n par.Program.QCHEM5: (\n Job.ENERGY, Job.GRADIENT, Job.HESSIAN, Job.OPTIMIZATION\n )\n}\n", "repo_name": "Auto-Mech/autoio", "sub_path": "autoio-interfaces/elstruct/writer/program_modules.py", "file_name": "program_modules.py", "file_ext": "py", "file_size_in_byte": 2940, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "elstruct.pclass.values", "line_number": 28, "usage_type": "call"}, {"api_name": "elstruct.pclass", "line_number": 28, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 28, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 28, "usage_type": "name"}, {"api_name": "importlib.import_module", "line_number": 32, "usage_type": "call"}, {"api_name": "elstruct.pclass.values", "line_number": 45, "usage_type": "call"}, {"api_name": "elstruct.pclass", "line_number": 45, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 45, "usage_type": "attribute"}, 
{"api_name": "elstruct.par", "line_number": 45, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 67, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 67, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 69, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 69, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 72, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 72, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 75, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 75, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 78, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 78, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 81, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 81, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 83, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 83, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 86, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 86, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 88, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 88, "usage_type": "name"}, {"api_name": "elstruct.par.Program", "line_number": 91, "usage_type": "attribute"}, {"api_name": "elstruct.par", "line_number": 91, "usage_type": "name"}]} +{"seq_id": "28924990245", "text": "#coding = utf8\nimport sys\nsys.path.append('../../../alg/basic')\nimport str_util\nfrom pgmpy.models import BayesianModel\nfrom pgmpy.factors.discrete import TabularCPD\nfrom pgmpy.inference import VariableElimination\nimport json\nfrom pgmpy.readwrite import BIFReader, BIFWriter, XMLBIFReader, XMLBIFWriter, ProbModelXML, UAIReader, UAIWriter\n\ndef age_id(n):\n age_parts = [1, 10, 20, 40, 60]\n for i in range(len(age_parts)):\n if n < age_parts[i]:\n return i\n return len(age_parts)\n\nclass Diagnosis:\n def __init__(self):\n self.disease_part = {}\n self.disease_rate = {}\n for line in open('disease_intro.json').readlines():\n js = json.loads(line.rstrip())['struct'] \n self.disease_rate[js['id']] = js['rate']\n self.disease_part[js['id']] = js['department']\n\n self.fea_map = str_util.read_kv_file('models/feature.id')\n self.disease_id = {}\n self.disease_name = {}\n self.ds_rind = {}\n for line in open('disease_symptom.json').readlines():\n js = json.loads(line.rstrip())\n id = js['id']\n if not id in self.disease_rate:\n continue\n self.disease_name[id] = js['name']\n self.disease_id[js['name']] = id\n for s, w in js['symptoms'].items():\n if not s in self.ds_rind:\n self.ds_rind[s] = {} \n self.ds_rind[s][id] = self.disease_rate[id]\n\n print('init succeed ... 
type symptoms ...', file = sys.stderr)\n\n def extract_self_explain(self, req):\n fea_list = []\n #todo: 利用ac和同义词表,解析出更多表述的症状特征\n for word in req['cont']['req_text'].split(' '):\n sym = 'S_' + word\n if sym in self.fea_map:\n fea_list.append((sym, 0))\n\n #todo: 抽取部位特征\n\n return fea_list\n\n def extract_interactive(self, req_list):\n #todo: 利用机器询问和用户回答,解析出新特征\n return []\n\n def get_observed_info(self, req_list):\n observed_info = {}\n for fea, val in self.extract_self_explain(req_list[-1]):\n observed_info[self.fea_map[fea]] = val\n for fea, val in self.extract_interactive(req_list):\n observed_info[self.fea_map[fea]] = val\n \n observed_info['SEX'] = req_list[-1]['user']['sex']\n observed_info['AGE'] = age_id(req_list[-1]['user']['age'])\n\n return observed_info \n\n def run(self, input_json):\n if len(input_json) == 0:\n return ''\n\n req_list = json.loads(input_json, encoding = 'utf-8')\n if len(req_list) == 0:\n return ''\n \n candidates = {} \n sym_list = req_list[0]['cont']['req_text'].split(' ')\n for sym in sym_list:\n if sym in self.disease_id:\n id = self.disease_id[sym]\n if not id in candidates:\n candidates[id] = 0\n candidates[id] += 1 \n if sym in self.ds_rind:\n for id, rate in self.ds_rind[sym].items():\n if not id in candidates:\n candidates[id] = 0\n candidates[id] += rate \n ids = sorted(candidates.items(), key=lambda d:d[1], reverse=True)[:20]\n print('candidates generated', file = sys.stderr)\n \n \n observed_info = self.get_observed_info(req_list)\n print(observed_info)\n\n dw = {}\n deps = {}\n for i, r in ids:\n if r >= 1:\n for d in self.disease_part[i]:\n if not d in deps:\n deps[d] = 0\n deps[d] += 1\n\n try:\n model = BIFReader('models/model.bif.%s' % i).get_model()\n except:\n continue\n infer = VariableElimination(model)\n key = self.fea_map['D_' + self.disease_name[i]]\n \n observed_info_valid = {}\n for k, v in observed_info.items():\n if k in model.nodes():\n observed_info_valid[k] = v\n score = infer.query(variables = [key], evidence = observed_info_valid)[key].values[0]\n dw[self.disease_name[i]] = score\n #print('%s: %s = %.8f' % (i, self.disease_name[i], score)) \n\n for d in self.disease_part[i]:\n if not d in deps:\n deps[d] = 0\n deps[d] += score \n \n if len(deps) == 0:\n print('unknown symptoms...', file = sys.stderr)\n return ''\n \n for name, score in sorted(dw.items(), key=lambda d:d[1], reverse=True)[:10]:\n print('%s\\t%.8f' % (name, score), file = sys.stderr)\n \n dep_list = sorted(deps.items(), key=lambda d:d[1], reverse=True)\n print('department: ', file = sys.stderr)\n for dep, wei in dep_list[:3]:\n print('%s\\t%.4f' % (dep, wei), file = sys.stderr)\n req_list[-1]['cont']['res_text'] += '%s ' % dep\n req_list[-1]['cont']['req_wei'] = 100 * dep_list[0][1] \n return req_list[-1]['cont']['res_text'] \n\nif __name__ == '__main__':\n d = Diagnosis()\n while 1:\n try:\n buf = input().rstrip()\n except:\n break\n req = {'user': {'sex': 1, 'age': 30}, 'cont': {'req_text': buf, 'req_type': 0, 'res_text': ''}}\n req_list = [req]\n for part in buf.split(' '):\n if part.startswith('age='):\n req['user']['age'] = int(part[len('age='):])\n if part.startswith('sex='):\n req['user']['sex'] = int(part[len('sex='):])\n response = d.run(json.dumps(req_list, ensure_ascii = False))\n print(req_list)\n print(response)\n", "repo_name": "lixingjian/project", "sub_path": "app/hospital_guide_robot/bayesian/experiment/predict_disease.py", "file_name": "predict_disease.py", "file_ext": "py", "file_size_in_byte": 5840, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "str_util.read_kv_file", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 43, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pgmpy.readwrite.BIFReader", "line_number": 111, "usage_type": "call"}, {"api_name": "pgmpy.inference.VariableElimination", "line_number": 114, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 131, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 135, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 138, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 140, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "35818526715", "text": "#!/usr/bin/python3\n#use ransac on green and red using findMm() no left red 4/24/2023\n# find black\nimport numpy as np\nfrom sklearn import linear_model, datasets\nimport cv2\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import CompressedImage\nimport time\nfrom cv_bridge import CvBridge, CvBridgeError\n\ncam_vs_file = \"FILE\" #CAM if camera, FILE if file\nXc = -1\nyc = -1\nnumberOfClicks = 0\n\n\ndef get_mouse_cb(event, x, y, flags, param):\n global xc, yc, numberOfClicks\n if event == cv2.EVENT_LBUTTONDOWN:\n xc = x\n yc = y\n numberOfClicks += 1\n\nclass TakePhoto:\n def __init__(self):\n self.image_received = False\n # img_topic = \"/raspicam_node/image/compressed\"\n img_topic = \"/camera/image/compressed\"\n self.image_sub = rospy.Subscriber(\n img_topic, CompressedImage, self.callback, queue_size=10)\n\n\n def callback(self, data):\n self.image_received = True\n np_arr = np.frombuffer(data.data, np.uint8)\n self.img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)\n\n# cv2.imshow(\"Orignal\", self.img)\n# self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)\n\n def get_img(self):\n if self.image_received == False:\n print(\"None\")\n return\n return self.img\n\n def save_img(self, img_title):\n if self.image_received == False:\n print(\"None\")\n return\n cv2.imwrite(img_title, self.img)\n\n def disp_img(self, img_title):\n if self.image_received == False:\n print(\"None\")\n return\n cv2.imshow(img_title, self.img)\n\ndef jabs(a,b):\n if(a > b):\n ab = a-b\n else:\n ab = b-a\n return ab\n\ndef findMm(im, xgb,ygb, xgt, ygt,prt=0):\n H, S, V = im[ygb,xgb]\n if prt == 1:\n print(\"Initial HSV infindMm \",H,S,V)\n Hl = H\n Hh = H\n Sl = S\n Sh = S\n yp = ygb-1 #next line up\n xp = xgb\n yf = ygt-1\n while yp > yf:\n for xtst in range(xp-5,xp+5): #find closest Hue to test value H\n hq,sq,V = im[yp,xtst] #q question\n if(xtst == xp-5):\n he = jabs(hq,H) #e evaluate\n hxpt = xtst\n se = jabs(sq, S)\n if prt == 1:\n print(\"xtst,yp, hq,sq \", xtst,yp, hq,sq)\n else:\n if jabs(hq, H) < he:\n he = jabs(hq, H) \n hxpt = xtst\n if jabs(sq, S) < se:\n se = jabs(sq, S)\n xp = hxpt\n if Hh < hq:\n Hh = hq\n if Hl > hq: \n Hl = hq\n if Sh < sq:\n Sh = sq\n if Sl > sq:\n Sl = sq \n if prt == 1:\n print(\"hxpt, Hl,Hh,Sl,Sh \",hxpt, Hl,Hh, Sl, Sh)\n yp -= 1\n if 
prt == 1:\n print(\"inTest yp,xp \", yp,xp)\n print()\n return Hl, Hh, Sl, Sh\n\n\nrospy.init_node(\"Color_Lane_Following\")\ncamera = TakePhoto()\ntime.sleep(1)\nrate = rospy.Rate(10)\nif(cam_vs_file == \"CAM\"): #camera if CAM, or FILE from file\n img = camera.get_img()\n imgsz = img.shape\n print(\"img size \", imgsz)\n\t\nelse:\n # read image\n #img = cv2.imread(\"/home/parallels/Documents/color/Lab13/smallRoadFar.jpg\")\n #img=cv2.imread('/home/parallels/Documents/color/Lab13/smallRoadClose.jpg')\n #img = cv2.imread('/home/parallels/Documents/color/Lab13/black30.jpg')\n #img = cv2.imread('/home/parallels/Documents/color/Lab13/black20_5.jpg')\n img = cv2.imread('/home/parallels/Documents/color/Lab13/black15.jpg')\n #img = cv2.imread('/home/parallels/Documents/color/Lab13/black12.jpg')\n #img = cv2.imread('/home/parallels/Documents/color/Lab13/black8.jpg')\n\n\n imgsz = img.shape\n print(\"img size \", imgsz)\n\nimgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n# show image\ncv2.namedWindow(\"image\")\n\n# define the events for the\n# mouse_click.\n\n# array to store picture values\nrows, cols = (10, 3)\nhsvValues = np.zeros((rows, cols), dtype = int)\npixelXYCords = np.zeros((rows, 2), dtype = int)\nprint(\"************** Click Information ************\")\nold_num_clk = numberOfClicks\ncv2.setMouseCallback('image', get_mouse_cb)\nwhile numberOfClicks < 9:\n cv2.imshow(\"image\", img)\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"c\"):\n break\n if(old_num_clk < numberOfClicks):\n print(\"numberOfClicks \", numberOfClicks)\n r, g, b = img[yc, xc]\n h, s, v = imgHSV[yc,xc]\n hsvValues[numberOfClicks][0] = h\n hsvValues[numberOfClicks][1] = s\n hsvValues[numberOfClicks][2] = v\n pixelXYCords[numberOfClicks][0] = xc\n pixelXYCords[numberOfClicks][1] = yc\n print(\"Coordinates \", pixelXYCords[numberOfClicks]) #x,y\n a = (b, g, r)\n colr = np.uint8([[a]])\n hsvColr = cv2.cvtColor(colr, cv2.COLOR_BGR2HSV)\n print(\" RGB, HSV \", r, g, b, \" \",h,s,v)\n old_num_clk += 1\n print()\nprint(\"DONE Clicking Image\")\nprint(\"*********** Clicking Results ***************\")\nprint(\"pixelXYCords \", pixelXYCords)\nprint(\"hsvValues \", hsvValues)\nprint(\"********************************************\")\nstart = time.process_time()\n#Set Color Limits\n#scan green line from lower click to upper click\nxgl = pixelXYCords[1][0]\nygl = pixelXYCords[1][1]\nxgu = pixelXYCords[2][0]\nygu = pixelXYCords[2][1]\ngH = hsvValues[1][0]\ngS = hsvValues[1][1]\ngHl,gHh,gSl,gSh = findMm(imgHSV, xgl,ygl, xgu, ygu) #find Green M&m\nprint(\"findMm green\", gHl, gHh, gSl, gSh)\ngreen_hsv_lower = (75, 125, 0) #was 39,142\ngreen_hsv_upper = (81, 176, 255) #was 42,174\ngreen_hsv_lower = (gHl, gSl,0) #using routine\ngreen_hsv_upper = (gHh, gSh,255) #using routine\ngreen_hsv_lower = np.array(green_hsv_lower)\ngreen_hsv_upper = np.array(green_hsv_upper)\n\nxrl = pixelXYCords[3][0]\nyrl = pixelXYCords[3][1]\nxru = pixelXYCords[4][0]\nyru = pixelXYCords[4][1]\n\nrHl,rHh, rSl,rSh = findMm(imgHSV, xrl,yrl, xru, yru) #find Red M&m\nprint(\"findMm red \", rHl,rHh, rSl,rSh)\nred_hsv_lower = ( 172, 125, 0)\nred_hsv_upper = (176, 142, 255)\nred_hsv_lower = (rHl, rSl, 0)\nred_hsv_upper = (rHh, rSh, 255)\nred_hsv_lower = np.array(red_hsv_lower)\nred_hsv_upper = np.array(red_hsv_upper)\nblk_hsv_lower = (35, 28, 40)\nblk_hsv_upper = (90, 47,90)\n\nwht_hsv_lower = (50,3,0)\nwht_hsv_upper = (94, 46, 255)\nlower_color = np.array(green_hsv_lower)\nupper_color = np.array(green_hsv_upper)\n\n # Create a mask of all pixels that fall within the 
color range\nprint(\"************ Color Limits ****************\")\nprint(\"green_hsv_lower \", green_hsv_lower)\nprint(\"green_hsv_upper \", green_hsv_upper)\nprint()\nprint(\"red_hsv_lower \", red_hsv_lower)\nprint(\"red_hsv_upper \", red_hsv_upper)\nprint()\nprint(\"white_hsv_lower \", wht_hsv_lower)\nprint(\"white_hsv_upper \", wht_hsv_upper)\nprint()\nprint(\"black_hsv_lower \", blk_hsv_lower)\nprint(\"black_hsv_upper \", blk_hsv_upper)\n\nimgHSV[0:290,:] = 0\n#mask = cv2.inRange(imgHSV, lower_color, upper_color)\nmaskg = cv2.inRange(imgHSV, green_hsv_lower, green_hsv_upper)\nmaskr = cv2.inRange(imgHSV, red_hsv_lower, red_hsv_upper)\nmaskw = cv2.inRange(imgHSV, wht_hsv_lower, wht_hsv_upper)\nmaskbk= cv2.inRange(imgHSV, blk_hsv_lower, blk_hsv_upper)\n#width, height = mask.size\ncv2.imshow(\"HSV image \",imgHSV)\ncv2.imshow(\"green_mask\", maskg)\ncv2.imshow(\"red_mask\", maskr)\ncv2.imshow(\"White_mask\", maskw)\ncv2.imshow(\"Black_mask\", maskbk)\nprint()\n\n# get green straight line x = m*y + b x = [y 1] (m b)'\nprint(\"*********** Green ********************************\")\nV = cv2.findNonZero(maskg)\n(x,y) = (V[:,0,0],V[:,0,1])\n#Insert Ransac code\nX = x.reshape(-1,1)\nY = y.reshape(-1,1)\n# Fit line using all data\nlr = linear_model.LinearRegression()\nlr.fit(Y, X)\nprint(\"linear fit \", lr.coef_, lr.intercept_)\n\n# Robustly fit linear model with RANSAC algorithm\n#ransac = linear_model.RANSACRegressor(residual_threshold = 15.)\nransac = linear_model.RANSACRegressor()\nransac.fit(Y,X)\nmg = ransac.estimator_.coef_\ncg = ransac.estimator_.intercept_\nprint(\"ransac fit \", mg, cg)\ninlier_mask = ransac.inlier_mask_\niRmax = np.where(inlier_mask > 0)[0]\niRmin = np.where(inlier_mask == 0)[0] #Outliers\n#print(f\"number of non-zero: {np.count_nonzero(inlier_mask)}\" )\nprint(\"**** size inlier_mask ***\", len(iRmax), inlier_mask.shape)\nprint(\"**** size outlier_mask ***\", len(iRmin))\n\n#A = np.vstack([y, np.ones(len(y))]).T\n#x1,y1 = np.array(pixelXYCords[1], dtype=np.float32)\n#x2,y2 = np.array(pixelXYCords[2], dtype=np.float32)\n#print(\"int pixel XY Coor 1 \", pixelXYCords[1])\n#print(\"int pixel XY Coor 2 \", pixelXYCords[2])\n#mg = (x2-x1)/(y2-y1)\n#cg = x2 - mg*y2\n#print(\" mg, cg \" , mg,cg)\n #m, c = np.linalg.lstsq(A,x, rcond=None)[0]\n #print(\"m c \", m,c)\n#R = np.abs(x - A.dot([mg, cg]))\n\nxs = x[iRmax]\nys = y[iRmax]\nAs = np.vstack([ys, np.ones(len(ys))]).T\nmsg,csg = np.linalg.lstsq(As, xs, rcond=None)[0]\nprint(\"Green ms cs \",msg,csg)\nmaskgs = np.zeros_like(maskg)\nmaskgs[ys,xs] = 250\nxg_robot = msg*imgsz[0] + csg\nprint(\"Green at robot \",xg_robot)\ncv2.imshow(\"green mask sel \", maskgs)\n\nprint(\"*********** Red Right ************\")\nV = cv2.findNonZero(maskr)\n(x,y) = (V[:,0,0],V[:,0,1])\n\n#Insert Ransac code\nX = x.reshape(-1,1)\nY = y.reshape(-1,1)\n# Fit line using all data\nlr = linear_model.LinearRegression()\nlr.fit(Y, X)\nprint(\"linear fit \", lr.coef_, lr.intercept_)\n\n# Robustly fit linear model with RANSAC algorithm\n#ransac = linear_model.RANSACRegressor(residual_threshold = 15.)\nransac = linear_model.RANSACRegressor()\nransac.fit(Y,X)\nmr1 = ransac.estimator_.coef_\ncr1 = ransac.estimator_.intercept_\nprint(\"ransac fit \", mr1, cr1)\ninlier_mask = ransac.inlier_mask_\niRmax = np.where(inlier_mask > 0)[0]\niRmin = np.where(inlier_mask == 0)[0] #Outliers\n#print(f\"number of non-zero: {np.count_nonzero(inlier_mask)}\" )\nprint(\"**** size inlier_mask ***\", len(iRmax), inlier_mask.shape)\nprint(\"**** size outlier_mask ***\", 
len(iRmin))\n\n\nxs = x[iRmax]\nys = y[iRmax]\nAs = np.vstack([ys, np.ones(len(ys))]).T\nmsr1,csr1 = np.linalg.lstsq(As, xs, rcond=None)[0]\nprint(\"Red 1 ms cs \",msr1,csr1)\nmaskr1s = np.zeros_like(maskr)\nmaskr1s[ys,xs] = 250\nxr1_robot = msr1*imgsz[0] + csr1\nprint(\"Right red at robot \",xr1_robot)\ncv2.imshow(\"red Right mask sel \", maskr1s)\n\n#print(\"*********** Red Left ************\")\n#x = x[iRmin]\n#y = y[iRmin]\n#X = x.reshape(-1,1)\n#Y = y.reshape(-1,1)\n# Fit line using all data\n#lr = linear_model.LinearRegression()\n#lr.fit(Y, X)\n#print(\"linear fit \", lr.coef_, lr.intercept_)\n\n# Robustly fit linear model with RANSAC algorithm\n#ransac = linear_model.RANSACRegressor(residual_threshold = 15.)\n#ransac = linear_model.RANSACRegressor()\n#ransac.fit(Y,X)\n#mr2 = ransac.estimator_.coef_\n#cr2 = ransac.estimator_.intercept_\n#print(\"ransac fit \", mr2, cr2)\n#inlier_mask = ransac.inlier_mask_\n#iRmax = np.where(inlier_mask > 0)[0]\n#iRmin = np.where(inlier_mask == 0)[0] #Outliers\n##print(f\"number of non-zero: {np.count_nonzero(inlier_mask)}\" )\n#print(\"**** size inlier_mask ***\", len(iRmax), inlier_mask.shape)\n#print(\"**** size outlier_mask ***\", len(iRmin))\n\n#print(\"size iRmax \", len(iRmax), len(x))\n#xs = x[iRmax]\n#ys = y[iRmax]\n\n#As = np.vstack([ys, np.ones(len(ys))]).T\n#msr2,csr2 = np.linalg.lstsq(As, xs, rcond=None)[0]\n#print(\"Red 2 ms cs \",msr2,csr2)\n#maskr2s = np.zeros_like(maskr)\n#maskr2s[ys,xs] = 250\n#xr2_robot = msr2*imgsz[0] + csr2\n#print(\"Left red at robot \",xr2_robot)\n#cv2.imshow(\"red Left mask sel \", maskr2s)\n\n\n\n#\n## x = m*y + c\n## yo = (c2-c1)/(m1-m2)\n## xo = m1*yo + c1\n#\nyo = (csr1 - csg)/(msg - msr1)\nxo = msg*yo + csg\n#y1 = (csr2 - csg)/(msg - msr2)\n#x1 = msg*y1 + csg\n#y12= (csr2 - csr1)/(msr1 - msr2)\n#x12= msr2*y12 + csr2\nprint()\nprint(\"************* Vanishing Points **************\")\nprint(\"Right red and green vanishing point xo,yo \",xo, yo)\n#print(\"Left red and green vanishing point x1,y1 \", x1,y1)\n#print(\"Right and Left reds vanishing point x12,y12 \", x12,y12)\nprint()\nprint(\"Processing time \", time.process_time() - start)\nixo = int(xo)\niyo = int(yo)\nimage2 = cv2.circle(img, (ixo,iyo), radius=3, color=(0, 0, 255), thickness=-1)\ncv2.imshow(\"Drive To \", image2)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n", "repo_name": "leonardjd/lab13src", "sub_path": "findLane.py", "file_name": "findLane.py", "file_ext": "py", "file_size_in_byte": 11954, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rospy.Subscriber", "line_number": 31, "usage_type": "call"}, {"api_name": "sensor_msgs.msg.CompressedImage", "line_number": 32, "usage_type": "argument"}, {"api_name": "numpy.frombuffer", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 38, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 59, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 112, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 
127, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 135, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 145, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 166, "usage_type": "attribute"}, {"api_name": "time.process_time", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 230, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 231, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 232, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 233, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 235, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 236, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 237, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 238, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 239, "usage_type": "call"}, {"api_name": "cv2.findNonZero", "line_number": 244, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 250, "usage_type": "name"}, {"api_name": "sklearn.linear_model.RANSACRegressor", "line_number": 256, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 256, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 283, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 285, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 289, "usage_type": "call"}, {"api_name": "cv2.findNonZero", "line_number": 292, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 299, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 299, "usage_type": "name"}, {"api_name": "sklearn.linear_model.RANSACRegressor", "line_number": 305, "usage_type": "call"}, {"api_name": "sklearn.linear_model", "line_number": 305, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 312, 
"usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 321, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 321, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 323, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 327, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 385, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 388, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 389, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 391, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 392, "usage_type": "call"}]} +{"seq_id": "39592580681", "text": "# *===================================*\n# -*- coding: utf-8 -*-\n# * Time : 2019-06-27 12:37\n# * Author : zhangsf\n# *===================================*\nfrom flask import Flask, request, render_template, redirect, url_for\nfrom werkzeug.utils import secure_filename\nimport os\nfrom flask import send_from_directory\n\napp = Flask(__name__)\n\nUPLOAD_FOLDER = 'upload'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n# 判断上传的文件是否是允许的后缀\ndef allowed_file(filename):\n return \".\" in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route(\"/upload\", methods=['GET', 'POST'])\ndef upload():\n if request.method == 'GET': # 请求方式是get\n return render_template('upload.html') # 返回模板\n else:\n if \"file\" not in request.files:\n return redirect(request.url)\n\n file = request.files.get('file') # 获取文件\n\n if file.filename == '':\n return redirect(request.url)\n\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename) # 用这个函数确定文件名称是否是安全 (注意:中文不能识别)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) # 保存文件\n return redirect(url_for('show',\n filename=filename))\n\n\n# 展示图片\n@app.route('/show/')\ndef show(filename):\n # send_from_directory可以从目录加载文件\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n\nif __name__ == '__main__':\n # 0.0.0.0代表任何能代表这台机器的地址都可以访问\n app.run(host='0.0.0.0', port=5006) # 运行程序\n\n\n", "repo_name": "zhangvalue/flask", "sub_path": "upload.py", "file_name": "upload.py", "file_ext": "py", "file_size_in_byte": 1804, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.request.files.get", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": 
"flask.redirect", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "14568659281", "text": "# import libraries\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\nimport re\nimport joblib\nimport string \nimport sys \n\nimport nltk\nnltk.download(['punkt', 'wordnet','stopwords'])\n\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem.porter import PorterStemmer\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, make_scorer\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom scipy.sparse import csr_matrix\nfrom sklearn import svm\n\nfrom class_def import Lemmer\n\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import validation_curve\n\n# Loading data lite from file path\ndef load_data(database_filepath, noDisaster):\n \"\"\"\n\tLoad database and get dataset\n\tArgs: \n\t\tdatabase_filepath (str): file path of sqlite database\n\tReturn:\n\t\tX (pandas dataframe): Features\n\t\ty (pandas dataframe): Targets/ Labels\n categories (list): List of categorical columns\n :param databse_filepath:\n \"\"\"\n engine = create_engine('sqlite:///../'+database_filepath)\n df = pd.read_sql_table('DS_messages', engine)\n engine.dispose()\n \n if noDisaster: df = df.drop('related',axis=1)\n \n X = df['message']\n y = df[df.columns[4:]]\n categories = y.columns.tolist()\n\n return X, y, categories\n\n# Loading original data from file path\ndef load_OrigData(noDisaster=True):\n \"\"\"\n\tLoad database and get dataset\n\tArgs: \n\t\tdatabase_filepath (str): file path of sqlite database\n\tReturn:\n\t\tX_train (pandas dataframe): train Features\n X_test (pandas dataframe): test Features\n\t\ty_train (pandas dataframe): Targets/ Labels for training\n y_test (pandas dataframe): Targets/ Labels for testing\n categories (list): List of categorical columns\n :param databse_filepath:\n \"\"\"\n path = '../../data/'\n filenames = ['disaster_response_messages_training.csv',\n 'disaster_response_messages_validation.csv',\n 'disaster_response_messages_test.csv',]\n for filename in filenames:\n df = pd.read_csv(path+filename, encoding = 'latin-1')\n df.drop(['id', 'split', 'original', 'genre', 'PII', 'offer', 'child_alone'], \n axis = 1, 
inplace = True)\n if noDisaster: df = df.drop('related', axis=1)\n if filename == 'disaster_response_messages_training.csv':\n X_train = df['message']\n y_train = df[df.columns[1:]]\n categories = y_train.columns.tolist()\n elif filename == 'disaster_response_messages_validation.csv':\n X_val = df['message']\n y_val = df[df.columns[1:]]\n elif filename == 'disaster_response_messages_test.csv':\n X_test = df['message']\n y_test = df[df.columns[1:]]\n\n return X_train, y_train, X_val, y_val, X_test, y_test, categories \n\n# Tokenizing\n# def tokenize(text):\n# # normalize text and remove punctuation\n# text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n# # tokenize text\n# tokens = word_tokenize(text)\n# stop_words = stopwords.words(\"english\")\n# words = [w for w in tokens if w not in stop_words]\n \n# # Reduce words to their stems\n# stemmer = PorterStemmer()\n# stemmed = [stemmer.stem(w) for w in words]\n \n# # Reduce words to their root form\n# lemmatizer = WordNetLemmatizer()\n# lemmed = [lemmatizer.lemmatize(w) for w in stemmed]\n \n# return lemmed\n \n# Building model\ndef build_model():\n \"\"\"Returns the GridSearchCV model\n Args:\n None\n Returns:\n cv: Grid search model object\n \"\"\"\n #clf = svm.SVC() #MultinomialNB() #BernoulliNB()\n\n # The pipeline has tfidf, dimensionality reduction, and classifier\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=Lemmer.tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(svm.SVC(gamma=10))) #MultinomialNB() # #BernoulliNB()\n ])\n\n # Parameters for GridSearchCV\n param_grid = {\n 'vect__max_df': (0.5, 0.75, 1.0),\n 'vect__ngram_range': ((1, 1), (1,2)),\n 'vect__max_features': (None, 5000,10000),\n 'tfidf__use_idf': (True, False)\n }\n\n cv = GridSearchCV(pipeline, param_grid)\n\n return cv\n\ndef evaluate_model(model, X_test, y_test, categories):\n \"\"\"Prints multi-output classification results\n Args:\n model (pandas dataframe): the scikit-learn fitted model\n X_text (pandas dataframe): The X test set\n y_test (pandas dataframe): the y test classifications\n category_names (list): the category names\n Returns:\n None\n \"\"\"\n\n # Generate predictions\n y_pred = model.predict(X_test)\n\n # Print out the full classification report\n print(classification_report(y_test, y_pred, target_names=categories))\n\n# Save model \ndef save_model(model, model_filepath):\n \"\"\"\n Dumps the model to given path \n Args: \n model: the fitted model\n model_filepath (str): filepath to save model\n Return:\n None\n\t\"\"\"\n joblib.dump(model, model_filepath)\n \ndef plot_validation_curve(estimator, X, y, title, axes=None, cv=None, n_jobs=None): \n if axes is None:\n _, axes = plt.subplots(1, 1, figsize=(20, 5))\n \n param_range = np.logspace(0.5, 1.5, 5)\n train_scores, test_scores = validation_curve(\n estimator,\n X,\n y,\n param_name='clf__estimator__gamma',\n cv=cv,\n n_jobs=n_jobs,\n param_range=param_range,\n #scoring=\"accuracy\",\n )\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n \n plt.title(title)\n plt.xlabel(r\"$\\gamma$\")\n plt.ylabel(\"Score\")\n plt.ylim(0.0, 1.1)\n lw = 2\n plt.semilogx(\n param_range, train_scores_mean, label=\"Training score\", color=\"darkorange\", lw=lw\n )\n plt.fill_between(\n param_range,\n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std,\n alpha=0.2,\n color=\"darkorange\",\n lw=lw,\n 
)\n plt.semilogx(\n param_range, test_scores_mean, label=\"Cross-validation score\", color=\"navy\", lw=lw\n )\n plt.fill_between(\n param_range,\n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std,\n alpha=0.2,\n color=\"navy\",\n lw=lw,\n )\n plt.legend(loc=\"best\")\n \n return plt\n \ndef plot_learning_curve(\n estimator,\n title,\n X,\n y,\n axes=None,\n ylim=None,\n cv=None,\n n_jobs=None,\n train_sizes=np.linspace(0.1, 1.0, 5),\n):\n \"\"\"\n Generate 3 plots: the test and training learning curve, the training\n samples vs fit times curve, the fit times vs score curve.\n\n Parameters\n ----------\n estimator : estimator instance\n An estimator instance implementing `fit` and `predict` methods which\n will be cloned for each validation.\n\n title : str\n Title for the chart.\n\n X : array-like of shape (n_samples, n_features)\n Training vector, where ``n_samples`` is the number of samples and\n ``n_features`` is the number of features.\n\n y : array-like of shape (n_samples) or (n_samples, n_features)\n Target relative to ``X`` for classification or regression;\n None for unsupervised learning.\n\n axes : array-like of shape (3,), default=None\n Axes to use for plotting the curves.\n\n ylim : tuple of shape (2,), default=None\n Defines minimum and maximum y-values plotted, e.g. (ymin, ymax).\n\n cv : int, cross-validation generator or an iterable, default=None\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n - None, to use the default 5-fold cross-validation,\n - integer, to specify the number of folds.\n - :term:`CV splitter`,\n - An iterable yielding (train, test) splits as arrays of indices.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide ` for the various\n cross-validators that can be used here.\n\n n_jobs : int or None, default=None\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n train_sizes : array-like of shape (n_ticks,)\n Relative or absolute numbers of training examples that will be used to\n generate the learning curve. If the ``dtype`` is float, it is regarded\n as a fraction of the maximum size of the training set (that is\n determined by the selected validation method), i.e. it has to be within\n (0, 1]. Otherwise it is interpreted as absolute sizes of the training\n sets. 
Note that for classification the number of samples usually have\n to be big enough to contain at least one sample from each class.\n (default: np.linspace(0.1, 1.0, 5))\n \"\"\"\n if axes is None:\n _, axes = plt.subplots(1, 3, figsize=(20, 5))\n\n axes[0].set_title(title)\n if ylim is not None:\n axes[0].set_ylim(*ylim)\n axes[0].set_xlabel(\"Training examples\")\n axes[0].set_ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(\n estimator,\n X,\n y,\n cv=cv,\n n_jobs=n_jobs,\n train_sizes=train_sizes,\n return_times=True,\n )\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n fit_times_mean = np.mean(fit_times, axis=1)\n fit_times_std = np.std(fit_times, axis=1)\n\n # Plot learning curve\n axes[0].grid()\n axes[0].fill_between(\n train_sizes,\n train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std,\n alpha=0.1,\n color=\"r\",\n )\n axes[0].fill_between(\n train_sizes,\n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std,\n alpha=0.1,\n color=\"g\",\n )\n axes[0].plot(\n train_sizes, train_scores_mean, \"o-\", color=\"r\", label=\"Training score\"\n )\n axes[0].plot(\n train_sizes, test_scores_mean, \"o-\", color=\"g\", label=\"Cross-validation score\"\n )\n axes[0].legend(loc=\"best\")\n\n # Plot n_samples vs fit_times\n axes[1].grid()\n axes[1].plot(train_sizes, fit_times_mean, \"o-\")\n axes[1].fill_between(\n train_sizes,\n fit_times_mean - fit_times_std,\n fit_times_mean + fit_times_std,\n alpha=0.1,\n )\n axes[1].set_xlabel(\"Training examples\")\n axes[1].set_ylabel(\"fit_times\")\n axes[1].set_title(\"Scalability of the model\")\n\n # Plot fit_time vs score\n axes[2].grid()\n axes[2].plot(fit_times_mean, test_scores_mean, \"o-\")\n axes[2].fill_between(\n fit_times_mean,\n test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std,\n alpha=0.1,\n )\n axes[2].set_xlabel(\"fit_times\")\n axes[2].set_ylabel(\"Score\")\n axes[2].set_title(\"Performance of the model\")\n\n return plt\n \ndef custom_cv(trainIndices, valIndices):\n yield trainIndices, valIndices\n \n \ndef main():\n if len(sys.argv) == 1:\n # database_filepath, model_filepath = sys.argv[1:]\n database_filepath = 'data/MDRM-lite.db'\n model_filepath = 'DS_model_SVC.pkl'\n #model_filepath = 'Kaggle_SVCLITE05.pkl'\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n \n X, y, categories = load_data(database_filepath, noDisaster=False)\n # cv = ShuffleSplit(n_splits=5, test_size=0.33, random_state=42)\n cv = joblib.load('datasplits.pkl') \n for tr, te in cv.split(X): print(\"TRAIN:\", tr, \"TEST:\", te)\n X_train = X[tr]; y_train = y.loc[tr]; X_test = X[te]; y_test = y.loc[te]\n #joblib.dump(cv, 'datasplits.pkl')\n \n #X_train, y_train, X_val, y_val, X_test, y_test, categories = load_OrigData(noDisaster=True)\n # #idx = np.random.choice(len(X_train), size=int(0.75*len(X_train)))\n # #joblib.dump(idx, 'datasplitsOrigLITE075.pkl')\n #idx = joblib.load('datasplitsOrigLITE05.pkl') \n #X_train = X_train[idx]; y_train = y_train.loc[idx]\n #X = pd.concat([X_train,X_val], sort=False); X.reset_index(drop=True, inplace=True)\n #y = pd.concat([y_train,y_val], sort=False); y.reset_index(drop=True, inplace=True)\n #valIndices = y.index[len(y_train):].values.astype(int)\n #cv = custom_cv(valIndices, valIndices)\n \n #X_train, X_test, y_train, y_test = train_test_split(X, y, 
test_size=0.33, random_state=42)\n if 'related' not in categories: model_filepath = model_filepath[:-4]+'_noRelatedClass.pkl'\n\n #print('Building model...')\n #model = build_model()\n \n model = joblib.load(model_filepath)\n\n #print('Training model {} ...'.format(model_filepath))\n #model.fit(X_train, y_train)\n # model.fit(X_train.to_numpy(), csr_matrix(y_train).todense()) # for CategoricalNB\n\n print('Evaluating model...')\n evaluate_model(model, X_test, y_test, categories)\n\n #print('Saving model...\\n MODEL: {}'.format(model_filepath))\n #save_model(model, model_filepath)\n #print('Trained model saved!') \n \n \n # plot validation curves\n # fig, axes = plt.subplots(1, 1, figsize=(10, 15))\n # title = r\"Validation Curve with (SVC)\"\n # # train_scores, test_scores = validation_curve(\n # # model.estimator,\n # # X,\n # # y,\n # # param_name='clf__estimator__gamma',\n # # cv=cv,\n # # n_jobs=1,\n # # param_range=1,\n # # scoring=\"accuracy\",\n # # )\n # plot_validation_curve(\n # model.estimator, title, X, y, axes=axes, cv=cv, n_jobs=1\n # )\n # plt.show()\n \n # plot learning curves\n # plt.close()\n #fig, axes = plt.subplots(3, 1, figsize=(10, 15))\n #title = r\"Learning Curves (SVM, RBF kernel)\"\n #plot_learning_curve(\n # model.estimator, title, X, y, axes=axes[:], ylim=(0, 1.01), cv=cv, n_jobs=1\n #)\n #plt.show()\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "IslandsLab/NLP-Disaster", "sub_path": "DisasterResponseApp/model/train-multiclassifier.py", "file_name": "train-multiclassifier.py", "file_ext": "py", "file_size_in_byte": 15680, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "nltk.download", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_sql_table", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.CountVectorizer", "line_number": 131, "usage_type": "call"}, {"api_name": "class_def.Lemmer.tokenize", "line_number": 131, "usage_type": "attribute"}, {"api_name": "class_def.Lemmer", "line_number": 131, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfTransformer", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.multioutput.MultiOutputClassifier", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 133, "usage_type": "name"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 144, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 163, "usage_type": "call"}, {"api_name": "joblib.dump", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "numpy.logspace", "line_number": 181, "usage_type": "call"}, {"api_name": 
"sklearn.model_selection.validation_curve", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.semilogx", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill_between", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 224, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 224, "usage_type": "name"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "sklearn.model_selection.learning_curve", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 317, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 318, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 373, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 380, "usage_type": "attribute"}, {"api_name": "joblib.load", "line_number": 389, "usage_type": "call"}, {"api_name": "joblib.load", "line_number": 410, "usage_type": "call"}]} +{"seq_id": "72930038886", "text": "import pickle, requests\n\nlink = 'http://www.pythonchallenge.com/pc/def/banner.p'\nbody = requests.get(link)\n\ndata = pickle.loads(body.content)\nfor row in data:\n for item in row:\n print(item[0]*item[1], end='')\n print()", "repo_name": "oestej/python_challenge", "sub_path": "05.py", "file_name": "05.py", "file_ext": "py", "file_size_in_byte": 222, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 4, "usage_type": "call"}, {"api_name": 
"pickle.loads", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "2340546086", "text": "from shapely.geometry import Polygon\nimport osmnx as ox\nimport matplotlib.pyplot as plt\n\nfrom estaty.engine.vector.clip import clip_dataframe_by_polygon\n\n\nclass AdministrativeBoundaries:\n \"\"\" Load polygon of administrative boundaries for desired city\n\n :param city_name: name of the city for analysis\n \"\"\"\n\n def __init__(self, city_name: str):\n self.use_only_buildings = False\n if 'building' in city_name:\n city_name = city_name.split('_building')[0]\n self.use_only_buildings = True\n\n self.city_name = city_name\n\n def get_city_polygon(self) -> Polygon:\n \"\"\" Query data from OSM and return geometry of obtained polygon \"\"\"\n gdf = ox.geocode_to_gdf({'city': self.city_name})\n city_polygon = gdf.geometry.iloc[0]\n if self.use_only_buildings is False:\n return city_polygon\n\n # Add only buildings\n buildings_tags = {'building': ['apartments', 'barracks', 'bungalow',\n 'cabin', 'detached', 'dormitory', 'farm',\n 'ger', 'hotel', 'house', 'houseboat',\n 'residential', 'semidetached_house',\n 'static_caravan', 'stilt_house',\n 'terrace', 'tree_house', 'commercial',\n 'industrial', 'kiosk', 'office', 'retail',\n 'supermarket', 'warehouse']}\n bbox_info = ox.geometries_from_polygon(polygon=city_polygon,\n tags=buildings_tags)\n bbox_info = clip_dataframe_by_polygon(bbox_info, city_polygon)\n\n # Perform polygons merging into single geometry\n bbox_info['new_column'] = 0\n bbox_info = bbox_info.dissolve(by='new_column')\n return bbox_info.geometry.iloc[0]\n", "repo_name": "red5ai/estaty", "sub_path": "estaty/experiment/administrative.py", "file_name": "administrative.py", "file_ext": "py", "file_size_in_byte": 1906, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 16, "dataset": "github-code", "pt": "50", "api": [{"api_name": "osmnx.geocode_to_gdf", "line_number": 24, "usage_type": "call"}, {"api_name": "osmnx.geometries_from_polygon", "line_number": 38, "usage_type": "call"}, {"api_name": "estaty.engine.vector.clip.clip_dataframe_by_polygon", "line_number": 40, "usage_type": "call"}, {"api_name": "shapely.geometry.Polygon", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "34068966874", "text": "import pandas as pd\nimport numpy as np\nimport math\n\nfrom scipy.io import loadmat\n\nmedian_filter_window_size = 8\nsequence_length = 100\nsubjects = range(1, 8)\niterations = range(1, 6)\nactivities = ['step', 'rest', 'squat']\ncolumn_names = ['accel_x'] + [f'accel_x.{i}' for i in range(1, 100)] +\\\n ['accel_y'] + [f'accel_y.{i}' for i in range(1, 100)] +\\\n ['accel_z'] + [f'accel_z.{i}' for i in range(1, 100)] +\\\n ['ppg'] + [f'ppg.{i}' for i in range(1, 100)] +\\\n ['activity', 'subject']\n\n\ndef manage_ppgs(activity, subject, iteration):\n folder_name = f'S{subject}'\n filename = f'{activity}{iteration}_ppg.mat'\n f = loadmat(f'PPG_ACC_dataset/{folder_name}/{filename}')\n ppgs = f['PPG'][:, 1]\n # print(ppgs.shape)\n\n max_chunk = math.floor(len(ppgs) / 800) * 800\n ppgs = ppgs[0:max_chunk]\n\n ppg_with_median = np.concatenate([np.median(ppgs[i * median_filter_window_size: (i + 1) * median_filter_window_size])\n for i in range(math.floor(len(ppgs) / median_filter_window_size))], axis=None)\n\n number_of_sequences = math.floor(len(ppg_with_median) / sequence_length)\n split_ppg_with_median = np.split(ppg_with_median[0:number_of_sequences * sequence_length], number_of_sequences)\n stacked_split_ppg_with_median = np.stack(split_ppg_with_median, axis=0)\n\n 
return stacked_split_ppg_with_median\n\ndef manage_xyz(activity, subject, iteration):\n # activity = 'rest'\n # subject = 1\n # iteration = 1\n folder_name = f'S{subject}'\n filename = f'{activity}{iteration}_acc.mat'\n f = loadmat(f'PPG_ACC_dataset/{folder_name}/{filename}')\n xyz = f['ACC'][:, 1:4]\n # print(xyz.shape)\n parsed = []\n for i in range(3):\n v = xyz[:, i]\n max_chunk = math.floor(len(v) / 800) * 800\n # print(max_chunk, len(v))\n v = v[0:max_chunk]\n # print(v[0:8])\n v_with_median = np.concatenate(\n [np.median(v[i * median_filter_window_size: (i + 1) * median_filter_window_size])\n for i in range(math.floor(len(v) / median_filter_window_size))], axis=None)\n number_of_sequences = math.floor(len(v_with_median) / sequence_length)\n split_v_with_median = np.split(v_with_median[0:number_of_sequences * sequence_length], number_of_sequences)\n stacked_split_v_with_median = np.stack(split_v_with_median, axis=0)\n parsed.append(stacked_split_v_with_median)\n\n concat_xyz = np.concatenate(parsed, axis=1)\n # print(concat_xyz)\n # quit()\n return concat_xyz\n\n\nrows = []\nfor subject_index, subject in enumerate(subjects):\n for iteration in iterations:\n for activity_index, activity in enumerate(activities):\n print(subject, activity, iteration)\n xyzs = manage_xyz(activity, subject, iteration)\n ppgs = manage_ppgs(activity, subject, iteration)\n activity_label = (activity_index + 1) * np.ones((ppgs.shape[0], 1))\n subject_label = (subject_index + 1) * np.ones((ppgs.shape[0], 1))\n print(xyzs.shape)\n print(ppgs.shape)\n batch_rows = np.concatenate([xyzs, ppgs, activity_label, subject_label], axis=1)\n rows.append(batch_rows)\n\noutput = np.concatenate(rows, axis=0)\nprint(output.shape)\ndf = pd.DataFrame(output, columns=column_names)\n# df.to_csv('test_ppg.csv', header=False, index=False)\n\n\n# First fix any first sequences that are nans\nfor col in ['accel_x', 'accel_y', 'accel_z', 'ppg']:\n df[col] = df[col].fillna(df[col + '.1'])\n\n# Now fix any other occurrences with the preceding value\nfor i in range(1, len(df.columns)):\n df[df.columns[i]] = df[df.columns[i]].fillna(df[df.columns[i - 1]])\n\n\nactivity_label = df['activity']#.apply(lambda x: activity_label_mapping[x]).values\n# df = df.drop(columns=['activity', 'subject'])\n\n# Manage imbalanced data\ncounts = np.bincount(activity_label)\n\nactivity_label_1 = df[df['activity'] == 1]\nactivity_label_2 = df[df['activity'] == 2]\nactivity_label_3 = df[df['activity'] == 3]\n\nundersampled_2_index = np.random.choice(activity_label_2.shape[0], size=max(len(activity_label_1), len(activity_label_3)), replace=False)\nundersampled_2 = activity_label_2.iloc[undersampled_2_index]\n\noutput_df = pd.concat([activity_label_1, undersampled_2, activity_label_3], axis=0)\noutput_df.to_csv('accel_x_y_z+ppgv3_removed_nulls_balanced.csv', index=False)\n\n\n# df.to_csv('accel_x_y_z+ppgv3_removed_nulls_test.csv', index=False)\n", "repo_name": "paper-code-anon/dp-gan", "sub_path": "PPG/parse_ppg_data.py", "file_name": "parse_ppg_data.py", "file_ext": "py", "file_size_in_byte": 4443, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.io.loadmat", "line_number": 22, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 29, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 30, "usage_type": 
"call"}, {"api_name": "math.floor", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 34, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 44, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 55, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 56, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.bincount", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 110, "usage_type": "call"}]} +{"seq_id": "4488839169", "text": "#!/usr/bin/python\n#\n# Block header comment\n#\n#\nimport sys, imp, atexit\nsys.path.append(\"/home/courses/cs3214/software/pexpect-dpty/\");\nimport pexpect, shellio, signal, time, os, re, proc_check\n\n#Ensure the shell process is terminated\ndef force_shell_termination(shell_process):\n\tc.close(force=True)\n\n#pulling in the regular expression and other definitions\ndefinitions_scriptname = sys.argv[1]\ndef_module = imp.load_source('', definitions_scriptname)\nlogfile = None\nif hasattr(def_module, 'logfile'):\n logfile = def_module.logfile\n\n#spawn an instance of the shell\nc = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile)\natexit.register(force_shell_termination, shell_process=c)\n\nc.timeout = 5\n\n#echo | grep | grep\n#2nd grep will return 'much pipe', and not 'much wow', because of the first grep\nc.sendline(\"echo -e \\\"wow\\nso pipe\\nvery echo\\nmuch pipe\\nwow\\nmuch wow\\\" | grep pipe | grep much\")\nassert c.expect_exact(\"much pipe\") == 0, \"didn't multi-pipe to grep correctly\"\n\nshellio.success()\n", "repo_name": "tbporter/trash", "sub_path": "esh/src/eshtests/advanced/multi_pipe_test.py", "file_name": "multi_pipe_test.py", "file_ext": "py", "file_size_in_byte": 1008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "imp.load_source", "line_number": 16, "usage_type": "call"}, {"api_name": "pexpect.spawn", "line_number": 22, "usage_type": "call"}, {"api_name": "atexit.register", "line_number": 23, "usage_type": "call"}, {"api_name": "shellio.success", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "70394468635", "text": "from typing import Sequence, Type, Union\nfrom unittest.mock import MagicMock\n\nimport 
pytest\n\nimport omnicite.exceptions as omnicite_exceptions\nfrom omnicite.sources.base_source import BaseSource\n\n\n@pytest.mark.parametrize(\n (\"test_required_fields\", \"test_key_fields\"),\n (\n (\n (\n \"test\",\n (\n \"test1\",\n \"test2\",\n ),\n ),\n (\"test1\",),\n ),\n ),\n)\ndef test_check_field_existence_and_exclusivity(\n test_required_fields: Sequence[Union[str, Sequence[str]]],\n test_key_fields: Sequence[str],\n):\n BaseSource.check_field_existence_and_exclusivity(test_required_fields, test_key_fields)\n\n\n@pytest.mark.parametrize(\n (\"test_required_fields\", \"test_key_fields\", \"exception_string\"),\n (\n (\n (\n \"test\",\n (\n \"test1\",\n \"test2\",\n ),\n ),\n (),\n \"No field found\",\n ),\n (\n (\n \"test\",\n (\n \"test1\",\n \"test2\",\n ),\n ),\n (\"test\",),\n \"No field found\",\n ),\n (\n (\n \"test\",\n (\n \"test1\",\n \"test2\",\n ),\n ),\n (\n \"test\",\n \"test1\",\n \"test2\",\n ),\n \"Multiple exclusive fields\",\n ),\n ),\n)\ndef test_check_field_existence_and_exclusivity(\n test_required_fields: Sequence[Union[str, Sequence[str]]],\n test_key_fields: Sequence[str],\n exception_string: str,\n):\n with pytest.raises(omnicite_exceptions.OmniCiteSourceFieldError) as excinfo:\n BaseSource.check_field_existence_and_exclusivity(test_required_fields, test_key_fields)\n assert exception_string in str(excinfo.value)\n\n\n@pytest.mark.parametrize(\n (\"test_required_fields\", \"test_key_fields\", \"expected\"),\n (\n ((), (), ()),\n (\n (\"test1\",),\n (\"test1\",),\n (\"test1\",),\n ),\n (\n (\n \"test1\",\n (\"test2\", \"test3\"),\n ),\n (\n \"test1\",\n \"test3\",\n ),\n (\"test1\", \"test3\"),\n ),\n (\n (\n \"test1\",\n (\"test2\", \"test3\"),\n ),\n (\n \"test3\",\n \"test1\",\n ),\n (\"test1\", \"test3\"),\n ),\n ),\n)\ndef test_get_all_filled_required_fields(\n test_required_fields: Sequence[Union[str, Sequence[str]]],\n test_key_fields: Sequence[str],\n expected: Sequence[str],\n):\n result = BaseSource.get_all_filled_required_fields(test_required_fields, test_key_fields)\n assert len(result) == len(expected)\n assert all([r == expected[i] for i, r in enumerate(result)])\n\n\n@pytest.mark.parametrize(\n (\n \"test_optional_fields\",\n \"test_key_fields\",\n \"expected\",\n ),\n (\n ((), (), ()),\n (\n (\n \"option1\",\n \"option2\",\n ),\n (\n \"option1\",\n \"option2\",\n ),\n (\n \"option1\",\n \"option2\",\n ),\n ),\n (\n (\n \"option1\",\n \"option2\",\n ),\n (\n \"option2\",\n \"option1\",\n ),\n (\n \"option1\",\n \"option2\",\n ),\n ),\n (\n (\n \"option1\",\n \"option2\",\n (\"subopt1\", \"subopt2\"),\n ),\n (\n \"option2\",\n \"subopt2\",\n \"option1\",\n ),\n (\"option1\", \"option2\", \"subopt2\"),\n ),\n ),\n)\ndef test_get_all_filled_optional_fields(\n test_optional_fields: Sequence[Union[str, Sequence[str]]],\n test_key_fields: Sequence[str],\n expected: Sequence[str],\n):\n result = BaseSource.get_all_filled_optional_fields(test_optional_fields, test_key_fields)\n assert len(result) == len(expected)\n assert all([r == expected[i] for i, r in enumerate(result)])\n", "repo_name": "Serene-Arc/omnicite", "sub_path": "tests/sources/test_base_source.py", "file_name": "test_base_source.py", "file_ext": "py", "file_size_in_byte": 4391, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "typing.Sequence", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 
27, "usage_type": "name"}, {"api_name": "omnicite.sources.base_source.BaseSource.check_field_existence_and_exclusivity", "line_number": 29, "usage_type": "call"}, {"api_name": "omnicite.sources.base_source.BaseSource", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 10, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 10, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 76, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 79, "usage_type": "call"}, {"api_name": "omnicite.exceptions.OmniCiteSourceFieldError", "line_number": 79, "usage_type": "attribute"}, {"api_name": "omnicite.exceptions", "line_number": 79, "usage_type": "name"}, {"api_name": "omnicite.sources.base_source.BaseSource.check_field_existence_and_exclusivity", "line_number": 80, "usage_type": "call"}, {"api_name": "omnicite.sources.base_source.BaseSource", "line_number": 80, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 32, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 32, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 118, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 119, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 120, "usage_type": "name"}, {"api_name": "omnicite.sources.base_source.BaseSource.get_all_filled_required_fields", "line_number": 122, "usage_type": "call"}, {"api_name": "omnicite.sources.base_source.BaseSource", "line_number": 122, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 84, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 84, "usage_type": "attribute"}, {"api_name": "typing.Sequence", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 179, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 180, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 181, "usage_type": "name"}, {"api_name": "omnicite.sources.base_source.BaseSource.get_all_filled_optional_fields", "line_number": 183, "usage_type": "call"}, {"api_name": "omnicite.sources.base_source.BaseSource", "line_number": 183, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 127, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 127, "usage_type": "attribute"}]} +{"seq_id": "42173326302", "text": "# ----------------------------------------\n# Written by Yude Wang\n# ----------------------------------------\nfrom __future__ import print_function, division\nimport os\nimport torch\nimport cv2\nimport multiprocessing\nimport pandas as pd\nimport numpy as np\nfrom skimage import io\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom datasets.transform import *\nfrom utils.imutils import *\nfrom utils.registry import DATASETS\nfrom datasets.BaseDataset import BaseDataset\n\n@DATASETS.register_module\nclass VOCDataset(BaseDataset):\n\tdef __init__(self, cfg, period, transform='none'):\n\t\tsuper(VOCDataset, self).__init__(cfg, period, transform)\n\t\tself.dataset_name = 'VOC%d'%cfg.DATA_YEAR\n\t\tself.root_dir = os.path.join(cfg.ROOT_DIR,'data','VOCdevkit')\n\t\tself.dataset_dir = 
os.path.join(self.root_dir,self.dataset_name)\n\t\tself.rst_dir = os.path.join(self.root_dir,'results',self.dataset_name,'Segmentation')\n\t\tself.eval_dir = os.path.join(self.root_dir,'eval_result',self.dataset_name,'Segmentation')\n\t\tself.img_dir = os.path.join(self.dataset_dir, 'JPEGImages')\n\t\tself.ann_dir = os.path.join(self.dataset_dir, 'Annotations')\n\t\tself.seg_dir = os.path.join(self.dataset_dir, 'SegmentationClass')\n\t\tself.set_dir = os.path.join(self.dataset_dir, 'ImageSets', 'Segmentation')\n\t\tif cfg.DATA_PSEUDO_GT:\n\t\t\tself.pseudo_gt_dir = cfg.DATA_PSEUDO_GT\n\t\telse:\n\t\t\tself.pseudo_gt_dir = os.path.join(self.root_dir,'pseudo_gt',self.dataset_name,'Segmentation')\n\n\t\tfile_name = None\n\t\tif cfg.DATA_AUG and 'train' in self.period:\n\t\t\tfile_name = self.set_dir+'/'+period+'aug.txt'\n\t\telse:\n\t\t\tfile_name = self.set_dir+'/'+period+'.txt'\n\t\tdf = pd.read_csv(file_name, names=['filename'])\n\t\tself.name_list = df['filename'].values\n\t\tif self.dataset_name == 'VOC2012':\n\t\t\tself.categories = ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',\n\t\t\t\t\t\t\t 'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']\n\t\t\tself.coco2voc = [[0],[5],[2],[16],[9],[44],[6],[3],[17],[62],\n\t\t\t\t\t\t\t [21],[67],[18],[19],[4],[1],[64],[20],[63],[7],[72]]\n\n\t\t\tself.num_categories = len(self.categories)+1\n\t\t\tself.cmap = self.__colormap(len(self.categories)+1)\n\n\tdef __len__(self):\n\t\treturn len(self.name_list)\n\n\tdef load_name(self, idx):\n\t\tname = self.name_list[idx]\n\t\treturn name\n\n\tdef load_image(self, idx):\n\t\tname = self.name_list[idx]\n\t\timg_file = self.img_dir + '/' + name + '.jpg'\n\t\timage = cv2.imread(img_file)\n\t\timage_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\t\treturn image_rgb\n\n\tdef load_segmentation(self, idx):\n\t\tname = self.name_list[idx]\n\t\tseg_file = self.seg_dir + '/' + name + '.png'\n\t\tsegmentation = np.array(Image.open(seg_file))\n\t\treturn segmentation\n\n\tdef load_pseudo_segmentation(self, idx):\n\t\tname = self.name_list[idx]\n\t\tseg_file = self.pseudo_gt_dir + '/' + name + '.png'\n\t\tsegmentation = np.array(Image.open(seg_file))\n\t\treturn segmentation\n\n\tdef __colormap(self, N):\n\t\t\"\"\"Get the map from label index to color\n\n\t\tArgs:\n\t\t\tN: number of class\n\n\t\t\treturn: a Nx3 matrix\n\n\t\t\"\"\"\n\t\tcmap = np.zeros((N, 3), dtype = np.uint8)\n\n\t\tdef uint82bin(n, count=8):\n\t\t\t\"\"\"returns the binary of integer n, count refers to amount of bits\"\"\"\n\t\t\treturn ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])\n\n\t\tfor i in range(N):\n\t\t\tr = 0\n\t\t\tg = 0\n\t\t\tb = 0\n\t\t\tidx = i\n\t\t\tfor j in range(7):\n\t\t\t\tstr_id = uint82bin(idx)\n\t\t\t\tr = r ^ ( np.uint8(str_id[-1]) << (7-j))\n\t\t\t\tg = g ^ ( np.uint8(str_id[-2]) << (7-j))\n\t\t\t\tb = b ^ ( np.uint8(str_id[-3]) << (7-j))\n\t\t\t\tidx = idx >> 3\n\t\t\tcmap[i, 0] = r\n\t\t\tcmap[i, 1] = g\n\t\t\tcmap[i, 2] = b\n\t\treturn cmap\n\n\tdef load_ranked_namelist(self):\n\t\tdf = self.read_rank_result()\n\t\tself.name_list = df['filename'].values\n\n\tdef label2colormap(self, label):\n\t\tm = label.astype(np.uint8)\n\t\tr,c = m.shape\n\t\tcmap = np.zeros((r,c,3), dtype=np.uint8)\n\t\tcmap[:,:,0] = (m&1)<<7 | (m&8)<<3\n\t\tcmap[:,:,1] = (m&2)<<6 | (m&16)<<2\n\t\tcmap[:,:,2] = (m&4)<<5\n\t\tcmap[m==255] = [255,255,255]\n\t\treturn cmap\n\t\n\tdef save_result(self, result_list, model_id):\n\t\t\"\"\"Save test 
results\n\n\t\tArgs:\n\t\t\tresult_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]\n\n\t\t\"\"\"\n\t\tfolder_path = os.path.join(self.rst_dir,'%s_%s'%(model_id,self.period))\n\t\tif not os.path.exists(folder_path):\n\t\t\tos.makedirs(folder_path)\n\t\t\t\n\t\tfor sample in result_list:\n\t\t\tfile_path = os.path.join(folder_path, '%s.png'%sample['name'])\n\t\t\tcv2.imwrite(file_path, sample['predict'])\n\n\tdef save_pseudo_gt(self, result_list, folder_path=None):\n\t\t\"\"\"Save pseudo gt\n\n\t\tArgs:\n\t\t\tresult_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]\n\n\t\t\"\"\"\n\t\tfolder_path = self.pseudo_gt_dir if folder_path is None else folder_path\n\t\tif not os.path.exists(folder_path):\n\t\t\tos.makedirs(folder_path)\n\t\tfor sample in result_list:\n\t\t\tfile_path = os.path.join(folder_path, '%s.png'%(sample['name']))\n\t\t\tcv2.imwrite(file_path, sample['predict'])\n\n\tdef do_python_eval(self, model_id):\n\t\tpredict_folder = os.path.join(self.rst_dir,'%s_%s'%(model_id,self.period))\n\t\tgt_folder = self.seg_dir\n\t\tTP = []\n\t\tP = []\n\t\tT = []\n\t\tfor i in range(self.num_categories):\n\t\t\tTP.append(multiprocessing.Value('i', 0, lock=True))\n\t\t\tP.append(multiprocessing.Value('i', 0, lock=True))\n\t\t\tT.append(multiprocessing.Value('i', 0, lock=True))\n\t\t\n\t\tdef compare(start,step,TP,P,T):\n\t\t\tfor idx in range(start,len(self.name_list),step):\n\t\t\t\tname = self.name_list[idx]\n\t\t\t\tpredict_file = os.path.join(predict_folder,'%s.png'%name)\n\t\t\t\tgt_file = os.path.join(gt_folder,'%s.png'%name)\n\t\t\t\tpredict = np.array(Image.open(predict_file))\n\t\t\t\tgt = np.array(Image.open(gt_file))\n\t\t\t\tcal = gt<255\n\t\t\t\tmask = (predict==gt) * cal\n\t\t \n\t\t\t\tfor i in range(self.num_categories):\n\t\t\t\t\tP[i].acquire()\n\t\t\t\t\tP[i].value += np.sum((predict==i)*cal)\n\t\t\t\t\tP[i].release()\n\t\t\t\t\tT[i].acquire()\n\t\t\t\t\tT[i].value += np.sum((gt==i)*cal)\n\t\t\t\t\tT[i].release()\n\t\t\t\t\tTP[i].acquire()\n\t\t\t\t\tTP[i].value += np.sum((gt==i)*mask)\n\t\t\t\t\tTP[i].release()\n\t\tp_list = []\n\t\tfor i in range(8):\n\t\t\tp = multiprocessing.Process(target=compare, args=(i,8,TP,P,T))\n\t\t\tp.start()\n\t\t\tp_list.append(p)\n\t\tfor p in p_list:\n\t\t\tp.join()\n\t\tIoU = []\n\t\tfor i in range(self.num_categories):\n\t\t\tIoU.append(TP[i].value/(T[i].value+P[i].value-TP[i].value+1e-10))\n\t\tloglist = {}\n\t\tfor i in range(self.num_categories):\n\t\t\tif i == 0:\n\t\t\t\tprint('%11s:%7.3f%%'%('background',IoU[i]*100),end='\\t')\n\t\t\t\tloglist['background'] = IoU[i] * 100 \n\t\t\telse:\n\t\t\t\tif i%2 != 1:\n\t\t\t\t\tprint('%11s:%7.3f%%'%(self.categories[i-1],IoU[i]*100),end='\\t')\n\t\t\t\telse:\n\t\t\t\t\tprint('%11s:%7.3f%%'%(self.categories[i-1],IoU[i]*100))\n\t\t\t\tloglist[self.categories[i-1]] = IoU[i] * 100\n\t\t\t\t\t\n\t\tmiou = np.mean(np.array(IoU))\n\t\tprint('\\n======================================================')\n\t\tprint('%11s:%7.3f%%'%('mIoU',miou*100))\n\t\tloglist['mIoU'] = miou * 100\n\t\treturn loglist\n\n\n@DATASETS.register_module\nclass SemiWeak_VOCDataset(VOCDataset):\n\tdef __init__(self, cfg, period, transform='none', split_idx=None, puresemi=False):\n\t\tsuper(SemiWeak_VOCDataset, self).__init__(cfg, period, transform)\n\t\tif split_idx:\n\t\t\tself.split_idx = split_idx\n\t\telif cfg.DATA_SPLIT is not None:\n\t\t\tself.split_idx = cfg.DATA_SPLIT\n\t\t\t#print(self.split_idx)\n\t\telse:\n\t\t\tself.split_idx = len(self.seg_name_list)\n\t\tif period == 
'train':\n\t\t\tif puresemi:\n\t\t\t\tfile_name = os.path.join(self.set_dir,f'train_aug_labeled_1-{10581//self.split_idx+1}.txt')\n\t\t\telse:\n\t\t\t\tfile_name = self.set_dir+'/'+period+'.txt'\n\t\t\tdf = pd.read_csv(file_name, names=['filename'])\n\t\t\tself.seg_name_list = df['filename'].values\n\t\t\t#print(len(self.seg_name_list))\n\n\t\t\tlist_all = self.seg_name_list.copy()\n\t\t\tfor name in self.name_list:\n\t\t\t\tif name not in self.seg_name_list: \n\t\t\t\t\tlist_all = np.append(list_all, name)\n\t\t\tself.name_list = list_all\n\t\tif self.cfg.DATA_RANDOMCOPYPASTE>0:\n\t\t\tself.singleclass_filter()\n\t\n\tdef singleclass_filter(self):\n\t\tself.clsidxgroup = {}\n\t\tself.singlecls = []\n\t\tfor i in range(self.num_categories):\n\t\t\tself.clsidxgroup[i] = []\n\t\tfor idx in range(len(self.name_list)):\n\t\t\tsegmentation = self.load_segmentation(idx)\n\t\t\tunique = np.unique(segmentation)\n\t\t\tunique = unique[:-1] if 255 in unique else unique\n\t\t\tunique = unique[1:] if 0 in unique else unique\n\t\t\tif len(unique)==1:\n\t\t\t\tself.clsidxgroup[unique[0]].append(idx)\n\t\t\t\tself.singlecls.append(True)\n\t\t\telse:\n\t\t\t\tself.singlecls.append(False)\n\t\t\t\n\n\tdef __getitem__(self, idx):\n\t\tsample = self.__sample_generate__(idx, self.split_idx)\n\t\tif 'segmentation' in sample.keys():\n\t\t\tsample['mask'] = sample['segmentation'] < self.num_categories\n\t\t\tif idx >= self.split_idx:\n\t\t\t\tmask_numpy = sample['mask']\n\t\t\t\tsample['mask'] = np.zeros(mask_numpy.shape)\n\t\t\tt = sample['segmentation'].copy()\n\t\t\tt[t >= self.num_categories] = 0\n\t\t\tsample['segmentation_onehot']=onehot(t,self.num_categories)\n\t\treturn self.totensor(sample)\n\n\tdef __sample_generate__(self, idx, split_idx=0):\n\t\tname = self.load_name(idx)\n\t\timage = self.load_image(idx)\n\t\tr,c,_ = image.shape\n\t\tsample = {'image': image, 'name': name, 'row': r, 'col': c, 'image_orig': image}\n\n\t\tif 'test' in self.period:\n\t\t\treturn self.__transform__(sample)\n\t\telif self.cfg.DATA_PSEUDO_GT and idx>=split_idx and 'train' in self.period:\n\t\t\tsegmentation = self.load_pseudo_segmentation(idx)\n\t\telse:\n\t\t\tsegmentation = self.load_segmentation(idx)\n\t\tsample['segmentation'] = segmentation\n\t\tt = sample['segmentation'].copy()\n\t\tt[t >= self.num_categories] = 0\n\t\tsample['category'] = seg2cls(t,self.num_categories)\n\t\tsample['category_copypaste'] = np.zeros(sample['category'].shape)\n\n\t\tsample = self.__transform__(sample)\n\t\treturn sample\n", "repo_name": "YudeWang/Learning-Pseudo-Label", "sub_path": "lib/datasets/VOCDataset.py", "file_name": "VOCDataset.py", "file_ext": "py", "file_size_in_byte": 9481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "50", "api": [{"api_name": "datasets.BaseDataset.BaseDataset", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 70, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 88, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "multiprocessing.Value", "line_number": 160, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 161, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 167, "usage_type": "call"}, {"api_name": "os.path", "line_number": 167, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 169, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 169, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 170, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 170, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 170, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 182, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.registry.DATASETS.register_module", "line_number": 19, "usage_type": "attribute"}, {"api_name": "utils.registry.DATASETS", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 286, "usage_type": "call"}, {"api_name": "utils.registry.DATASETS.register_module", "line_number": 213, "usage_type": "attribute"}, {"api_name": "utils.registry.DATASETS", "line_number": 213, "usage_type": "name"}]} +{"seq_id": "9923405818", "text": "#!/usr/bin/env python3\nif(__package__==None or __package__==\"\"):\n import Orbits\nelse:\n from . 
import Orbits\nclass TwoBodyChannel:\n def __init__(self,J=None,P=None,Z=None,orbits=None,e2max=None):\n self.J = J\n self.P = P\n self.Z = Z\n self.orbits = orbits\n self.e2max = e2max\n self.orbit1_index = []\n self.orbit2_index = []\n self.phase_from_indices = {}\n self.index_from_indices = {}\n self.number_states = 0\n if( self.J != None and self.P != None and self.Z != None and orbits != None ):\n self._set_two_body_channel()\n return\n def _set_two_body_channel(self):\n orbs = self.orbits\n if(self.e2max==None): self.e2max = 2*orbs.emax\n import itertools\n for oa, ob in itertools.combinations_with_replacement( orbs.orbits, 2 ):\n ia = orbs.get_orbit_index_from_orbit( oa )\n ib = orbs.get_orbit_index_from_orbit( ob )\n if( ia == ib and self.J%2==1 ): continue\n if( oa.e + ob.e > self.e2max ): continue\n if( (oa.z + ob.z) != 2*self.Z ): continue\n if( (-1)**(oa.l + ob.l) != self.P ): continue\n if( self._triag( oa.j, ob.j, 2*self.J ) ): continue\n self.orbit1_index.append( ia )\n self.orbit2_index.append( ib )\n idx = len( self.orbit1_index )-1\n self.index_from_indices[(ia,ib)] = idx\n self.index_from_indices[(ib,ia)] = idx\n self.phase_from_indices[(ia,ib)] = 1\n self.phase_from_indices[(ib,ia)] = -(-1)**( (oa.j+ob.j)//2 - self.J )\n self.number_states = len( self.orbit1_index )\n def get_number_states(self):\n return self.number_states\n def get_indices(self,idx):\n return self.orbit1_index[idx], self.orbit2_index[idx]\n def get_orbits(self,idx):\n ia, ib = self.get_indices(idx)\n return self.orbits.get_orbit(ia), self.orbits.get_orbit(ib)\n def get_JPZ(self):\n return self.J, self.P, self.Z\n def _triag(self,J1,J2,J3):\n b = True\n if(abs(J1-J2) <= J3 <= J1+J2): b = False\n return b\n\nclass TwoBodySpace:\n def __init__(self,orbits=None,e2max=None):\n self.orbits = orbits\n self.e2max = e2max\n self.index_from_JPZ = {}\n self.channels = []\n self.number_channels = 0\n if( self.orbits != None ):\n if( self.e2max == None ): self.e2max = 2*self.orbits.emax\n for J in range(self.e2max+2):\n for P in [1,-1]:\n for Z in [-1,0,1]:\n channel = TwoBodyChannel(J=J,P=P,Z=Z,orbits=self.orbits,e2max=e2max)\n if( channel.get_number_states() == 0): continue\n self.channels.append( channel )\n idx = len(self.channels) - 1\n self.index_from_JPZ[(J,P,Z)] = idx\n self.number_channels = len(self.channels)\n def get_number_channels(self):\n return self.number_channels\n def get_index(self,*JPZ):\n return self.index_from_JPZ[JPZ]\n def get_channel(self,idx):\n return self.channels[idx]\n def get_channel_from_JPZ(self,*JPZ):\n return self.get_channel( self.get_index(*JPZ) )\n def print_channels(self):\n print(\" Two-body channels list \")\n print(\" J,par, Z, # of states\")\n for channel in self.channels:\n J,P,Z = channel.get_JPZ()\n print(\"{:3d},{:3d},{:3d},{:12d}\".format(J,P,Z,channel.get_number_states()))\n\ndef main():\n orbs = Orbits.Orbits()\n orbs.set_orbits(emax=6)\n two = TwoBodySpace(orbits=orbs)\n two.print_channels()\nif(__name__==\"__main__\"):\n main()\n", "repo_name": "Takayuki-Miyagi/mylib_python", "sub_path": "Nucl/TwoBodySpace.py", "file_name": "TwoBodySpace.py", "file_ext": "py", "file_size_in_byte": 3709, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.combinations_with_replacement", "line_number": 25, "usage_type": "call"}, {"api_name": "Orbits.Orbits", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "1531708781", "text": "from threading import Event, Thread\r\nimport 
time\r\n\r\nimport pyautogui\r\nfrom system_hotkey import SystemHotkey\r\nimport yaml\r\n\r\n\r\nclass Util:\r\n\r\n @staticmethod\r\n def get_config() -> dict:\r\n dict = {}\r\n with open(\"config/base.yml\", \"r\", encoding=\"utf-8\") as f:\r\n content = yaml.load(f, Loader=yaml.FullLoader)\r\n return content\r\n\r\n\r\nclass Caiji(Thread):\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n self.config = Util.get_config()\r\n self.__flag = Event()\r\n self.__flag.clear()\r\n self.__running = Event()\r\n self.__running.set()\r\n self.hk = SystemHotkey()\r\n self.hk.register((\"control\", \"1\"), callback=self.pause_resume)\r\n self.hk2 = SystemHotkey()\r\n self.hk2.register((\"control\", \"3\"), callback=self.stop)\r\n\r\n def task(self):\r\n print(time.time())\r\n pyautogui.press(str(self.config.get(\"key\")))\r\n\r\n def run(self) -> None:\r\n while True:\r\n self.__flag.wait()\r\n self.task()\r\n if not self.__running.isSet():\r\n break\r\n time.sleep(self.config.get(\"sleep\"))\r\n\r\n def pause_resume(self, *args, **kwargs):\r\n if self.__flag.isSet():\r\n self.pause()\r\n else:\r\n self.resume()\r\n\r\n def begin(self, *args, **kwargs):\r\n print(\"开始了\")\r\n self.__flag.set()\r\n\r\n def pause(self, *args, **kwargs):\r\n print(\"暂停了\")\r\n self.__flag.clear()\r\n\r\n def resume(self, *args, **kwargs):\r\n print(\"恢复了\")\r\n self.__flag.set()\r\n\r\n def stop(self, *args, **kwargs):\r\n print(\"停止了\")\r\n self.__flag.set()\r\n self.__running.clear()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n caiji = Caiji()\r\n caiji.start()\r\n caiji.join()\r\n print(\"main end\")", "repo_name": "chengyijun/tool", "sub_path": "wow/caiji.py", "file_name": "caiji.py", "file_ext": "py", "file_size_in_byte": 1854, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "50", "api": [{"api_name": "yaml.load", "line_number": 15, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 15, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 19, "usage_type": "name"}, {"api_name": "threading.Event", "line_number": 24, "usage_type": "call"}, {"api_name": "threading.Event", "line_number": 26, "usage_type": "call"}, {"api_name": "system_hotkey.SystemHotkey", "line_number": 28, "usage_type": "call"}, {"api_name": "system_hotkey.SystemHotkey", "line_number": 30, "usage_type": "call"}, {"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "36417826453", "text": "import unittest\n\nimport errno\nimport mock\nfrom exporters.records.base_record import BaseRecord\nfrom exporters.writers import SFTPWriter\nfrom exporters.writers.base_writer import InconsistentWriteState\n\nfrom .utils import meta\n\n\nclass SFTPWriterTest(unittest.TestCase):\n\n def get_batch(self):\n data = [\n {'name': 'Roberto', 'birthday': '12/05/1987'},\n {'name': 'Claudia', 'birthday': '21/12/1985'},\n ]\n return [BaseRecord(d) for d in data]\n\n def get_writer_config(self):\n return {\n 'name': 'exporters.writers.sftp_writer.SFTPWriter',\n 'options': {\n 'sftp_user': 'user',\n 'sftp_password': 'password',\n 'filebase': 'test/',\n 'host': 'sftp.example.com',\n }\n }\n\n def test_create(self):\n options = self.get_writer_config()\n writer = SFTPWriter(options, meta())\n self.assertEquals(22, writer.read_option('port'))\n writer.close()\n\n 
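# The test below simulates inconsistent uploads: pysftp.Connection is mocked so\n # stat() first reports a wrong st_size and then raises IOError with errno ENOENT,\n # and finish_writing() must raise InconsistentWriteState in both cases.\n 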
@mock.patch('pysftp.Connection')\n def test_check_writer_consistency(self, mock_sftp):\n\n items_to_write = self.get_batch()\n options = self.get_writer_config()\n options['options']['check_consistency'] = True\n mock_sftp.return_value.__enter__.return_value.stat.return_value.st_size = 999\n\n # when:\n try:\n writer = SFTPWriter(options, meta())\n writer.write_batch(items_to_write)\n writer.flush()\n finally:\n writer.close()\n\n with self.assertRaisesRegexp(InconsistentWriteState, 'Wrong size for file'):\n writer.finish_writing()\n\n exception = IOError()\n exception.errno = errno.ENOENT\n mock_sftp.return_value.__enter__.return_value.stat.side_effect = exception\n with self.assertRaisesRegexp(InconsistentWriteState, 'is not present at destination'):\n writer.finish_writing()\n", "repo_name": "scrapinghub/exporters", "sub_path": "tests/test_writers_sftp.py", "file_name": "test_writers_sftp.py", "file_ext": "py", "file_size_in_byte": 1933, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 40, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "attribute"}, {"api_name": "exporters.records.base_record.BaseRecord", "line_number": 19, "usage_type": "call"}, {"api_name": "exporters.writers.SFTPWriter", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.meta", "line_number": 34, "usage_type": "call"}, {"api_name": "exporters.writers.SFTPWriter", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.meta", "line_number": 48, "usage_type": "call"}, {"api_name": "exporters.writers.base_writer.InconsistentWriteState", "line_number": 54, "usage_type": "argument"}, {"api_name": "errno.ENOENT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "exporters.writers.base_writer.InconsistentWriteState", "line_number": 60, "usage_type": "argument"}, {"api_name": "mock.patch", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "7809854082", "text": "from notificaciones.models import Notificacion\nfrom rest_framework import permissions\nfrom django.shortcuts import render\nfrom rest_framework.generics import CreateAPIView\nfrom api.exceptions import *\nfrom .serializers import *\n# from api.logger import log\nimport logging\nlog = logging.getLogger('django')\nfrom preregistro.models import *\nfrom rest_framework.views import APIView\nfrom api.Paginacion import Paginacion\nfrom rest_framework.response import Response\n\n\n\n# Create your views here.\n\n\n# ----------------------------------------------------------------------------------Chat\nclass ChatCreateView(CreateAPIView):\n serializer_class = MensajeSerializer\n permission_classes = (permissions.AllowAny,)\n\n def post(self, request, *args, **kwargs):\n serializer = MensajeSerializer(data=request.data)\n if serializer.is_valid():\n destinatario = self.request.data.get('destinatario')\n # remitente = self.request.data.get('remitente')\n # print(f'--->destinatario: {destinatario}')\n Conversacion.objects.filter(destinatario=destinatario).delete()\n nombre = getNombreSesion(request,destinatario)\n Conversacion.objects.create(destinatario=destinatario,nombre=nombre)\n Notificacion.objects.create(titulo='Chat',mensaje='Tiene un nuevo mensaje',destinatario=destinatario,remitente=0)\n \n return self.create(request, *args, **kwargs)\n log.error(f'--->>>campos incorrectos: {serializer.errors}')\n raise CamposIncorrectos(serializer.errors)\n\n\ndef getNombreSesion(request,destinatario):\n nombre = request.session.get(\"nombre\", None)\n if nombre is 
None:\n datosMedico = Medico.objects.filter(numRegistro=destinatario).values_list('nombre','apPaterno','apMaterno')\n nombre = str(datosMedico[0][0] + ' ' + datosMedico[0][1] + ' ' + datosMedico[0][2])\n request.session[\"nombre\"] = nombre\n\n return nombre\n\n\nclass ChatListEndPoint(APIView):\n \"\"\"\n ?size=3&page=1&orderby=id&direc=asc\n size -- es el numero de registros a traer\n page -- el numero de pagina a traer\n orderby -- campo opr el cual se ordenaran los registros a traer\n direc -- si es ascendente(asc) o descencende (vacio)\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n\n def get(self, request, remitente, destinatario):\n queryset = Mensaje.objects.all().filter(remitente=remitente, destinatario=destinatario)\n size = self.request.query_params.get('size', None)\n direc = self.request.query_params.get('direc', None)\n orderby = self.request.query_params.get('orderby', None)\n page = self.request.query_params.get('page', None)\n\n paginacion = Paginacion(queryset, MensajeSerializer, size, direc, orderby, page)\n serializer = paginacion.paginar()\n\n respuesta = {\n \"totalElements\": paginacion.totalElements,\n \"totalPages\": paginacion.totalPages,\n \"sort\": paginacion.orderby,\n \"direction\": paginacion.direc,\n \"size\": paginacion.size,\n \"content\": serializer.data\n }\n return Response(respuesta)\n\n\n\nclass ConversacionListEndPoint(APIView):\n \"\"\"\n ?size=3&page=1&orderby=id&direc=asc\n size -- es el numero de registros a traer\n page -- el numero de pagina a traer\n orderby -- campo opr el cual se ordenaran los registros a traer\n direc -- si es ascendente(asc) o descencende (vacio)\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n\n def get(self, request):\n queryset = Conversacion.objects.all()\n size = self.request.query_params.get('size', None)\n direc = self.request.query_params.get('direc', None)\n orderby = self.request.query_params.get('orderby', None)\n page = self.request.query_params.get('page', None)\n\n paginacion = Paginacion(queryset, ConversacionSerializer, size, direc, orderby, page)\n serializer = paginacion.paginar()\n\n respuesta = {\n \"totalElements\": paginacion.totalElements,\n \"totalPages\": paginacion.totalPages,\n \"sort\": paginacion.orderby,\n \"direction\": paginacion.direc,\n \"size\": paginacion.size,\n \"content\": serializer.data\n }\n return Response(respuesta)\n \n \n \nclass MedicoChatListEndPoint(APIView):\n \"\"\"\n ?size=3&page=1&orderby=id&direc=asc\n size -- es el numero de registros a traer\n page -- el numero de pagina a traer\n orderby -- campo opr el cual se ordenaran los registros a traer\n direc -- si es ascendente(asc) o descencende (vacio)\n \"\"\"\n permission_classes = (permissions.AllowAny,)\n\n def get(self, request):\n queryset = Medico.objects.all().filter(aceptado=True)\n size = self.request.query_params.get('size', None)\n direc = self.request.query_params.get('direc', None)\n orderby = self.request.query_params.get('orderby', None)\n page = self.request.query_params.get('page', None)\n\n paginacion = Paginacion(queryset, MedicoChatListSerializer, size, direc, orderby, page)\n serializer = paginacion.paginar()\n\n respuesta = {\n \"totalElements\": paginacion.totalElements,\n \"totalPages\": paginacion.totalPages,\n \"sort\": paginacion.orderby,\n \"direction\": paginacion.direc,\n \"size\": paginacion.size,\n \"content\": serializer.data\n }\n return Response(respuesta)", "repo_name": "gbrlQuirozMB/cmcperBack", "sub_path": "chat/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 5479, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 23, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 23, "usage_type": "name"}, {"api_name": "notificaciones.models.Notificacion.objects.create", "line_number": 34, "usage_type": "call"}, {"api_name": "notificaciones.models.Notificacion.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "notificaciones.models.Notificacion", "line_number": 34, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 59, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 59, "usage_type": "name"}, {"api_name": "api.Paginacion.Paginacion", "line_number": 68, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 79, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 83, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 91, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 91, "usage_type": "name"}, {"api_name": "api.Paginacion.Paginacion", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 111, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 115, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 123, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 123, "usage_type": "name"}, {"api_name": "api.Paginacion.Paginacion", "line_number": 132, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "30831627187", "text": "from scipy.sparse import isspmatrix_csc, isspmatrix_csr\nfrom scipy.sparse import csc_matrix, identity\n\nimport numpy as onp\n\nfrom sksparse.cholmod import analyze, cholesky\nfrom sksparse.cholmod import CholmodNotPositiveDefiniteError as NotPosDefError\n\nfrom optimism.JaxConfig import *\n\n\nclass SparseCholesky:\n \n def factorize(self): \n\n print('Assembling preconditioner', 0)\n stiffnessTryStep = 0\n self.A = self.new_stiffness_func(stiffnessTryStep)\n \n # doing the analyze every time for now, even if we know the sparsity does not change\n # we can improve this later if we are inclined\n assert isspmatrix_csc(self.A), \\\n \"Preconditioner matrix is not in a valid sparse format\"\n self.Precond = analyze(self.A, mode='supernodal',\n ordering_method='nesdis')\n\n attempt = 0\n maxAttempts = 10\n while attempt < maxAttempts:\n try:\n print('Factorizing preconditioner')\n self.Precond.cholesky_inplace(self.A)\n except NotPosDefError:\n attempt += 1\n print('Cholesky failed, assembling preconditioner', attempt)\n # we are assuming that the sparsity does not change here\n self.A = self.new_stiffness_func(attempt)\n else:\n break\n \n if attempt == maxAttempts:\n print(\"Cholesky failed too many times, using identity preconditioner\")\n self.A = identity(self.A.shape[0], format='csc')\n self.Precond.cholesky_inplace(self.A)\n\n \n def 
update(self, new_stiffness_func):\n self.new_stiffness_func = new_stiffness_func\n self.factorize()\n\n \n def apply(self, b):\n if type(b) == type(np.array([])):\n b = onp.array(b)\n return self.Precond(b)\n\n \n def apply_transpose(self, b):\n return self.apply(b)\n \n \n def multiply_by_approximate(self, x):\n return self.A.dot(x)\n \n \n def multiply_by_transpose(self, x):\n return self.A.T.dot(x)\n\n\n def check_stability(self, x, p):\n A = self.stiffness_func(x, p)\n try:\n self.Precond.cholesky(A)\n print(\"Jacobian is stable.\")\n except NotPosDefError as e:\n print(e)\n print(\"Jacobian is unstable.\")\n\n\n def get_diagonal_stiffness(self):\n return self.A.diagonal()\n\n \n def __matmul__(self, b):\n return self.A.dot(b)\n\n", "repo_name": "btalami/optimism", "sub_path": "optimism/SparseCholesky.py", "file_name": "SparseCholesky.py", "file_ext": "py", "file_size_in_byte": 2526, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.sparse.isspmatrix_csc", "line_number": 22, "usage_type": "call"}, {"api_name": "sksparse.cholmod.analyze", "line_number": 24, "usage_type": "call"}, {"api_name": "sksparse.cholmod.CholmodNotPositiveDefiniteError", "line_number": 33, "usage_type": "name"}, {"api_name": "scipy.sparse.identity", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "sksparse.cholmod.CholmodNotPositiveDefiniteError", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "43759482052", "text": "# coding: utf-8\n\"\"\"\nhttps://soulmachine.gitbooks.io/algorithm-essentials/content/cpp/bfs/word-ladder-ii.html\nGiven two words (start and end), and a dictionary, find all shortest transformation sequence(s) from start to end, such that:\nOnly one letter can be changed at a time\nEach intermediate word must exist in the dictionary\nFor example, Given:\nstart = \"hit\"\nend = \"cog\"\ndict = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]\nReturn\n[\n [\"hit\",\"hot\",\"dot\",\"dog\",\"cog\"],\n [\"hit\",\"hot\",\"lot\",\"log\",\"cog\"]\n]\nNote:\nAll words have the same length.\nAll words contain only lowercase alphabetic characters.\n\"\"\"\n\ndef get_one_change(word, dict_, visted):\n ret = []\n for word_ in dict_:\n if word_ in visted:\n continue\n common = sum(i==j for i, j in zip(word_, word))\n if common == len(word) - 1:\n ret.append(word_)\n return ret\n\n\ndef get_ladder(start, end, dict_):\n visited = set()\n\n def _ladder(words, paths):\n for i in words:\n visited.add(i)\n\n ret = []\n for word, path in zip(words, paths):\n if word == end:\n ret.append(path)\n continue\n if ret:\n return ret\n\n next_words = []\n next_paths = []\n for word, path in zip(words, paths):\n starts = get_one_change(word, dict_, visited)\n for next_ in starts:\n next_words.append(next_)\n next_paths.append(path + [next_])\n\n if next_words:\n return _ladder(next_words, next_paths)\n\n return []\n\n return _ladder([start], [[start]])\n\n\ndef test():\n start = \"hit\"\n end = \"cog\"\n dict_ = [\"hit\", \"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]\n return_ = [\n [\"hit\", \"hot\", \"dot\", \"dog\", \"cog\"],\n [\"hit\", \"hot\", \"lot\", \"log\", \"cog\"]\n ]\n print(get_ladder(start, end, dict_))\n assert get_ladder(start, end, dict_) == return_\n\n\nif __name__ == '__main__':\n \"\"\"\n python -m mixleet.201908.WordLadderII test\n \"\"\"\n from fire import Fire\n Fire()\n\n", "repo_name": "spiritdjy/MixLeetCode", "sub_path": 
"mixleet/leecode/WordLadderII.py", "file_name": "WordLadderII.py", "file_ext": "py", "file_size_in_byte": 2042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fire.Fire", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "22026805354", "text": "\"\"\"initial\n\nRevision ID: a07e50720472\nRevises: \nCreate Date: 2023-05-22 16:36:13.721089\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a07e50720472'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('admin',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=150), nullable=True),\n sa.Column('password', sa.String(length=150), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('audio',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('chat_id', sa.BigInteger(), nullable=True),\n sa.Column('text', sa.Text(), nullable=True),\n sa.Column('distination', sa.String(length=150), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('channel',\n sa.Column('chat_id', sa.BigInteger(), autoincrement=False, nullable=False),\n sa.Column('title', sa.String(length=150), nullable=True),\n sa.PrimaryKeyConstraint('chat_id'),\n sa.UniqueConstraint('chat_id')\n )\n op.create_table('user',\n sa.Column('user_id', sa.BigInteger(), autoincrement=False, nullable=False),\n sa.Column('username', sa.String(length=50), nullable=True),\n sa.Column('first_name', sa.String(length=50), nullable=True),\n sa.PrimaryKeyConstraint('user_id'),\n sa.UniqueConstraint('user_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user')\n op.drop_table('channel')\n op.drop_table('audio')\n op.drop_table('admin')\n # ### end Alembic commands ###\n", "repo_name": "KarimovMurodilla/post-tts", "sub_path": "utils/db_api/migrations/versions/a07e50720472_initial.py", "file_name": "a07e50720472_initial.py", "file_ext": "py", "file_size_in_byte": 1750, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 34, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 40, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 40, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.BigInteger", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 45, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 52, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 
52, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 53, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 53, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 54, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 54, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 55, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "8171580977", "text": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport os, sys\nimport shutil, tempfile\nimport re\nfrom enum import Enum\nfrom subprocess import Popen, PIPE\n\n# Dependencies:\n# - p8tool (from picotool repo) must be in PATH\n# - luamin must have been installed locally with `npm update` or `pico-boots/setup.sh`\n\n# This script minifies the __lua__ section of a cartridge {game}.p8:\n# 1. It uses p8tool listlua A.p8 to quickly extract the __lua__ code into {game}.lua\n# 2. Convert remaining bits of pico8 lua (generated by p8tool) into clean lua\n# 3. It applies luamin to {game}.lua and outputs to {game}_min.lua\n# 4. It reads the header (before __lua__) of {game}.p8 and copies it into {game}_min.p8\n# 5. It appends {game}_min.lua's content to {game}_min.p8\n# 6. It finishes reading {game}.p8's remaining sections and appends them into {game}_min.p8\n# 7. It replaces {game}.p8 with {game}_min.p8\n\nMINIFY_SCRIPT_RELATIVE_PATH = \"npm/node_modules/.bin/luamin\"\n\nscript_dir_path = os.path.dirname(os.path.realpath(__file__))\nminify_script_path = os.path.join(script_dir_path, MINIFY_SCRIPT_RELATIVE_PATH)\n\nLUA_HEADER = b\"__lua__\\n\"\n# Note that this pattern captures 1. condition 2. result of a \"one-line if\" if it is,\n# but that it also matches a normal if-then, requiring a check before using the pattern.\n# This pattern may not be exhaustive as the user may put extra brackets but still use if-then\n# if issues arive, add a negative check to make sure the line doesn't end with \"then\" or even \"\\\"\n# In practice, a developer using pico-boots should write clean lua, and only picotool\n# should add a short if statement for the require bridging code.\nPICO8_ONE_LINE_IF_PATTERN = re.compile(r\"if \\(([^)]*)\\) (.*)\")\n\n\nclass Phase(Enum):\n CARTRIDGE_HEADER = 1 # copying header, from \"pico-8 cartridge...\" to \"__lua__\"\n LUA_SECTION = 2 # found \"__lua__\", still copy the 2 author/version comment lines then appending minified lua all at once\n LUA_CATCHUP = 3 # skipping the unused unminified lua until we reach the other sections\n OTHER_SECTIONS = 4 # copying the last sections\n\n\ndef minify_lua_in_p8(cartridge_filepath, use_aggressive_minification):\n \"\"\"\n Minifies the __lua__ section of a p8 cartridge, using luamin.\n\n \"\"\"\n logging.debug(f\"Minifying lua in cartridge {cartridge_filepath}...\")\n\n root, ext = os.path.splitext(cartridge_filepath)\n if not ext.endswith(\".p8\"):\n logging.error(f\"Cartridge filepath '{cartridge_filepath}' does not end with '.p8'\")\n sys.exit(1)\n\n min_cartridge_filepath = f\"{root}_min.p8\"\n lua_filepath = f\"{root}.lua\"\n min_lua_filepath = f\"{root}_min.lua\"\n\n # Step 1: extract lua code into separate file\n with open(lua_filepath, 'w') as lua_file:\n extract_lua(cartridge_filepath, lua_file)\n\n # Step 2: clean lua code in this file in-place\n with open(lua_filepath, 'r') as lua_file:\n # create temporary file object (we still need to open it with mode to get file descriptor)\n 
temp_file_object, temp_filepath = tempfile.mkstemp()\n original_char_count = sum(len(line) for line in lua_file)\n print(f\"Original lua code has {original_char_count} characters\")\n lua_file.seek(0)\n clean_lua(lua_file, os.fdopen(temp_file_object, 'w'))\n # replace original lua code with clean code\n os.remove(lua_filepath)\n shutil.move(temp_filepath, lua_filepath)\n\n # Step 3: apply luamin to generate minified code in a different file\n with open(min_lua_filepath, 'w+') as min_lua_file:\n minify_lua(lua_filepath, min_lua_file, use_aggressive_minification)\n min_lua_file.seek(0)\n min_char_count = sum(len(line) for line in min_lua_file)\n print(f\"Minified lua code to {min_char_count} characters\")\n if min_char_count > 65536:\n logging.error(f\"Maximum character count of 65536 has been exceeded, cartridge would be truncated in PICO-8, so exit with failure.\")\n sys.exit(1)\n\n # Step 4-6: inject minified lua code into target cartridge\n phase = Phase.CARTRIDGE_HEADER\n with open(cartridge_filepath, 'r') as source_file, \\\n open(min_cartridge_filepath, 'w') as target_file, \\\n open(min_lua_filepath, 'r') as min_lua_file:\n inject_minified_lua_in_p8(source_file, target_file, min_lua_file)\n\n # Step 7: replace original p8 with minified p8, clean up intermediate files\n os.remove(cartridge_filepath)\n os.remove(lua_filepath)\n os.remove(min_lua_filepath)\n shutil.move(min_cartridge_filepath, cartridge_filepath)\n\n\ndef extract_lua(source_filepath, lua_file):\n \"\"\"\n Extract lua from .p8 cartridge at source_filepath (string) to lua_file (file descriptor: write)\n\n \"\"\"\n # p8tool listrawlua bug (https://github.com/dansanderson/picotool/issues/59)\n # was fixed, so we prefer it to listlua as it is almost instant compared to listlua\n # which takes ~1s to parse the game .p8\n\n # However, note that it outputs an extra newline after *each* line, which will be stripped during minification most of the time\n # but will stay in [[multi-line strings]]. So we *must* skip every other line (preserve odd lines) using e.g. 
awk\n # https://superuser.com/questions/101756/show-only-odd-lines-with-cat\n\n # Usually a check_call(stdout=min_lua_file) (and no stderr) is enough,\n # as it throws CalledProcessError on error by itself, but in this case, due to output stream sync issues\n # (luamin error shown before __main__ print at the bottom of this script),\n # we prefer Popen + PIPE + communicate() + check stderrdata\n # For awk we just use a '|' in shell mode, a bit easier than calling Popen a second time with stdin = stdout of first process\n (_stdoutdata, stderrdata) = Popen([f\"p8tool listrawlua \\\"{source_filepath}\\\" | awk 'NR % 2 == 1'\"], shell=True, stdout=lua_file, stderr=PIPE).communicate()\n if stderrdata:\n logging.error(f\"p8tool listrawlua failed with:\\n\\n{stderrdata.decode()}\")\n sys.exit(1)\n\n\ndef clean_lua(lua_file, clean_lua_file):\n \"\"\"\n Convert PICO-8 specific lines from to lua_file (file descriptor: read)\n to native Lua in clean_lua_file (file descriptor: write)\n\n \"\"\"\n for line in lua_file:\n # we simplify things a lot thanks to our assumptions on the generated code\n # we know that the only pico8 one-line if will be generated for the require function\n # and have the pattern \"if (condition) [result]\" without \"then\",\n # and there are no edge cases like embedded conditions or continuing line with \"\\\"\n if line.startswith(\"if (\") and \"then\" not in line:\n # convert to \"if [condition] then [result] end\"\n clean_lua_file.write(PICO8_ONE_LINE_IF_PATTERN.sub(\"if \\\\1 then \\\\2 end\", line))\n else:\n clean_lua_file.write(line)\n\n\n\ndef minify_lua(clean_lua_filepath, min_lua_file, use_aggressive_minification=False):\n \"\"\"\n Minify lua from clean_lua_filepath (string)\n and send output to min_lua_file (file descriptor: write)\n\n Use option:\n -f to pass filepath\n -n to use newline separator\n\n For aggressive minification only, use option:\n -mk to minify member names and table key strings (should be done together as some members will be defined\n directly inside table, others defined and accessed with dot syntax)\n\n \"\"\"\n options = \"-fn\"\n if use_aggressive_minification:\n options += \"mk\"\n\n # see extract_lua for reason to use Popen\n (_stdoutdata, stderrdata) = Popen([minify_script_path, options, clean_lua_filepath], stdout=min_lua_file, stderr=PIPE).communicate()\n if stderrdata:\n logging.error(f\"Minify script failed with:\\n\\n{stderrdata.decode()}\")\n sys.exit(1)\n\n\ndef inject_minified_lua_in_p8(source_file, target_file, min_lua_file):\n \"\"\"\n Inject minified lua from min_lua_file (file descriptor: read)\n into a copy of source_file (file descriptor: read)\n producing target_file (file descriptor: write)\n\n \"\"\"\n phase = Phase.CARTRIDGE_HEADER\n for line in source_file:\n if phase is Phase.CARTRIDGE_HEADER:\n # Step 4: copy header (also copy the \"__lua__\" line just after)\n target_file.write(line)\n if line == \"__lua__\\n\":\n # enter lua section\n phase = Phase.LUA_SECTION\n\n elif phase is Phase.LUA_SECTION:\n # Step 5: copy minified lua\n target_file.writelines(min_lua_file.readlines())\n target_file.write(\"\\n\") # newline required before other sections\n phase = Phase.LUA_CATCHUP\n\n elif phase is Phase.LUA_CATCHUP:\n # skip all lines until __gfx__\n if line == \"__gfx__\\n\":\n # copy the __gfx__ line itself\n target_file.write(line)\n phase = Phase.OTHER_SECTIONS\n\n else: # phase is Phase.CARTRIDGE_HEADER\n # Step 6: copy remaining sections\n target_file.write(line)\n\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='Minify lua code in cartridge.')\n parser.add_argument('path', type=str, help='path containing cartridge file to minify')\n parser.add_argument('--aggressive-minify', action='store_true', help=\"use aggressive minification (minify member names and table key strings)\")\n args = parser.parse_args()\n\n logging.basicConfig(level=logging.INFO)\n logging.info(f\"Minifying lua code in {args.path} with aggressive minification: {'ON' if args.aggressive_minify else 'OFF'}...\")\n\n minify_lua_in_p8(args.path, args.aggressive_minify)\n\n logging.info(f\"Minified lua code in {args.path}\")\n", "repo_name": "hsandt/pico-boots", "sub_path": "scripts/minify.py", "file_name": "minify.py", "file_ext": "py", "file_size_in_byte": 9678, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 36, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 39, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 69, "usage_type": "call"}, {"api_name": "os.fdopen", "line_number": 73, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 75, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 85, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 86, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 96, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 97, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 98, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 99, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 120, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 120, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 122, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 123, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 164, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 164, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 166, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 167, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 205, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 210, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 210, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 211, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "73306036965", "text": "from utils import *\n\nimport dgl.function as fn\nimport math\n\n\nclass GCNLayer(nn.Module):\n\tdef __init__(self, 
in_feats, out_feats, activation=F.relu, dropout=0.1, bias=True):\n\t\tsuper(GCNLayer, self).__init__()\n\t\tself.weight = nn.Parameter(torch.Tensor(in_feats, out_feats)).cuda()\n\t\tif bias:\n\t\t\tself.bias = nn.Parameter(torch.Tensor(out_feats)).cuda()\n\t\telse:\n\t\t\tself.bias = None\n\t\tself.activation = activation\n\t\tif dropout:\n\t\t\tself.dropout = nn.Dropout(p=dropout)\n\t\telse:\n\t\t\tself.dropout = 0.\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\t\tstdv = 1. / math.sqrt(self.weight.size(1))\n\t\tself.weight.data.uniform_(-stdv, stdv)\n\t\tif self.bias is not None:\n\t\t\tself.bias.data.uniform_(-stdv, stdv)\n\n\tdef forward(self, g, h):\n\t\tif self.dropout:\n\t\t\th = self.dropout(h)\n\t\th = torch.mm(h, self.weight.cuda())\n\t\tg.ndata['h'] = h\n\t\tg.update_all(fn.copy_src(src='h', out='m'), fn.sum(msg='m', out='h'))\n\t\th = g.ndata.pop('h')\n\t\tif self.bias is not None:\n\t\t\th = h + self.bias\n\t\tif self.activation:\n\t\t\th = self.activation(h)\n\t\treturn h\n\n\nclass GCN(nn.Module):\n\tdef __init__(self, in_dim, hidden_dim, n_desc, n_classes):\n\t\tsuper(GCN, self).__init__()\n\t\tself.layers = nn.ModuleList()\n\t\tself.layers.append(GCNLayer(in_dim, hidden_dim[0], F.relu, 0.))\n\t\tfor i, hid in enumerate(hidden_dim[:-1]):\n\t\t\tself.layers.append(GCNLayer(hid, hidden_dim[i + 1], F.relu, 0.2))\n\n\t\tself.lin1 = nn.Linear(hidden_dim[-1] + n_desc, 1000)\n\t\tself.lin2 = nn.Linear(1000, 500)\n\t\tself.lin3 = nn.Linear(500, 100)\n\t\tself.lin4 = nn.Linear(100, 20)\n\t\tself.drop = nn.Dropout(0.2)\n\t\tself.classify = nn.Linear(20, n_classes)\n\n\tdef forward(self, g, features, descriptors):\n\t\t# GCN\n\t\th = features\n\t\tfor layer in self.layers:\n\t\t\th = layer(g, h)\n\t\tg.ndata['h'] = h\n\t\tfeats = dgl.mean_nodes(g, 'h')\n\n\t\t# Concat (GCN_feat, descriptors)\n\t\th = torch.cat((feats, descriptors), 1)\n\n\t\t# Classify\n\t\th = self.drop(F.relu(self.lin1(h)))\n\t\th = self.drop(F.relu(self.lin2(h)))\n\t\th = self.drop(F.relu(self.lin3(h)))\n\t\th = self.drop(F.relu(self.lin4(h)))\n\t\treturn self.classify(h), feats\n\n\nclass GCN_des(nn.Module):\n\tdef __init__(self, in_dim, hidden_dim, n_desc, n_classes):\n\t\tsuper(GCN_des, self).__init__()\n\t\tself.layers = nn.ModuleList()\n\t\tself.layers.append(GCNLayer(in_dim, hidden_dim[0], F.relu, 0.))\n\t\tfor i, hid in enumerate(hidden_dim[:-1]):\n\t\t\tself.layers.append(GCNLayer(hid, hidden_dim[i + 1], F.relu, 0.2))\n\n\t\tself.lin1 = nn.Linear(hidden_dim[-1], 10)\n\t\tself.lin2 = nn.Linear(10, 5)\n\t\tself.drop = nn.Dropout(0.2)\n\t\tself.classify = nn.Linear(5, n_classes)\n\n\tdef forward(self, g, features, descriptors):\n\t\t# GCN\n\t\th = features\n\t\tfor layer in self.layers:\n\t\t\th = layer(g, h)\n\t\tg.ndata['h'] = h\n\t\tfeats = dgl.mean_nodes(g, 'h')\n\n\t\t# Concat (GCN_feat, descriptors)\n\t\t# h = torch.cat((h, descriptors), 1)\n\n\t\t# Classify\n\t\th = self.drop(F.relu(self.lin1(feats)))\n\t\th = self.drop(F.relu(self.lin2(h)))\n\t\treturn self.classify(h), feats", "repo_name": "anandrajasekar18/covid_drug_design", "sub_path": "task1/gcn_model/gcn_model.py", "file_name": "gcn_model.py", "file_ext": "py", "file_size_in_byte": 2883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.sqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "dgl.function.copy_src", "line_number": 33, "usage_type": "call"}, {"api_name": "dgl.function", "line_number": 33, "usage_type": "name"}, 
{"api_name": "dgl.function.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "dgl.function.mean_nodes", "line_number": 63, "usage_type": "call"}, {"api_name": "dgl.function", "line_number": 63, "usage_type": "name"}, {"api_name": "dgl.function.mean_nodes", "line_number": 95, "usage_type": "call"}, {"api_name": "dgl.function", "line_number": 95, "usage_type": "name"}]} +{"seq_id": "20601750875", "text": "import logging\nfrom datetime import datetime\nimport time\nimport pytz\n\nlogging.basicConfig(filename='output.log', level=logging.INFO)\nlogger = logging.getLogger()\n\n\ndef wait(timeout=120):\n def decorator(f):\n def wrapper(*args, **kwargs):\n logger.info(f'{timestamp()} - sleeping for {timeout} seconds before calling {f.__name__} ...')\n time.sleep(timeout)\n # value_if_true if condition else value_if_false\n logger.info(\n f'{timestamp()} - running {f.__name__} {\"with\" if args or kwargs else \"\"} {args if args else \"\"} {kwargs if kwargs else \"\"}')\n return f(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef timestamp():\n timezone = pytz.timezone('Europe/Berlin')\n now = timezone.localize(datetime.now())\n return now.strftime(\"%d-%m-%Y %H:%M:%S\")\n\n\nif __name__ == '__main__':\n foo(\"fed\")\n", "repo_name": "JBartscher/BA_Datengrundlage", "sub_path": "util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 14, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "69898620325", "text": "from plotly.graph_objs import Pie, Figure\nfrom plotly.offline import iplot\n\ntry:\n from pandas.core.base import DataError\nexcept ImportError:\n try:\n from pandas.core.groupby import DataError\n except ImportError:\n from pandas.errors import DataError\n\nimport autovizwidget.utils.configuration as conf\nfrom .graphbase import GraphBase\n\n\nclass PieGraph(GraphBase):\n @staticmethod\n def render(df, encoding, output):\n if encoding.x is None:\n with output:\n print(\"\\n\\n\\nPlease select an X axis.\")\n return\n\n try:\n values, labels = PieGraph._get_x_values_labels(df, encoding)\n except TypeError:\n with output:\n print(\n \"\\n\\n\\nCannot group by X selection because of its type: '{}'. Please select another column.\".format(\n df[encoding.x].dtype\n )\n )\n return\n except (ValueError, DataError):\n with output:\n print(\n \"\\n\\n\\nCannot group by X selection. 
Please select another column.\".format(\n df[encoding.x].dtype\n )\n )\n if df.size == 0:\n print(\"\\n\\n\\nCannot display a pie graph for an empty data set.\")\n return\n\n max_slices_pie_graph = conf.max_slices_pie_graph()\n with output:\n # There's performance issues with a large amount of slices.\n # 1500 rows crash the browser.\n # 500 rows take ~15 s.\n # 100 rows is almost automatic.\n if len(values) > max_slices_pie_graph:\n print(\n \"There's {} values in your pie graph, which would render the graph unresponsive.\\n\"\n \"Please select another X with at most {} possible values.\".format(\n len(values), max_slices_pie_graph\n )\n )\n else:\n data = [Pie(values=values, labels=labels)]\n\n fig = Figure(data=data)\n iplot(fig, show_link=False)\n\n @staticmethod\n def display_logarithmic_x_axis():\n return False\n\n @staticmethod\n def display_logarithmic_y_axis():\n return False\n\n @staticmethod\n def _get_x_values_labels(df, encoding):\n if encoding.y is None:\n series = df.groupby([encoding.x]).size()\n values = series.values.tolist()\n labels = series.index.tolist()\n else:\n labels, values = GraphBase._get_x_y_values(df, encoding)\n return values, labels\n", "repo_name": "jupyter-incubator/sparkmagic", "sub_path": "autovizwidget/autovizwidget/plotlygraphs/piegraph.py", "file_name": "piegraph.py", "file_ext": "py", "file_size_in_byte": 2676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1265, "dataset": "github-code", "pt": "52", "api": [{"api_name": "graphbase.GraphBase", "line_number": 16, "usage_type": "name"}, {"api_name": "pandas.errors.DataError", "line_number": 34, "usage_type": "name"}, {"api_name": "autovizwidget.utils.configuration.max_slices_pie_graph", "line_number": 45, "usage_type": "call"}, {"api_name": "autovizwidget.utils.configuration", "line_number": 45, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Pie", "line_number": 59, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Figure", "line_number": 61, "usage_type": "call"}, {"api_name": "plotly.offline.iplot", "line_number": 62, "usage_type": "call"}, {"api_name": "graphbase.GraphBase._get_x_y_values", "line_number": 79, "usage_type": "call"}, {"api_name": "graphbase.GraphBase", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "27752935729", "text": "from enum import Enum\nimport os\nimport re\nimport typing\nimport time\nimport gzip\nfrom threading import Thread\nfrom commonfun import *\n\n# 使用eval()函数创建对象实例\n# 使用getattr()获取属性值,获取方法并调用\n# 使用setattr()设置属性值\n\nclass SearchStatus(Enum):\n NOSTART=1\n START = 2\n STARTING = 3\n FINISH =4\n \nclass logBase():\n def __init__(self) -> None:\n self.resCallFun = None\n self.currentSearchLine = None\n self.searchStatusChangeFun = None\n self.sendMsgFun = None\n self.searchStatus = SearchStatus.NOSTART\n self.logThread = None\n self.stop = False\n self.tempIndex = -1\n self.keyWordSeries = {}\n self.clear()\n\n def clear(self):\n self.filePaths = []\n self.lineContents = []\n\n def load(self,filePaths):\n self.filePaths.extend(filePaths)\n filePathIndex = 0\n for filePath in self.filePaths:\n assert isinstance(filePath,str)\n self.loadLogFile(filePath)\n filePathIndex = filePathIndex + 1\n self.currentSearchLineChanged(filePathIndex,len(self.filePaths))\n \n\n def searchContentChanged(self,dateStr,value,content,series):\n if self.resCallFun != None:\n self.resCallFun(dateStr,value,content,series)\n else:\n print(dateStr,\" \",value,\" \",content)\n\n def sendMsg(self,msg):\n if self.sendMsgFun != None:\n self.sendMsgFun(msg)\n\n 
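# Record the new search status and forward it to the registered callback, if any.\n 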
def searchStatusChange(self,status):\n self.searchStatus = status\n if self.searchStatusChangeFun != None:\n self.searchStatusChangeFun(self.searchStatus)\n\n def currentSearchLineChanged(self,lineIndex,count):\n if self.currentSearchLine != None:\n lineIndex = int(lineIndex / count *100)\n if self.tempIndex != lineIndex:\n self.tempIndex = lineIndex\n self.currentSearchLine(self,lineIndex)\n else:\n pass\n\n def loadLogFile(self,filePath):\n # plain-text fallback; dltLogBase overrides this for .dlt/.gz logs\n self.lineContents.extend(readFileLines(filePath))\n return True\n\n def getLoglineCount(self):\n return len(self.lineContents)\n\n def setKeyWords(self,keyWordSeries):\n self.keyWordSeries = keyWordSeries\n\n def startSearchThread(self):\n self.searchStatusChange(SearchStatus.NOSTART)\n if self.getLoglineCount() == 0 or len(self.keyWordSeries) == 0: return\n self.logThread = Thread(target=self.searchkeyWords)\n self.logThread.daemon = True\n self.logThread.start()\n\n def searchkeyWords(self):\n self.startAnalyze()\n\n def startAnalyze(self):\n self.searchStatusChange(SearchStatus.START)\n lineContentIndex = 0\n keyConentCount = {}\n keyValueConentCount = {}\n searchCount = 0\n isFirstPrint = True\n for keyWord in self.keyWordSeries:\n keyConentCount[keyWord] = 0\n keyValueConentCount[keyWord] = 0\n for lineContent in self.lineContents:\n if self.stop: return\n assert isinstance(lineContent,str)\n for keyWord,series in self.keyWordSeries.items():\n if re.search(keyWord,lineContent,re.A):\n keyConentCount[keyWord] = keyConentCount[keyWord] +1\n searchCount = searchCount + 1\n spaceContent = lineContent.split(\" \")\n dateStr = spaceContent[1] +\" \"+spaceContent[2]\n contents = re.findall(e_i,lineContent,re.A)\n ishasValue = False\n for contentIndex in range(len(contents)):\n if \"value\" in contents[contentIndex]:\n contentNextIndex = contentIndex + 1\n value = contents[contentNextIndex]\n self.searchContentChanged(dateStr,value,lineContent,series)\n keyValueConentCount[keyWord] = keyValueConentCount[keyWord] +1\n ishasValue = True\n\n if not ishasValue:\n if isFirstPrint:\n isFirstPrint = False\n self.sendMsg(f'Lines that did not match the value-parsing rule:')\n self.sendMsg(f'{lineContent}')\n lineContentIndex = lineContentIndex+1\n self.currentSearchLineChanged(lineContentIndex,self.getLoglineCount())\n self.searchStatusChange(SearchStatus.FINISH)\n for keyWord in self.keyWordSeries:\n self.sendMsg(f'matched {keyWord}: {keyConentCount[keyWord]} lines')\n self.sendMsg(f'displayed {keyWord}: {keyValueConentCount[keyWord]} lines')\n self.sendMsg(f\"search finished, {searchCount} matches in total\\n\")\n\nclass dltLogBase(logBase):\n def __init__(self) -> None:\n super().__init__()\n self.dltExe = None\n\n def setDltExe(self,dltExe):\n self.dltExe = dltExe\n\n @staticmethod\n def gunZip(filePath):\n extracted_file_path = filePath.replace('.gz','')\n try:\n # open the gzip archive\n with gzip.open(filePath, 'rb') as gz_file:\n # read the decompressed contents\n extracted_data = gz_file.read()\n\n # write the decompressed data to a new file\n with open(extracted_file_path, 'wb') as extracted_file:\n extracted_file.write(extracted_data)\n\n return extracted_file_path\n except IOError as e:\n print(f\"decompression failed: {e}\")\n return None\n \n def getConverTxt(self,filePath):\n fileName = os.path.basename(filePath)\n fileName = fileName.replace('.gz','')\n fileName = fileName.replace('.dlt','.txt')\n fileDir = os.path.dirname(filePath)+\"/\"\n logFile = fileDir + fileName\n # print(logFile)\n if os.path.exists(logFile):\n return logFile\n return None\n\n def dltToTxt(self,logPath,filePath,logFile):\n filePath = os.path.basename(filePath)\n logFile = 
os.path.basename(logFile)\n os.system(f'cd {logPath} && {self.dltExe} -c {filePath} {logFile}')\n\n def loadLogFile(self,filePath):\n assert isinstance(filePath,str)\n logFileDir = os.path.dirname(filePath)\n logFile = self.getConverTxt(filePath)\n if logFile == None and len(self.dltExe) != 0:\n if getSuffix(filePath) == '.gz':\n filePath = dltLogBase.gunZip(filePath)\n if filePath == None:\n return False\n logFile = filePath.replace('.dlt','.txt')\n self.dltToTxt(logFileDir,filePath,logFile)\n self.lineContents.extend(readFileLines(logFile))\n return True\n\nif __name__ == '__main__':\n logObj = dltLogBase()\n logObj.setDltExe(\"D:/Soft/DltViewerSDK/dlt-viewer.exe\")\n logObj.load([r\"C:/Users/chengxiong.zhu/Downloads/log分析/2023-09-24/ BGS-62018 385DAuserDF04镁佳1553左右手机端打开远程智能泊车CdcRemVideoPwrUpReq发送上高压请求退出远程智能泊车后座舱高压CdcRemVideoPwrUpReq未置为无请求没有下高压解锁座舱高压置为无请求/log_003460_20230922-153505.dlt\"])\n # logObj.startSearch(\"HUD_CDC_5C5\")\n while 1 :\n time.sleep(10)", "repo_name": "zcx01/Achievement", "sub_path": "pythonscript/chartAnalyze/loganalyze/baseAnalyze.py", "file_name": "baseAnalyze.py", "file_ext": "py", "file_size_in_byte": 7290, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.overload", "line_number": 71, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 85, "usage_type": "call"}, {"api_name": "re.search", "line_number": 107, "usage_type": "call"}, {"api_name": "re.A", "line_number": 107, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 112, "usage_type": "call"}, {"api_name": "re.A", "line_number": 112, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 174, "usage_type": "call"}, {"api_name": "os.path", "line_number": 174, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "33321173858", "text": "from argparse import Namespace\nfrom pathlib import Path\nfrom typing import Any, Dict, List\n\nimport cairo\nimport cv2\nimport numpy as np\nfrom nonebot.adapters.onebot.v11 import Bot, MessageEvent, MessageSegment\nfrom nonebot.params import ShellCommandArgs\nfrom nonebot.rule import ArgumentParser\nfrom PIL import Image, ImageChops, ImageFilter, ImageOps\n\nfrom util import command, imutil, misc\nfrom util.misc import range_int\nfrom util.user_aliases import AvatarGetter, DefaultType\n\n\ndef kernel_average(size: int) -> np.ndarray[Any, Any]:\n return np.full((size, size), 1 / size ** 2)\n\n\nDIR = Path(__file__).resolve().parent\nKERNELS: 
Dict[str, np.ndarray[Any, Any]] = {\n \"thin\": kernel_average(5),\n \"normal\": kernel_average(7),\n \"semibold\": kernel_average(9),\n \"bold\": kernel_average(11),\n \"black\": kernel_average(13),\n \"emboss\": np.array([\n [1, 1, 1],\n [1, 1, -1],\n [-1, -1, -1],\n ]),\n}\n# 这些选项在原网站不可调\nSHADE_LIGHT = 80\nLIGHT_CUT = 128\n\n\ndef make_mask(\n im: Image.Image,\n pencil: Image.Image,\n kernel: str = \"normal\",\n dark_cut: int = 118, # 对应原网站线迹轻重\n shade_limit: int = 108, # 对应原网站调子数量\n denoise: bool = True # 对应原网站降噪\n) -> Image.Image:\n shade = im.point(lambda v: 0 if v > shade_limit else 255, \"L\")\n shade = shade.filter(ImageFilter.BoxBlur(3))\n shade = ImageChops.multiply(shade, ImageChops.invert(pencil))\n shade = ImageChops.multiply(shade, Image.new(\"L\", shade.size, SHADE_LIGHT))\n\n if denoise:\n im = im.filter(ImageFilter.Kernel((3, 3), [1] * 9, 9))\n\n # 因为PIL只支持3x3和5x5的卷积核,NumPy的卷积是一维的,要用OpenCV\n im1 = Image.fromarray(cv2.filter2D(np.array(im), -1, KERNELS[kernel]))\n im = ImageChops.subtract(im, im1, 1, 128)\n\n scale = (255 - LIGHT_CUT - dark_cut) / 255\n im = ImageChops.subtract(im, Image.new(\"L\", im.size, dark_cut), scale)\n\n return ImageChops.lighter(ImageChops.invert(im), shade)\n\n\ndef make_gradient(width: int, height: int) -> Image.Image:\n with cairo.ImageSurface(cairo.FORMAT_RGB24, width, height) as surface:\n cr = cairo.Context(surface)\n gradient = cairo.LinearGradient(0, 0, width, height)\n gradient.add_color_stop_rgb(0.0, 0.984313725490196, 0.7294117647058823, 0.18823529411764706)\n gradient.add_color_stop_rgb(0.4, 0.9882352941176471, 0.4470588235294118, 0.20784313725490197)\n gradient.add_color_stop_rgb(0.6, 0.9882352941176471, 0.20784313725490197, 0.3058823529411765)\n gradient.add_color_stop_rgb(0.7, 0.8117647058823529, 0.21176470588235294, 0.8745098039215686)\n gradient.add_color_stop_rgb(0.8, 0.21568627450980393, 0.7098039215686275, 0.8509803921568627)\n gradient.add_color_stop_rgb(1.0, 0.24313725490196078, 0.7137254901960784, 0.8549019607843137)\n cr.set_source(gradient)\n cr.rectangle(0, 0, width, height)\n cr.fill()\n return imutil.from_cairo(surface)\n\n\nparser = ArgumentParser(add_help=False)\nparser.add_argument(\"target\", nargs=\"?\", default=\"\", metavar=\"目标\", help=(\n \"可使用@、QQ号、昵称、群名片或图片链接(可传入动图)\"\n))\nparser.add_argument(\"--style\", \"-s\", choices=list(KERNELS), default=\"normal\", help=(\n \"线条风格,可用: thin (精细)、normal (一般)、semibold (稍粗)、bold (超粗)、black (极粗)、\"\n \"emboss (浮雕),默认: normal\"\n))\nparser.add_argument(\"--edge\", \"-e\", type=range_int(80, 126), default=118, metavar=\"强度\", help=(\n \"边缘强度,为 [80, 126] 之间的整数,默认: 118\"\n))\nparser.add_argument(\"--shade\", \"-a\", type=range_int(20, 200), default=108, metavar=\"强度\", help=(\n \"暗部强度,为 [20, 200] 之间的整数,默认: 108\"\n))\nparser.add_argument(\"--no-denoise\", \"-D\", action=\"store_false\", dest=\"denoise\", help=\"不进行降噪\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\n \"--webp\", \"-w\", action=\"store_const\", dest=\"format\", const=\"webp\", default=\"gif\",\n help=\"使用WebP而非GIF格式(如果传入动图)\"\n)\ngroup.add_argument(\n \"--png\", \"--apng\", \"-p\", action=\"store_const\", dest=\"format\", const=\"png\",\n help=\"使用APNG而非GIF格式(如果传入动图)\"\n)\nparser.epilog = \"特别感谢: https://lab.magiconch.com/one-last-image/\"\nmatcher = (\n command.CommandBuilder(\"meme_pic.louvre\", \"卢浮宫\")\n .category(\"meme_pic\")\n .brief(\"[动]\")\n .shell(parser)\n .build()\n)\n@matcher.handle()\nasync def handler(bot: Bot, event: MessageEvent, args: Namespace = ShellCommandArgs()) -> 
None:\n async with AvatarGetter(bot, event) as g:\n target_task = g(args.target, DefaultType.TARGET, raw=True)\n\n def make() -> MessageSegment:\n target, _ = target_task.result()\n pencil = Image.open(DIR / \"pencil.jpg\").convert(\"L\")\n pencil = ImageOps.fit(pencil, target.size, imutil.scale_resample())\n gradient = make_gradient(target.width, target.height)\n frames: List[Image.Image] = []\n for raw in imutil.frames(target):\n l, a = raw.convert(\"LA\").split()\n frame = Image.new(\"L\", l.size, 255)\n frame.paste(l, mask=a)\n mask = make_mask(frame, pencil, args.style, args.edge, args.shade, args.denoise)\n frame = Image.new(\"RGB\", l.size, (255, 255, 255))\n frame.paste(gradient, mask=mask)\n frames.append(frame)\n return imutil.to_segment(frames, target, afmt=args.format)\n\n await matcher.finish(await misc.to_thread(make))\n", "repo_name": "su226/IdhagnBot", "sub_path": "plugins/meme_pic/louvre/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 5318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.full", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 23, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.Image.Image", "line_number": 42, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 42, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.BoxBlur", "line_number": 49, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 49, "usage_type": "name"}, {"api_name": "PIL.ImageChops.multiply", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 50, "usage_type": "name"}, {"api_name": "PIL.ImageChops.invert", "line_number": 50, "usage_type": "call"}, {"api_name": "PIL.ImageChops.multiply", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 51, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 51, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.Kernel", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 57, "usage_type": "name"}, {"api_name": "cv2.filter2D", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "PIL.ImageChops.subtract", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.ImageChops.subtract", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 61, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 61, 
"usage_type": "name"}, {"api_name": "PIL.ImageChops.lighter", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.ImageChops.invert", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 47, "usage_type": "name"}, {"api_name": "cairo.ImageSurface", "line_number": 67, "usage_type": "call"}, {"api_name": "cairo.FORMAT_RGB24", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cairo.Context", "line_number": 68, "usage_type": "call"}, {"api_name": "cairo.LinearGradient", "line_number": 69, "usage_type": "call"}, {"api_name": "util.imutil.from_cairo", "line_number": 79, "usage_type": "call"}, {"api_name": "util.imutil", "line_number": 79, "usage_type": "name"}, {"api_name": "PIL.Image.Image", "line_number": 66, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 66, "usage_type": "name"}, {"api_name": "nonebot.rule.ArgumentParser", "line_number": 82, "usage_type": "call"}, {"api_name": "util.misc.range_int", "line_number": 90, "usage_type": "call"}, {"api_name": "util.misc.range_int", "line_number": 93, "usage_type": "call"}, {"api_name": "util.command.CommandBuilder", "line_number": 108, "usage_type": "call"}, {"api_name": "util.command", "line_number": 108, "usage_type": "name"}, {"api_name": "nonebot.adapters.onebot.v11.Bot", "line_number": 115, "usage_type": "name"}, {"api_name": "nonebot.adapters.onebot.v11.MessageEvent", "line_number": 115, "usage_type": "name"}, {"api_name": "argparse.Namespace", "line_number": 115, "usage_type": "name"}, {"api_name": "nonebot.params.ShellCommandArgs", "line_number": 115, "usage_type": "call"}, {"api_name": "util.user_aliases.AvatarGetter", "line_number": 116, "usage_type": "call"}, {"api_name": "util.user_aliases.DefaultType.TARGET", "line_number": 117, "usage_type": "attribute"}, {"api_name": "util.user_aliases.DefaultType", "line_number": 117, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 121, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 121, "usage_type": "name"}, {"api_name": "PIL.ImageOps.fit", "line_number": 122, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 122, "usage_type": "name"}, {"api_name": "util.imutil.scale_resample", "line_number": 122, "usage_type": "call"}, {"api_name": "util.imutil", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 124, "usage_type": "name"}, {"api_name": "PIL.Image.Image", "line_number": 124, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 124, "usage_type": "name"}, {"api_name": "util.imutil.frames", "line_number": 125, "usage_type": "call"}, {"api_name": "util.imutil", "line_number": 125, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 127, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 127, "usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 130, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 130, "usage_type": "name"}, {"api_name": "util.imutil.to_segment", "line_number": 133, "usage_type": "call"}, {"api_name": "util.imutil", "line_number": 133, "usage_type": "name"}, {"api_name": "nonebot.adapters.onebot.v11.MessageSegment", "line_number": 119, "usage_type": "name"}, {"api_name": "util.misc.to_thread", "line_number": 135, "usage_type": "call"}, {"api_name": "util.misc", "line_number": 135, "usage_type": 
"name"}]} +{"seq_id": "9169791557", "text": "from django.urls import path, re_path\n\nfrom . import views\n\napp_name = \"questions\"\nurlpatterns = [\n path(\"\", views.QuestionListView.as_view(), name=\"questions\"),\n path(\"search_question/\", views.SearchQuestionListView.as_view(), name=\"search_question\"),\n path(\"ask_question/\", views.AskQuestionView.as_view(), name=\"ask_question\"),\n path(\"tag/\", views.SearchQuestionByTagListView.as_view(), name=\"search_question_by_tag\"),\n path(\"question/\", views.QuestionDetailView.as_view(), name=\"question\"),\n path(\"question//add_answer\", views.AddAnswerView.as_view(), name=\"add_answer\"),\n path(\"create_tag\", views.CreateTagView.as_view(), name=\"create_tag\"),\n path(\"question//mark_answer_as_correct/\",\n views.MarkCorrectAnswerView.as_view(),\n name=\"mark_answer_as_correct\"),\n re_path(r'^question/(?P\\d+)/action/(?P[+-]1)',\n views.QuestionActionView.as_view(),\n name=\"question_action\"),\n re_path(r'^answer/(?P\\d+)/action/(?P[+-]1)',\n views.AnswerActionView.as_view(),\n name=\"answer_action\")\n]\n", "repo_name": "DalerBakhriev/otus_homeworks", "sub_path": "hw_week_7/hasker/hasker/questions/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "24583553515", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nnp.random.seed(1337)\nX = np.linspace(-1,1,200)\nnp.random.shuffle(X)\nN = np.random.normal(0,0.5,(200,)) ##噪声\nY=0.5 * X + 2 + N\n#plt.scatter(X,Y)\n#plt.show()\n\nx_train,y_train=X[:160],Y[:160]\n\nx_test,y_test = X[160:],Y[160:]\nmodel = Sequential()\nmodel.add(Dense(input_dim=1,units=1))\n\nmodel.compile(loss='mse',optimizer='sgd')\n\nprint(\"traning ....................\")\nfor step in range(301):\n cost = model.train_on_batch(x_train,y_train)\n if cost % 100 == 0 :\n print('\\n train cost:',cost)\n\n\nprint(\"\\ntest ........\")\ncost = model.evaluate(x_test,y_test,batch_size=40)\nprint('\\ntest cost :',cost)\n\nW,b = model.layers[0].get_weights()\nprint('Weight=',W,'\\n biases=',b)\ny_pred= model.predict(X)\nplt.plot(X,y_pred)\nplt.show()\n\n\n", "repo_name": "wwdz540/AI-TEST", "sub_path": "keras_test.py", "file_name": "keras_test.py", "file_ext": "py", "file_size_in_byte": 911, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.random", 
"line_number": 9, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "4010818187", "text": "'''\nby yzh 2022.2.13\n'''\n# 导入依赖\nimport random\nfrom utils.torch_utils import select_device, load_classifier, time_sync\nfrom utils.general import (\n check_img_size, non_max_suppression, apply_classifier, scale_coords,\n xyxy2xywh, strip_optimizer, set_logging)\nfrom utils.datasets import LoadStreams, LoadImages, letterbox\nfrom models.experimental import attempt_load\nimport torch.backends.cudnn as cudnn\nimport torch\n\nimport pyrealsense2 as rs\nimport math\nimport yaml\nimport argparse\nimport os\nimport time\nimport numpy as np\nimport sys\n\nimport cv2\n# PyTorch\n# YoloV5-PyTorch\n\npipeline = rs.pipeline() # 定义流程pipeline\nconfig = rs.config() # 定义配置config\nconfig.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)\nconfig.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)\nprofile = pipeline.start(config) # 流程开始\nalign_to = rs.stream.color # 与color流对齐\nalign = rs.align(align_to)\n\n\ndef get_aligned_images():\n frames = pipeline.wait_for_frames() # 等待获取图像帧\n aligned_frames = align.process(frames) # 获取对齐帧\n aligned_depth_frame = aligned_frames.get_depth_frame() # 获取对齐帧中的depth帧\n color_frame = aligned_frames.get_color_frame() # 获取对齐帧中的color帧\n\n ############### 相机参数的获取 #######################\n intr = color_frame.profile.as_video_stream_profile().intrinsics # 获取相机内参\n depth_intrin = aligned_depth_frame.profile.as_video_stream_profile(\n ).intrinsics # 获取深度参数(像素坐标系转相机坐标系会用到)\n '''camera_parameters = {'fx': intr.fx, 'fy': intr.fy,\n 'ppx': intr.ppx, 'ppy': intr.ppy,\n 'height': intr.height, 'width': intr.width,\n 'depth_scale': profile.get_device().first_depth_sensor().get_depth_scale()\n }'''\n\n # 保存内参到本地\n # with open('./intrinsics.json', 'w') as fp:\n #json.dump(camera_parameters, fp)\n #######################################################\n\n depth_image = np.asanyarray(aligned_depth_frame.get_data()) # 深度图(默认16位)\n depth_image_8bit = cv2.convertScaleAbs(depth_image, alpha=0.03) # 深度图(8位)\n depth_image_3d = np.dstack(\n (depth_image_8bit, depth_image_8bit, depth_image_8bit)) # 3通道深度图\n color_image = np.asanyarray(color_frame.get_data()) # RGB图\n\n # 返回相机内参、深度参数、彩色图、深度图、齐帧中的depth帧\n return intr, depth_intrin, color_image, depth_image, aligned_depth_frame\n\n\nclass YoloV5:\n def __init__(self, yolov5_yaml_path='config/yolov5s.yaml'):\n '''初始化'''\n # 载入配置文件\n with open(yolov5_yaml_path, 'r', encoding='utf-8') as f:\n self.yolov5 = yaml.load(f.read(), Loader=yaml.SafeLoader)\n # 随机生成每个类别的颜色\n self.colors = [[np.random.randint(0, 255) for _ in range(\n 3)] for class_id in range(self.yolov5['class_num'])]\n # 模型初始化\n self.init_model()\n\n 
@torch.no_grad()\n def init_model(self):\n '''模型初始化'''\n # 设置日志输出\n set_logging()\n # 选择计算设备\n device = select_device(self.yolov5['device'])\n # 如果是GPU则使用半精度浮点数 F16\n is_half = device.type != 'cpu'\n # 载入模型\n model = attempt_load(\n self.yolov5['weight'], map_location=device) # 载入全精度浮点数的模型\n input_size = check_img_size(\n self.yolov5['input_size'], s=model.stride.max()) # 检查模型的尺寸\n if is_half:\n model.half() # 将模型转换为半精度\n # 设置BenchMark,加速固定图像的尺寸的推理\n cudnn.benchmark = True # set True to speed up constant image size inference\n # 图像缓冲区初始化\n img_torch = torch.zeros(\n (1, 3, self.yolov5['input_size'], self.yolov5['input_size']), device=device) # init img\n # 创建模型\n # run once\n _ = model(img_torch.half()\n if is_half else img) if device.type != 'cpu' else None\n self.is_half = is_half # 是否开启半精度\n self.device = device # 计算设备\n self.model = model # Yolov5模型\n self.img_torch = img_torch # 图像缓冲区\n\n def preprocessing(self, img):\n '''图像预处理'''\n # 图像缩放\n # 注: auto一定要设置为False -> 图像的宽高不同\n img_resize = letterbox(img, new_shape=(\n self.yolov5['input_size'], self.yolov5['input_size']), auto=False)[0]\n # print(\"img resize shape: {}\".format(img_resize.shape))\n # 增加一个维度\n img_arr = np.stack([img_resize], 0)\n # 图像转换 (Convert) BGR格式转换为RGB\n # 转换为 bs x 3 x 416 x\n # 0(图像i), 1(row行), 2(列), 3(RGB三通道)\n # ---> 0, 3, 1, 2\n # BGR to RGB, to bsx3x416x416\n img_arr = img_arr[:, :, :, ::-1].transpose(0, 3, 1, 2)\n # 数值归一化\n # img_arr = img_arr.astype(np.float32) / 255.0\n # 将数组在内存的存放地址变成连续的(一维), 行优先\n # 将一个内存不连续存储的数组转换为内存连续存储的数组,使得运行速度更快\n # https://zhuanlan.zhihu.com/p/59767914\n img_arr = np.ascontiguousarray(img_arr)\n return img_arr\n\n @torch.no_grad()\n def detect(self, img, canvas=None, view_img=True):\n '''模型预测'''\n # 图像预处理\n img_resize = self.preprocessing(img) # 图像缩放\n self.img_torch = torch.from_numpy(img_resize).to(self.device) # 图像格式转换\n self.img_torch = self.img_torch.half(\n ) if self.is_half else self.img_torch.float() # 格式转换 uint8-> 浮点数\n self.img_torch /= 255.0 # 图像归一化\n if self.img_torch.ndimension() == 3:\n self.img_torch = self.img_torch.unsqueeze(0)\n # 模型推理\n t1 = time_sync()\n pred = self.model(self.img_torch, augment=False)[0]\n # pred = self.model_trt(self.img_torch, augment=False)[0]\n # NMS 非极大值抑制\n pred = non_max_suppression(pred, self.yolov5['threshold']['confidence'],\n self.yolov5['threshold']['iou'], classes=None, agnostic=False)\n t2 = time_sync()\n # print(\"推理时间: inference period = {}\".format(t2 - t1))\n # 获取检测结果\n det = pred[0]\n gain_whwh = torch.tensor(img.shape)[[1, 0, 1, 0]] # [w, h, w, h]\n\n if view_img and canvas is None:\n canvas = np.copy(img)\n xyxy_list = []\n conf_list = []\n class_id_list = []\n if det is not None and len(det):\n # 画面中存在目标对象\n # 将坐标信息恢复到原始图像的尺寸\n det[:, :4] = scale_coords(\n img_resize.shape[2:], det[:, :4], img.shape).round()\n for *xyxy, conf, class_id in reversed(det):\n class_id = int(class_id)\n xyxy_list.append(xyxy)\n conf_list.append(conf)\n class_id_list.append(class_id)\n if view_img:\n # 绘制矩形框与标签\n label = '%s %.2f' % (\n self.yolov5['class_name'][class_id], conf)\n self.plot_one_box(\n xyxy, canvas, label=label, color=self.colors[class_id], line_thickness=3)\n return canvas, class_id_list, xyxy_list, conf_list\n\n def plot_one_box(self, x, img, color=None, label=None, line_thickness=None):\n ''''绘制矩形框+标签'''\n tl = line_thickness or round(\n 0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n color = color or [random.randint(0, 255) for _ in range(3)]\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\n 
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\n if label:\n tf = max(tl - 1, 1) # font thickness\n t_size = cv2.getTextSize(\n label, 0, fontScale=tl / 3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled\n cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3,\n [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n\n\nif __name__ == '__main__':\n print(\"[INFO] YoloV5目标检测-程序启动\")\n print(\"[INFO] 开始YoloV5模型加载\")\n # YOLOV5模型配置文件(YAML格式)的路径 yolov5_yaml_path\n model = YoloV5(yolov5_yaml_path='config/yolov5s.yaml')\n print(\"[INFO] 完成YoloV5模型加载\")\n\n try:\n while True:\n # Wait for a coherent pair of frames: depth and color\n intr, depth_intrin, color_image, depth_image, aligned_depth_frame = get_aligned_images() # 获取对齐的图像与相机内参\n if not depth_image.any() or not color_image.any():\n continue\n # Convert images to numpy arrays\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(\n depth_image, alpha=0.03), cv2.COLORMAP_JET)\n # Stack both images horizontally\n images = np.hstack((color_image, depth_colormap))\n \n # Show images\n\n t_start = time.time() # 开始计时\n # YoloV5 目标检测\n canvas, class_id_list, xyxy_list, conf_list = model.detect(\n color_image)\n\n t_end = time.time() # 结束计时\\\n #canvas = np.hstack((canvas, depth_colormap))\n #print(class_id_list)\n\n camera_xyz_list=[]\n if xyxy_list:\n for i in range(len(xyxy_list)):\n ux = int((xyxy_list[i][0]+xyxy_list[i][2])/2) # 计算像素坐标系的x\n uy = int((xyxy_list[i][1]+xyxy_list[i][3])/2) # 计算像素坐标系的y\n dis = aligned_depth_frame.get_distance(ux, uy)\n camera_xyz = rs.rs2_deproject_pixel_to_point(\n depth_intrin, (ux, uy), dis) # 计算相机坐标系的xyz\n camera_xyz = np.round(np.array(camera_xyz), 3) # 转成3位小数\n camera_xyz = camera_xyz.tolist()\n cv2.circle(canvas, (ux,uy), 4, (255, 255, 255), 5)#标出中心点\n cv2.putText(canvas, str(camera_xyz), (ux+20, uy+10), 0, 1,\n [225, 255, 255], thickness=2, lineType=cv2.LINE_AA)#标出坐标\n camera_xyz_list.append(camera_xyz)\n #print(camera_xyz_list)\n\n # 添加fps显示\n fps = int(1.0 / (t_end - t_start))\n cv2.putText(canvas, text=\"FPS: {}\".format(fps), org=(50, 50),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2,\n lineType=cv2.LINE_AA, color=(0, 0, 0))\n cv2.namedWindow('detection', flags=cv2.WINDOW_NORMAL |\n cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)\n cv2.imshow('detection', canvas)\n key = cv2.waitKey(1)\n # Press esc or 'q' to close the image window\n if key & 0xFF == ord('q') or key == 27:\n cv2.destroyAllWindows()\n break\n finally:\n # Stop streaming\n pipeline.stop()\n", "repo_name": "Thinkin99/yolov5_d435i_detection", "sub_path": "rstest.py", "file_name": "rstest.py", "file_ext": "py", "file_size_in_byte": 11712, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 110, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyrealsense2.pipeline", "line_number": 28, "usage_type": "call"}, {"api_name": "pyrealsense2.config", "line_number": 29, "usage_type": "call"}, {"api_name": "pyrealsense2.stream", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pyrealsense2.format", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pyrealsense2.stream", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"pyrealsense2.align", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 62, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 73, "usage_type": "call"}, {"api_name": "yaml.SafeLoader", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 75, "usage_type": "attribute"}, {"api_name": "utils.general.set_logging", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.torch_utils.select_device", "line_number": 86, "usage_type": "call"}, {"api_name": "models.experimental.attempt_load", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.general.check_img_size", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.backends.cudnn.benchmark", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torch.backends.cudnn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.datasets.letterbox", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 138, "usage_type": "call"}, {"api_name": "utils.torch_utils.time_sync", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.general.non_max_suppression", "line_number": 149, "usage_type": "call"}, {"api_name": "utils.torch_utils.time_sync", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 158, "usage_type": "call"}, {"api_name": "utils.general.scale_coords", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 133, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 186, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 186, "usage_type": "attribute"}, {"api_name": "cv2.getTextSize", "line_number": 189, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 192, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 192, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 193, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 194, "usage_type": "attribute"}, {"api_name": "cv2.applyColorMap", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.convertScaleAbs", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.COLORMAP_JET", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.hstack", "line_number": 215, "usage_type": "call"}, {"api_name": "time.time", "line_number": 219, "usage_type": "call"}, {"api_name": "time.time", "line_number": 224, "usage_type": "call"}, {"api_name": "pyrealsense2.rs2_deproject_pixel_to_point", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 236, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 238, 
"usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 239, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 240, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 246, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 247, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 248, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 249, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 249, "usage_type": "attribute"}, {"api_name": "cv2.WINDOW_KEEPRATIO", "line_number": 250, "usage_type": "attribute"}, {"api_name": "cv2.WINDOW_GUI_EXPANDED", "line_number": 250, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 251, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 252, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 255, "usage_type": "call"}]} +{"seq_id": "20332183455", "text": "from cached_property import cached_property\nimport logging\n\nclass BlockCallbackMixin():\n '''\n This mixin gives you free expiry of cached properties upon a new block. \n Also lets the class implement new_block_callback()\n to implement custom behavior.\n\n Implementing class is responsible for implementing\n _block_callback_watcher.register_target(self)\n and\n _block_callback_watcher.unregister_target(self)\n '''\n def new_block_callback(self):\n '''\n Override this callback in your dapp to perform actions \n upon new block\n '''\n pass\n\n\n def _new_block_callback(self):\n '''\n This is the private version of new_block_callback.\n It expires the caches of any cached_properties.\n '''\n self._expire_cached_properties()\n self.new_block_callback()\n\n def _expire_cached_properties(self):\n objects = self.__class__.__dict__.values()\n c_props = [x for x in objects if x.__class__ == cached_property]\n if len(c_props) == 0:\n return\n\n logging.info(\"{} attempting to expire {} properties\".format(\n str(self.__class__),\n str(len(c_props))\n ))\n\n for prop in c_props:\n try:\n del self.__dict__[prop.func.__name__]\n logging.info(\"{} expired {}\".format(\n str(self.__class__),\n prop.func.__name__\n ))\n except KeyError as e:\n logging.info(\"no need to expire {}\".format(str(e)))\n\n\n", "repo_name": "kayagoban/shadowlands", "sub_path": "shadowlands/block_callback_mixin.py", "file_name": "block_callback_mixin.py", "file_ext": "py", "file_size_in_byte": 1565, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 142, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cached_property.cached_property", "line_number": 33, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "38264775068", "text": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as f:\n REQUIREMENTS = f.readlines()\n\nsetup(\n name='famgz_utils',\n version='0.1',\n license='MIT',\n author=\"famgz\",\n author_email='famgz@proton.me',\n packages=['famgz_utils'],\n package_dir={'famgz_utils': 'src/famgz_utils'},\n package_data={'famgz_utils': ['mouse_movements/*.json']},\n include_package_data=True,\n url='https://github.com/famgz/famgz_utils',\n install_requires=REQUIREMENTS\n)\n", "repo_name": "famgz/famgz-utils", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 
499, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "23439671284", "text": "# GUI.py\r\nimport pygame\r\nimport time\r\npygame.font.init()\r\nimport sys\r\nimport grid_solver\r\nimport numpy as np\r\nimport random\r\n\r\nclass Grid:\r\n # Graphical representation of a grid\r\n \r\n def __init__(self, size, width, height, win):\r\n self.size = size\r\n self.board = [[size*i + j + 1 for j in range(size)] for i in range(size)]\r\n self.board[size-1][size-1] = 0\r\n self.rows = size\r\n self.cols = size\r\n self.cubes = [[Cube(self.board[i][j], size, i, j, width, height) for j in range(size)] for i in range(size)]\r\n self.width = width\r\n self.height = height\r\n self.selected = None\r\n self.win = win\r\n self.update_grid()\r\n\r\n def update_grid(self):\r\n for i in range(self.cols):\r\n for j in range(self.rows):\r\n self.cubes[i][j].value = self.board[i][j]\r\n\r\n def find_neighbour_gap(self):\r\n # If the empty tile is a neighbour of the selected tile, \r\n # returns the coordinates of the empty tile. Otherwise returns None.\r\n \r\n row, col = self.selected\r\n neighbours = [(row-1, col), (row, col-1), (row+1, col), (row, col+1)]\r\n for (a,b) in neighbours:\r\n if 0 <= a < len(self.cubes) and 0 <= b < len(self.cubes[row]):\r\n if self.cubes[a][b].value == 0:\r\n return (a,b)\r\n return None\r\n\r\n def move(self):\r\n # Slides the selected tile into the gap if possible.\r\n # Otherwise does nothing.\r\n \r\n row, col = self.selected\r\n target = self.find_neighbour_gap()\r\n if target:\r\n self.board[target[0]][target[1]] = self.board[row][col]\r\n self.board[row][col] = 0\r\n self.update_grid()\r\n return True\r\n return False\r\n\r\n def draw(self):\r\n # Draw grid lines\r\n gap = self.width / self.size\r\n for i in range(self.rows + 1):\r\n thick = 1\r\n pygame.draw.line(self.win, (0, 0, 0), (0, i * gap), (self.width, i * gap), thick)\r\n pygame.draw.line(self.win, (0, 0, 0), (i * gap, 0), (i * gap, self.height), thick)\r\n\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.cubes[i][j].draw(self.win)\r\n\r\n def shuffle(self):\r\n valid = False\r\n while not valid:\r\n number_list = list(range(0, self.size * self.size, 1))\r\n random.shuffle(number_list)\r\n if solvable(number_list):\r\n valid = True\r\n\r\n new_grid = np.array(number_list).reshape(self.rows, self.cols)\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.board[i][j] = new_grid[i][j]\r\n self.update_grid()\r\n\r\n def select(self, row, col):\r\n # Reset\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n self.cubes[i][j].selected = False\r\n\r\n self.cubes[row][col].selected = True\r\n self.selected = (row,col)\r\n\r\n def clear(self):\r\n row, col = self.selected\r\n if self.cubes[row][col].value == 0:\r\n self.cubes[row][col].set_temp(0)\r\n\r\n def click(self, pos):\r\n if pos[0] < self.width and pos[1] < self.height:\r\n gap = self.width / self.size\r\n x = pos[0] // gap\r\n y= pos[1] // gap\r\n return (int(y), int(x))\r\n else:\r\n return None\r\n\r\n def change_state(self, new_state):\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n if self.board[i][j] != new_state[self.size*i + j] and self.board[i][j] != 0:\r\n self.select(i,j)\r\n self.move()\r\n\r\n\r\nclass Cube:\r\n\r\n def __init__(self, value, grid_size, row, col, width, height):\r\n self.grid_size = grid_size\r\n self.value = 
value\r\n self.temp = 0\r\n self.row = row\r\n self.col = col\r\n self.width = width\r\n self.height = height\r\n self.selected = False\r\n\r\n def draw(self, win):\r\n fnt = pygame.font.SysFont(\"comicsans\", 40)\r\n\r\n gap = self.width / self.grid_size\r\n x = self.col * gap\r\n y = self.row * gap\r\n\r\n if self.temp != 0 and self.value == 0:\r\n text = fnt.render(str(self.temp), 1, (128, 128, 128))\r\n win.blit(text, (x + 5, y + 5))\r\n elif not (self.value == 0):\r\n text = fnt.render(str(self.value), 1, (0, 0, 0))\r\n win.blit(text, (x + (gap / 2 - text.get_width() / 2), y + (gap / 2 - text.get_height() / 2)))\r\n\r\n if self.selected:\r\n pygame.draw.rect(win, (255, 0, 0), (x, y, gap, gap), self.grid_size)\r\n\r\ndef redraw_window(win, board, time, steps = None):\r\n win.fill((255, 255, 255))\r\n # Draw time\r\n fnt = pygame.font.SysFont(\"comicsans\", 40)\r\n text = fnt.render(\"Time: \" + format_time(time), 1, (0, 0, 0))\r\n win.blit(text, (540 - 160, 560))\r\n # Draw steps\r\n if steps:\r\n text = fnt.render(\"Steps: \" + str(steps), 1, (255, 0, 0))\r\n win.blit(text, (20, 560))\r\n # Draw instructions\r\n fnt = pygame.font.SysFont(\"comicsans\", 20)\r\n text = fnt.render(\"S - Shuffle\" , 1, (0, 0, 0))\r\n win.blit(text, (540 - 320, 560-15))\r\n text = fnt.render(\"LMB - Slide\", 1, (0, 0, 0))\r\n win.blit(text, (540 - 320, 560))\r\n text = fnt.render(\"Space - Solve\", 1, (0, 0, 0))\r\n win.blit(text, (540 - 320, 560+15))\r\n # Draw grid and board\r\n board.draw()\r\n\r\ndef format_time(secs):\r\n sec = secs%60\r\n minute = secs//60\r\n hour = minute//60\r\n mat = \" \" + str(minute) + \":\" + str(sec)\r\n return mat\r\n\r\ndef solvable(order):\r\n count = 0\r\n for i in range(len(order)):\r\n for j in range(i+1, len(order)):\r\n if order[j] and order[i] > order[j]:\r\n count += 1\r\n return count % 2 == 0\r\n\r\ndef main():\r\n size = int(input(\"Enter grid size: \"))\r\n goal_state = [[size * i + j + 1 for j in range(size)] for i in range(size)]\r\n goal_state[size - 1][size - 1] = 0\r\n win = pygame.display.set_mode((540, 600))\r\n pygame.display.set_caption(\"Sliding Puzzle\")\r\n board = Grid(size, 540, 540, win)\r\n key = None\r\n run = True\r\n steps = 0\r\n start = time.time()\r\n\r\n while run:\r\n play_time = round(time.time() - start)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n solver = grid_solver.GridSolver(np.array(board.board), np.array(goal_state))\r\n for node in reversed(solver.solve_grid()):\r\n steps += 1\r\n board.change_state(node.curr_state)\r\n redraw_window(win, board, play_time, steps)\r\n pygame.display.update()\r\n time.sleep(0.25)\r\n steps = 0\r\n if event.key == pygame.K_s:\r\n board.shuffle()\r\n key = None\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n pos = pygame.mouse.get_pos()\r\n clicked = board.click(pos)\r\n if clicked:\r\n board.select(clicked[0], clicked[1])\r\n key = None\r\n board.move()\r\n\r\n redraw_window(win, board, play_time)\r\n pygame.display.update()\r\n\r\nmain()\r\npygame.quit()\r\n", "repo_name": "leonkosarev/N-Puzzle", "sub_path": "src/GUI.py", "file_name": "GUI.py", "file_ext": "py", "file_size_in_byte": 7536, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.font.init", "line_number": 4, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 4, "usage_type": "attribute"}, 
{"api_name": "pygame.draw.line", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 62, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 145, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 145, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 153, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 183, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 183, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 188, "usage_type": "call"}, {"api_name": "time.time", "line_number": 191, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 192, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 192, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 193, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 194, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 195, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 198, "usage_type": "attribute"}, {"api_name": "grid_solver.GridSolver", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 204, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 204, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 205, "usage_type": "call"}, {"api_name": "pygame.K_s", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 212, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 212, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 220, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 223, "usage_type": "call"}]} +{"seq_id": "24872521304", "text": "import forms\nimport tools\nfrom app import app\nfrom flask import flash\nfrom flask import session\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom read_data_from_db import apiReports\n\n\ndef update_session():\n session['data'] = request.form.to_dict()\n\n\ndef general_report_site(form, statuses, plan_id):\n update_session()\n return render_template('general_reports_site.html',\n title='test',\n form=form,\n statuses=statuses,\n 
plan_id=plan_id)\n\n\ndef read_session_form():\n    dt = tools.cast_to_date(session['data']['dt'])\n    form = forms.navigationForm(instances=session['data']['instances'],\n                                country=session['data']['country'],\n                                periodType=session['data']['periodType'],\n                                dt=dt)\n    return form\n\n\ndef initilize_forms_variables(data):\n    form = forms.navigationForm()\n\n    form.instances.choices = tools.get_instances()\n    form.periodType.choices = tools.get_period_types()\n    form.country.choices = tools.get_countries()\n    return form\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef instance_type():\n    session['data'] = request.form.to_dict()\n\n    form = initilize_forms_variables(request.form.to_dict())\n    if form.validate_on_submit():\n        country = session['data'].get('country')\n        current_date = session['data'].get('dt')\n        period_type = session['data'].get('periodType')\n        instance_type = session['data'].get('instances')\n\n        report_api = apiReports(country=country,\n                                report_name='empty',\n                                date=current_date,\n                                period_type=period_type,\n                                instance_type=instance_type\n                                )\n\n        reports_status = report_api.get_reports_statuses()\n        plan_id = report_api.get_default_plan_id()\n        return general_report_site(form, reports_status, plan_id)\n\n    return render_template('base.html',\n                           title='initial site',\n                           form=form)\n\n\n@app.route('/reports/<report_name>', methods=['POST', 'GET'])\ndef reports(report_name):\n\n    form = read_session_form()\n    country = request.args.get('country')\n    current_date = request.args.get('dt')\n    period_type = request.args.get('periodType')\n    instance_type = request.args.get('instances')\n    plan_id = int(request.args.get('plan_id'))\n\n    report_api = apiReports(country=country,\n                            report_name=report_name,\n                            date=current_date,\n                            period_type=period_type,\n                            instance_type=instance_type,\n                            plan_id=plan_id)\n\n    reports_status = report_api.get_reports_statuses()\n    plan_id = 1\n    if form.validate_on_submit():\n        return general_report_site(form, reports_status, plan_id)\n\n    try:\n        data = report_api.create_report()\n\n    except ValueError as e:\n        return render_template('error_site.html',\n                               title='error',\n                               form=form,\n                               statuses=reports_status,\n                               url_params=request.args,\n                               error_message=e,\n                               plan_id=plan_id\n                               )\n\n    platforms = report_api.find_plans_id('on')\n\n    return render_template('basic_report.html',\n                           name=report_name,\n                           form=form,\n                           plan_id=plan_id,\n                           statuses=reports_status,\n                           platforms=platforms,\n                           url_params=request.args,\n                           data=data\n                           )", "repo_name": "gonti89-old/degree_project", "sub_path": "interface/app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.session", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "tools.cast_to_date", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 27, "usage_type": "name"}, {"api_name": "forms.navigationForm", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 30, "usage_type": "name"}, {"api_name":
"forms.navigationForm", "line_number": 36, "usage_type": "call"}, {"api_name": "tools.get_instances", "line_number": 38, "usage_type": "call"}, {"api_name": "tools.get_period_types", "line_number": 39, "usage_type": "call"}, {"api_name": "tools.get_countries", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "read_data_from_db.apiReports", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 66, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 44, "usage_type": "call"}, {"api_name": "app.app", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "read_data_from_db.apiReports", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 101, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 101, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "app.app.route", "line_number": 71, "usage_type": "call"}, {"api_name": "app.app", "line_number": 71, "usage_type": "name"}]} +{"seq_id": "32193966929", "text": "#!/usr/bin/env python\n\nimport xml.etree.ElementTree as ET\nimport build_common\nimport os\n\n# This class handles iterating over a set of Visual Studio build configurations\n# for one or more 
Visual Studio project solutions. For example:\n# for project in [ world_editor.sln, particle_editor.sln ]:\n# for config in [ \"Editor_Hybrid\", \"Editor_Debug\", \"Editor_Release\" ]\n# # generate (project, config)\nclass ProjectConfigIterator( object ):\n\n\tdef __init__( self, projects, configs, vsVersion ):\n\n\t\tif len( projects ) == 0:\n\t\t\traise ValueError( \"Invalid project list\" )\n\n\t\tif len( configs ) == 0:\n\t\t\traise ValueError( \"Invalid config list\" )\n\n\t\tself.projects = projects\n\t\tself.configs = configs\n\n\t\tself.vsVersion = vsVersion\n\n\t\tself.currProjIndex = 0\n\t\tself.currConfIndex = 0\n\n\n\tdef __iter__( self ):\n\t\treturn self\n\n\n\tdef _incCounters( self ):\n\t\tself.currConfIndex += 1\n\n\n\tdef next( self ):\n\t\tisEndOfProjects = self.currProjIndex >= len( self.projects )\n\t\tisEndOfConfigs = self.currConfIndex >= len( self.configs )\n\n\t\tif isEndOfProjects and isEndOfConfigs:\n\t\t\traise StopIteration\n\n\t\tif isEndOfConfigs:\n\t\t\tself.currProjIndex += 1\n\t\t\tself.currConfIndex = 0\n\n\t\t\tif self.currProjIndex >= len( self.projects ):\n\t\t\t\traise StopIteration\n\n\t\t# Perform a replacement on the project string with the Visual Studio\n\t\t# version (if required)\n\t\tprojectString = self.projects[ self.currProjIndex ].replace( \"%(vsVersion)s\", self.vsVersion )\n\t\tretData = (projectString, self.configs[ self.currConfIndex ])\n\n\t\tself._incCounters()\n\n\t\treturn retData\n\n###############################\n# Main module accessor methods\ndef generateProjectConfig( vsVersion, buildType, xmlElement ):\n\t\n\tif xmlElement is None:\n\t\treturn None\n\t\t\n\tisContinuousBuild = build_common.isContinuousBuild( buildType )\n\tpathList = []\n\tconfigList = []\n\titemPaths = xmlElement.find( \"paths\" )\n\tif itemPaths != None:\n\t\tpathList = [ os.path.normpath( path.text.strip() ) for path in itemPaths.findall( \"path\" ) ]\n\t\t\n\tconfigs = xmlElement.find( \"build_configs\" )\n\tif configs != None:\n\t\tif isContinuousBuild:\n\t\t\tfor config in configs.findall( \"config\" ):\n\t\t\t\tif 'continuous_build' in config.attrib and \\\n\t\t\t\t\tconfig.attrib['continuous_build'] == \"True\":\n\t\t\t\t\tconfigList.append( config.text.strip() )\n\t\telse:\n\t\t\tconfigList = [ config.text.strip() for config in configs.findall( \"config\" ) ]\n\n\treturn ProjectConfigIterator( pathList, configList, vsVersion )\n\t\t\t\n\t\ndef getProjects( vsVersion, buildType, projectConfigPath, tag = None ):\n\tprojectList = []\n\t\n\tdoc = ET.parse( projectConfigPath )\n\troot = doc.getroot()\n\tchildrenList = root.getchildren()\n\tfor item in childrenList:\n\t\tif tag != None:\n\t\t\tif tag == item.tag:\n\t\t\t\tprojectList.append( generateProjectConfig( vsVersion, buildType, item ) )\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprojectList.append( generateProjectConfig( vsVersion, buildType, item ) )\n\t\t\n\treturn projectList\n\n# project_manager.py\n", "repo_name": "v2v3v4/BigWorld-Engine-14.4.1", "sub_path": "programming/bigworld/build/bw_internal/scripts/project_manager.py", "file_name": "project_manager.py", "file_ext": "py", "file_size_in_byte": 2884, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 79, "dataset": "github-code", "pt": "52", "api": [{"api_name": "build_common.isContinuousBuild", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "xml.etree.ElementTree.parse", 
"line_number": 92, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "70317187685", "text": "\"\"\"\nProject Name: People Sim\nFile Name: Buildings.py\nAuthor: Lex Hall\nLast Updated: 11-13-2018\nPython Version: 3.6\nPygame Version: 1.9.3\n\"\"\"\n\nimport pygame\nfrom random import *\nimport Constants as const\n\nclass VillageBuilding(object):\n def __init__(self, originXGrid, originYGrid):\n self.originXGrid = originXGrid\n self.originYGrid = originYGrid\n self.originXActual = self.originXGrid * const.PIXELSIZE - 5\n self.originYActual = self.originYGrid * const.PIXELSIZE - 5\n self.gridWidth = 10\n self.gridHeight = 10\n self.rect = pygame.Rect(self.originXActual, self.originYActual,\n self.gridWidth * const.PIXELSIZE, self.gridHeight * const.PIXELSIZE)\n self.buildingProgress = 0\n self.isComplete = False\n self.requiredWood = 50\n self.currentWood = 0\n self.buildLocationCount = -1\n\n def update(self, mapController):\n self.buildingProgress = (self.currentWood * 100) / self.requiredWood\n if self.buildingProgress >= 100:\n self.isComplete = True\n\n def draw(self, gameMap):\n if not self.isComplete:\n pygame.draw.rect(gameMap, const.BROWN, self.rect, 3)\n pygame.draw.rect(gameMap, const.BROWN, (self.originXActual, self.originYActual + 100 - self.buildingProgress,\n self.gridWidth * const.PIXELSIZE, self.buildingProgress))\n else:\n pygame.draw.rect(gameMap, const.DARKBROWN, self.rect)\n\n def getBuildLocation(self):\n return(self.originXGrid + 5, self.originYGrid + 5)\n\n def inputWood(self):\n self.currentWood += 1\n\n\nclass FarmHouse(VillageBuilding):\n\n def update(self, mapController):\n VillageBuilding.update(self, mapController)\n if self.isComplete:\n if randint(0,10000) == 1:\n newFood = self.getBuildLocation()\n if randint(0,1) == 0:\n newFoodX = newFood[0] + randint(5,20)\n else:\n newFoodX = newFood[0] - randint(5,20)\n if randint(0,1) == 0:\n newFoodY = newFood[1] + randint(5,20)\n else:\n newFoodY = newFood[1] - randint(5,20)\n mapController.spawnFood(newFoodX, newFoodY)\n\n def draw(self, gameMap):\n if not self.isComplete:\n pygame.draw.rect(gameMap, const.YELLOW, self.rect, 3)\n pygame.draw.rect(gameMap, const.YELLOW, (self.originXActual, self.originYActual + 100 - self.buildingProgress,\n self.gridWidth * const.PIXELSIZE, self.buildingProgress))\n else:\n pygame.draw.rect(gameMap, const.DARKYELLOW, self.rect)", "repo_name": "AnubisAbydos/People-Sim", "sub_path": "People Sim/Buildings.py", "file_name": "Buildings.py", "file_ext": "py", "file_size_in_byte": 2766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Constants.PIXELSIZE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Constants.PIXELSIZE", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 22, "usage_type": "call"}, {"api_name": "Constants.PIXELSIZE", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 37, "usage_type": "attribute"}, {"api_name": "Constants.BROWN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Constants.BROWN", "line_number": 38, "usage_type": "attribute"}, {"api_name": "Constants.PIXELSIZE", "line_number": 39, 
"usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 41, "usage_type": "attribute"}, {"api_name": "Constants.DARKBROWN", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 69, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 69, "usage_type": "attribute"}, {"api_name": "Constants.YELLOW", "line_number": 69, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Constants.YELLOW", "line_number": 70, "usage_type": "attribute"}, {"api_name": "Constants.PIXELSIZE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 73, "usage_type": "attribute"}, {"api_name": "Constants.DARKYELLOW", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "2051761039", "text": "from django.shortcuts import render\nfrom .models import Site\nfrom .forms import SiteForm\n\n# Create your views here.\n\ndef ajouter_site(request):\n if request.method == \"POST\":\n form = SiteForm(request.POST)\n if form.is_valid():\n site = form.save(commit=False)\n site.save()\n form = SiteForm()\n return render(request, 'site_web/ajouter_siteh.tml', {'form': form}) \n else:\n form = SiteForm()\n return render(request, 'site_web/ajouter_site.html', {'form': form})", "repo_name": "cberdaguer/table_numerique", "sub_path": "Table-Tactile/table_numerique/site_web/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "forms.SiteForm", "line_number": 9, "usage_type": "call"}, {"api_name": "forms.SiteForm", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "forms.SiteForm", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "15857626520", "text": "from tifffile import TiffFile, TiffWriter, TiffTag\nimport cv2\nimport numpy as np\nimport tifffile as tif\n\nfrom finders import Finder\nfrom model.location import Location\n\n\nclass ImageManager:\n \"\"\"\n Loads image and finds runs a finder in the image data and allows you to save the results.\n \"\"\"\n\n def __init__(self, filepath: str):\n \"\"\"\n initialses Mammal Finder\n\n filepath - filepath of the image to process\n \"\"\"\n self.filetype: str = filepath[len(filepath) - 3:].upper()\n self.tags = None\n self.locations: [Location] = None\n self.intermediaryImage = None\n self.outlined = None\n if self.filetype == 'TIF':\n print('found tif')\n with TiffFile(filepath) as tif:\n # fileInfo(tif)\n self.tags = metadataGeoTags(tif)\n self.image = tif.asarray()\n elif self.filetype == 'PNG' or self.filetype == 'JPG':\n print('found png')\n self.image = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)\n else:\n print('invalid file type:', self.filetype)\n\n def singleLayerFind(self, method: Finder, layer: int = 0):\n \"\"\"\n find the the mammel in the Image\n\n method - the finder to find the mammal\n layer - the layer or sample of the image to use.\n \"\"\"\n prepared_image = None\n if len(self.image.shape) > 2: # atleast 3 dimesions in array e.g 100,100,5\n if 
self.image.shape[0] > self.image.shape[2]:  # one image array with multi samples e.g. 100,100,5\n                if self.image.shape[2] > layer:  # check that the sample exists\n                    prepared_image = self.image[:, :, layer]\n                else:\n                    print('sample:', layer, ' out of bounds:', self.image.shape[2],\n                          'from the following multi sample image', self.image.shape)\n                    return\n            elif self.image.shape[0] < self.image.shape[2]:  # image with more than one layer e.g. 5,100,100\n                if self.image.shape[0] > layer:  # check that the layer exists\n                    prepared_image = self.image[layer]\n                else:\n                    print('layer:', layer, ' out of bounds:', self.image.shape[0],\n                          'from the following multi layer image', self.image.shape)\n                    return\n            else:\n                print('Unrecognised dimensions:', self.image.shape)\n        elif len(self.image.shape) == 2:  # basic 2 dimensional array\n            prepared_image = self.image\n        else:\n            print('invalid dimensions in image', self.image.shape)\n            return\n\n        if prepared_image is None:\n            print('something went wrong')\n\n        self.locations, self.intermediaryImage = method.findInImage(prepared_image)\n        return self.locations, self.intermediaryImage\n\n    def combineToSingleLayer(self, layers=[1.]):\n        prepared_image = None\n        if self.image.shape[0] < len(layers) or self.image.shape[2] < len(\n                layers):  # check number of layers provided is less than or equal to the number of layers in image\n            print(\"too many layers given\")\n            return -1\n        count = 0.\n        for l in layers:\n            count = count + l\n        if not 0.9999 < count < 1.0001:  # the layer weights must sum to (approximately) one\n            print(\"layers do not sum to one:\", layers, \"sum:\", count)\n            return -1\n        for c, l in enumerate(layers):\n            if self.image.shape[0] > self.image.shape[2]:\n                if prepared_image is None:\n                    prepared_image = l * self.image[:, :, c]\n                else:\n                    prepared_image = prepared_image + l * self.image[:, :, c]\n            else:\n                if prepared_image is None:\n                    prepared_image = l * self.image[c]\n                else:\n                    prepared_image = prepared_image + l * self.image[c]\n        return prepared_image\n\n    def combinedSingleLayerFind(self, method: Finder, layers=[1.]):\n        prepared_image = None\n        if len(self.image.shape) > 2:  # check number of dimensions in image\n            prepared_image = self.combineToSingleLayer(layers)\n            if isinstance(prepared_image, int) and prepared_image == -1:  # -1 signals failure; guard avoids an ambiguous ndarray comparison\n                return\n        else:\n            prepared_image = self.image\n\n        self.locations, self.intermediaryImage = method.findInImage(prepared_image)\n        return self.locations, self.intermediaryImage\n\n    def multiLayerFind(self, method: Finder):\n        variance = 10\n        if len(self.image.shape) > 2:\n            results = []\n            for i in range(0, min(self.image.shape[0], self.image.shape[2])):\n                if self.image.shape[0] > self.image.shape[2]:\n                    prepared_image = self.image[:, :, i]\n                else:\n                    prepared_image = self.image[i]\n                locations, self.intermediaryImage = method.findInImage(\n                    prepared_image)  # only store last intermediaryImage\n                print(locations)\n                for l in locations:\n                    found_pair = False\n                    for r in results:\n                        if r.coords[0] + variance > l.coords[0] > r.coords[0] - variance and r.coords[1] + variance > \\\n                                l.coords[1] > r.coords[1] - variance:\n                            r.detected = r.detected + 1\n                            found_pair = True\n                            break\n                    if not found_pair:\n                        results.append(l)\n\n            self.locations = list(filter(lambda x: x.detected > 1, results))  # materialise so the result can be iterated more than once\n        else:\n            self.locations, self.intermediaryImage = method.findInImage(self.image)\n\n        return self.locations, self.intermediaryImage\n\n    def outline_mammal(self, baseImage: str = 'blank', padding=30):\n        \"\"\"\n        based on the coordinates provided, outline the sheep in the image\n\n        baseImage - one of {'blank','original','rgb'}\n        padding - default 30px padding around a mammal when outlined\n\n        returns - numpy array of base 
image with rectangles highlighting the mammals found.\n\n        \"\"\"\n        if self.locations is None:\n            print('no locations yet, please run find first')\n            return\n\n        if baseImage == 'blank':\n            outlined = np.zeros(self.intermediaryImage.shape)\n        elif baseImage == 'original':\n            outlined = self.image\n        elif baseImage == 'rgb':\n            if len(self.image.shape) > 2 and self.image.shape[0] > self.image.shape[2] >= 3:\n                r = self.image[:, :, 0]\n                g = self.image[:, :, 1]\n                b = self.image[:, :, 2]\n                outlined = cv2.merge((r, g, b))\n            else:\n                print('probably already rgb')\n                return\n        else:\n            print('not a valid baseImage type:', baseImage, '. one of {\\'blank\\',\\'original\\',\\'rgb\\'}')\n            return\n\n        if outlined is None:\n            print(\"error\")\n            return\n        print(outlined.shape)\n\n        for location in self.locations:\n            center = location.coords\n            size = location.size\n            if size is None:\n                size = (25, 25)\n\n            outlined = cv2.rectangle(\n                outlined,\n                (center[1] - round(size[1] / 2) - padding, center[0] - round(size[0] / 2) - padding),\n                (center[1] + round(size[1] / 2) + padding, center[0] + round(size[0] / 2) + padding),\n                255,\n                3\n            )\n        self.outlined = outlined\n        return outlined\n\n    def saveIntermidiary(self, filepath: str):\n        \"\"\"\n        saves the intermediary image to the file system\n\n        filepath - the location to save the image to.\n        \"\"\"\n        if self.intermediaryImage is None:\n            print('No intermediary image, try running find first')\n            return\n        self.save(filepath, self.intermediaryImage)\n\n    def saveOutlined(self, filepath: str):\n        \"\"\"\n        saves the outlined sheep image to the file system\n\n        filepath - the location to save the image to.\n        \"\"\"\n\n        if self.outlined is None:\n            print('No outlined image, try running outline_mammal first')\n            return\n        self.save(filepath, self.outlined)\n\n    def save(self, filepath: str, image):\n        if image is None:\n            print('No image to save')\n        if filepath is None:\n            print('No file specified')\n\n        saveType = filepath[len(filepath) - 3:].upper()\n        if saveType == 'TIF':\n            with TiffWriter(filepath, bigtiff=True) as tifw:\n                if self.tags is None:\n                    print('btw there\\'s no tif tags being saved to this image')\n                    tifw.save(image)\n                else:\n                    tifw.save(image, extratags=self.tags)\n        elif saveType == 'PNG' or saveType == 'JPG':\n            cv2.imwrite(filepath, image)\n            print('saved', filepath)\n        else:\n            print('file type not handled:', saveType,\n                  '. 
try one of {\\'.tif\\',\\'.png\\',\\'.jpg\\'} at end of the filepath')\n\n def extract_sheep(self, folder: str):\n size = 10\n if self.locations is None:\n print(\"No locations\")\n return\n for location in self.locations:\n sheep = self.image[location.coords[0] - int(location.size[0]) - size:location.coords[0] + size,\n location.coords[1] - int(location.size[1]) - size:location.coords[1] + size\n , :]\n\n tif.imwrite(folder + \"/sheep_\" + str(location.coords) + \".tif\", sheep, photometric='rgb')\n\n\ndef fileInfo(tif: TiffFile):\n \"\"\"\n prints out useful tiff info\n \"\"\"\n print(tif.flags)\n print(tif.geotiff_metadata)\n for page in tif.pages:\n print(page.tags)\n print(page.geotiff_tags)\n print(page.shape)\n print(page.dtype)\n print(page.flags)\n\n\ndef metadataGeoTags(tif: TiffFile):\n \"\"\"\n extracts the useful geo tags from the tiff file\n \"\"\"\n geoTag: TiffTag = tif.pages[0].tags.get('GeoKeyDirectoryTag')\n if geoTag is not None:\n g: TiffTag = tif.pages[0].tags.get(34737)\n g2: TiffTag = tif.pages[0].tags.get(34736)\n g3: TiffTag = tif.pages[0].tags.get(33922)\n g4: TiffTag = tif.pages[0].tags.get(33550)\n\n tags = [(geoTag.code, 'H', geoTag.count, geoTag.value),\n (g.code, 's', g.count, g.value),\n (g2.code, 'd', g2.count, g2.value),\n (g3.code, 'd', g3.count, g3.value),\n (g4.code, 'd', g4.count, g4.value)]\n return tags\n else:\n print('no geo tags in file')\n", "repo_name": "IdrisTheDragon/wherearemysheep", "sub_path": "imageManager.py", "file_name": "imageManager.py", "file_ext": "py", "file_size_in_byte": 10706, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "model.location.Location", "line_number": 23, "usage_type": "name"}, {"api_name": "tifffile.TiffFile", "line_number": 28, "usage_type": "call"}, {"api_name": "tifffile.asarray", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 34, "usage_type": "attribute"}, {"api_name": "finders.Finder", "line_number": 38, "usage_type": "name"}, {"api_name": "finders.Finder", "line_number": 100, "usage_type": "name"}, {"api_name": "finders.Finder", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.merge", "line_number": 164, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 183, "usage_type": "call"}, {"api_name": "tifffile.TiffWriter", "line_number": 224, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 231, "usage_type": "call"}, {"api_name": "tifffile.imwrite", "line_number": 247, "usage_type": "call"}, {"api_name": "tifffile.TiffFile", "line_number": 250, "usage_type": "name"}, {"api_name": "tifffile.flags", "line_number": 254, "usage_type": "attribute"}, {"api_name": "tifffile.geotiff_metadata", "line_number": 255, "usage_type": "attribute"}, {"api_name": "tifffile.pages", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tifffile.TiffFile", "line_number": 264, "usage_type": "name"}, {"api_name": "tifffile.TiffTag", "line_number": 268, "usage_type": "name"}, {"api_name": "tifffile.pages", "line_number": 268, "usage_type": "attribute"}, {"api_name": "tifffile.TiffTag", "line_number": 270, "usage_type": "name"}, {"api_name": "tifffile.pages", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tifffile.TiffTag", "line_number": 271, "usage_type": "name"}, {"api_name": "tifffile.pages", 
"line_number": 271, "usage_type": "attribute"}, {"api_name": "tifffile.TiffTag", "line_number": 272, "usage_type": "name"}, {"api_name": "tifffile.pages", "line_number": 272, "usage_type": "attribute"}, {"api_name": "tifffile.TiffTag", "line_number": 273, "usage_type": "name"}, {"api_name": "tifffile.pages", "line_number": 273, "usage_type": "attribute"}]} +{"seq_id": "15083721590", "text": "import sys\nfrom typing import Optional, List, Any\n\nimport pygame\n\nfrom pythongame.core.common import Millis, SceneTransition, AbstractScene\nfrom pythongame.core.game_data import ENTITY_SPRITE_INITIALIZERS, \\\n UI_ICON_SPRITE_PATHS, PORTRAIT_ICON_SPRITE_PATHS\nfrom pythongame.core.game_state import GameState\nfrom pythongame.core.sound_player import init_sound_player\nfrom pythongame.core.view.game_world_view import GameWorldView\nfrom pythongame.core.view.image_loading import load_images_by_sprite, \\\n load_images_by_ui_sprite, load_images_by_portrait_sprite\nfrom pythongame.core.world_behavior import AbstractWorldBehavior\nfrom pythongame.player_file import SaveFileHandler\nfrom pythongame.register_game_data import register_all_game_data\nfrom pythongame.scene_challenge_complete_screen.scene_challenge_complete_screen import ChallengeCompleteScreenScene\nfrom pythongame.scene_creating_world.scene_creating_world import CreatingWorldScene, InitFlags\nfrom pythongame.scene_main_menu.scene_main_menu import MainMenuScene\nfrom pythongame.scene_main_menu.view_main_menu import MainMenuView\nfrom pythongame.scene_picking_hero.scene_picking_hero import PickingHeroScene\nfrom pythongame.scene_picking_hero.view_picking_hero import PickingHeroView\nfrom pythongame.scene_starting_program.scene_starting_program import CommandlineFlags, StartingProgramScene\nfrom pythongame.scene_victory_screen.scene_victory_screen import VictoryScreenScene\nfrom pythongame.scenes_game.game_engine import GameEngine\nfrom pythongame.scenes_game.game_ui_view import GameUiView, UI_ICON_SIZE, PORTRAIT_ICON_SIZE, UI_ICON_BIG_SIZE\nfrom pythongame.scenes_game.scene_playing import PlayingScene\n\nABILITY_KEY_LABELS = [\"Q\", \"W\", \"E\", \"R\", \"T\"]\nSCREEN_SIZE = (800, 600) # If this is not a supported resolution, performance takes a big hit\nCAMERA_SIZE = (800, 430)\n\nregister_all_game_data()\n\n\nclass Main:\n def __init__(self, map_file_name: Optional[str], chosen_hero_id: Optional[str], hero_start_level: Optional[int],\n start_money: Optional[int]):\n\n cmd_flags = CommandlineFlags(map_file_name, chosen_hero_id, hero_start_level, start_money)\n\n pygame.init()\n\n print(\"Available display modes: \" + str(pygame.display.list_modes()))\n\n self.fullscreen = False # TODO\n self.pygame_screen = self.setup_screen()\n images_by_sprite = load_images_by_sprite(ENTITY_SPRITE_INITIALIZERS)\n images_by_ui_sprite = load_images_by_ui_sprite(UI_ICON_SPRITE_PATHS, UI_ICON_SIZE)\n big_images_by_ui_sprite = load_images_by_ui_sprite(UI_ICON_SPRITE_PATHS, UI_ICON_BIG_SIZE)\n self.images_by_portrait_sprite = load_images_by_portrait_sprite(PORTRAIT_ICON_SPRITE_PATHS, PORTRAIT_ICON_SIZE)\n self.world_view = GameWorldView(self.pygame_screen, CAMERA_SIZE, SCREEN_SIZE, images_by_sprite)\n self.ui_view = GameUiView(\n self.pygame_screen, CAMERA_SIZE, SCREEN_SIZE, images_by_ui_sprite,\n big_images_by_ui_sprite, self.images_by_portrait_sprite, ABILITY_KEY_LABELS)\n self.save_file_handler = SaveFileHandler()\n init_sound_player()\n self.clock = pygame.time.Clock()\n\n self.scene: AbstractScene = StartingProgramScene(\n self.main_menu_scene, 
self.creating_world_scene, self.picking_hero_scene, cmd_flags, self.save_file_handler)\n\n def main_loop(self):\n while True:\n self.clock.tick()\n time_passed = Millis(self.clock.get_time())\n fps_string = str(int(self.clock.get_fps()))\n self.ui_view.update_fps_string(fps_string)\n\n input_events: List[Any] = pygame.event.get()\n for event in input_events:\n if event.type == pygame.QUIT:\n self.quit_game()\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE and self.fullscreen:\n self.toggle_fullscreen()\n self.ui_view.on_fullscreen_changed(self.fullscreen)\n\n transition: Optional[SceneTransition] = self.scene.handle_user_input(input_events)\n if transition:\n self.change_scene(transition)\n continue\n\n transition: Optional[SceneTransition] = self.scene.run_one_frame(time_passed)\n if transition:\n self.change_scene(transition)\n continue\n\n self.scene.render()\n pygame.display.update()\n\n def toggle_fullscreen(self):\n self.fullscreen = not self.fullscreen\n self.pygame_screen = self.setup_screen()\n\n def setup_screen(self):\n flags = pygame.DOUBLEBUF\n if self.fullscreen:\n flags = flags | pygame.FULLSCREEN | pygame.HWSURFACE\n return pygame.display.set_mode(SCREEN_SIZE, flags)\n\n @staticmethod\n def quit_game():\n pygame.quit()\n sys.exit()\n\n def change_scene(self, scene_transition: SceneTransition):\n self.scene = scene_transition.scene\n self.scene.on_enter()\n\n def main_menu_scene(self, flags: InitFlags):\n view = MainMenuView(self.pygame_screen, self.images_by_portrait_sprite)\n return MainMenuScene(\n self.save_file_handler, self.picking_hero_scene, self.creating_world_scene, flags, view)\n\n def creating_world_scene(self, flags: InitFlags):\n return CreatingWorldScene(self.playing_scene, self.picking_hero_scene, self.challenge_complete_scene,\n self.victory_screen_scene, CAMERA_SIZE, self.ui_view, flags)\n\n def picking_hero_scene(self, init_flags: InitFlags):\n view = PickingHeroView(self.pygame_screen, self.images_by_portrait_sprite)\n return PickingHeroScene(self.creating_world_scene, view, init_flags)\n\n def playing_scene(\n self, game_state: GameState, game_engine: GameEngine, world_behavior: AbstractWorldBehavior,\n ui_view: GameUiView, new_hero_was_created: bool, character_file: Optional[str],\n total_time_played_on_character: Millis):\n return PlayingScene(\n self.world_view, game_state, game_engine, world_behavior, ui_view, new_hero_was_created,\n character_file, self.save_file_handler, total_time_played_on_character, self.toggle_fullscreen)\n\n def challenge_complete_scene(self, total_time_played: Millis):\n return ChallengeCompleteScreenScene(self.pygame_screen, total_time_played)\n\n def victory_screen_scene(self):\n return VictoryScreenScene(self.pygame_screen)\n\n\ndef start(map_file_name: Optional[str], chosen_hero_id: Optional[str], hero_start_level: Optional[int],\n start_money: Optional[int]):\n main = Main(map_file_name, chosen_hero_id, hero_start_level, start_money)\n main.main_loop()\n", "repo_name": "risooonho/python-2d-game", "sub_path": "pythongame/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "pythongame.register_game_data.register_all_game_data", "line_number": 33, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": 
"pythongame.scene_starting_program.scene_starting_program.CommandlineFlags", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display.list_modes", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pythongame.core.view.image_loading.load_images_by_sprite", "line_number": 48, "usage_type": "call"}, {"api_name": "pythongame.core.game_data.ENTITY_SPRITE_INITIALIZERS", "line_number": 48, "usage_type": "argument"}, {"api_name": "pythongame.core.view.image_loading.load_images_by_ui_sprite", "line_number": 49, "usage_type": "call"}, {"api_name": "pythongame.core.game_data.UI_ICON_SPRITE_PATHS", "line_number": 49, "usage_type": "argument"}, {"api_name": "pythongame.scenes_game.game_ui_view.UI_ICON_SIZE", "line_number": 49, "usage_type": "argument"}, {"api_name": "pythongame.core.view.image_loading.load_images_by_ui_sprite", "line_number": 50, "usage_type": "call"}, {"api_name": "pythongame.core.game_data.UI_ICON_SPRITE_PATHS", "line_number": 50, "usage_type": "argument"}, {"api_name": "pythongame.scenes_game.game_ui_view.UI_ICON_BIG_SIZE", "line_number": 50, "usage_type": "argument"}, {"api_name": "pythongame.core.view.image_loading.load_images_by_portrait_sprite", "line_number": 51, "usage_type": "call"}, {"api_name": "pythongame.core.game_data.PORTRAIT_ICON_SPRITE_PATHS", "line_number": 51, "usage_type": "argument"}, {"api_name": "pythongame.scenes_game.game_ui_view.PORTRAIT_ICON_SIZE", "line_number": 51, "usage_type": "argument"}, {"api_name": "pythongame.core.view.game_world_view.GameWorldView", "line_number": 52, "usage_type": "call"}, {"api_name": "pythongame.scenes_game.game_ui_view.GameUiView", "line_number": 53, "usage_type": "call"}, {"api_name": "pythongame.player_file.SaveFileHandler", "line_number": 56, "usage_type": "call"}, {"api_name": "pythongame.core.sound_player.init_sound_player", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pythongame.core.common.AbstractScene", "line_number": 60, "usage_type": "name"}, {"api_name": "pythongame.scene_starting_program.scene_starting_program.StartingProgramScene", "line_number": 60, "usage_type": "call"}, {"api_name": "pythongame.core.common.Millis", "line_number": 66, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 70, "usage_type": "name"}, {"api_name": "pygame.event.get", "line_number": 70, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 74, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 74, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 78, "usage_type": "name"}, {"api_name": "pythongame.core.common.SceneTransition", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 83, "usage_type": "name"}, {"api_name": "pythongame.core.common.SceneTransition", "line_number": 83, "usage_type": "name"}, {"api_name": "pygame.display.update", "line_number": 89, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 89, "usage_type": "attribute"}, {"api_name": 
"pygame.DOUBLEBUF", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pygame.FULLSCREEN", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 99, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 104, "usage_type": "call"}, {"api_name": "pythongame.core.common.SceneTransition", "line_number": 106, "usage_type": "name"}, {"api_name": "pythongame.scene_creating_world.scene_creating_world.InitFlags", "line_number": 110, "usage_type": "name"}, {"api_name": "pythongame.scene_main_menu.view_main_menu.MainMenuView", "line_number": 111, "usage_type": "call"}, {"api_name": "pythongame.scene_main_menu.scene_main_menu.MainMenuScene", "line_number": 112, "usage_type": "call"}, {"api_name": "pythongame.scene_creating_world.scene_creating_world.InitFlags", "line_number": 115, "usage_type": "name"}, {"api_name": "pythongame.scene_creating_world.scene_creating_world.CreatingWorldScene", "line_number": 116, "usage_type": "call"}, {"api_name": "pythongame.scene_creating_world.scene_creating_world.InitFlags", "line_number": 119, "usage_type": "name"}, {"api_name": "pythongame.scene_picking_hero.view_picking_hero.PickingHeroView", "line_number": 120, "usage_type": "call"}, {"api_name": "pythongame.scene_picking_hero.scene_picking_hero.PickingHeroScene", "line_number": 121, "usage_type": "call"}, {"api_name": "pythongame.core.game_state.GameState", "line_number": 124, "usage_type": "name"}, {"api_name": "pythongame.scenes_game.game_engine.GameEngine", "line_number": 124, "usage_type": "name"}, {"api_name": "pythongame.core.world_behavior.AbstractWorldBehavior", "line_number": 124, "usage_type": "name"}, {"api_name": "pythongame.scenes_game.game_ui_view.GameUiView", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 125, "usage_type": "name"}, {"api_name": "pythongame.core.common.Millis", "line_number": 126, "usage_type": "name"}, {"api_name": "pythongame.scenes_game.scene_playing.PlayingScene", "line_number": 127, "usage_type": "call"}, {"api_name": "pythongame.core.common.Millis", "line_number": 131, "usage_type": "name"}, {"api_name": "pythongame.scene_challenge_complete_screen.scene_challenge_complete_screen.ChallengeCompleteScreenScene", "line_number": 132, "usage_type": "call"}, {"api_name": "pythongame.scene_victory_screen.scene_victory_screen.VictoryScreenScene", "line_number": 135, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 138, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 139, "usage_type": "name"}]} +{"seq_id": "44899331887", "text": "# Implementation of a simple MLP network with one hidden layer. 
Tested on the iris data set.\n# Requires: numpy, sklearn>=0.18.1, tensorflow>=1.0\n\n# NOTE: In order to make the code simple, we rewrite x * W_1 + b_1 = x' * W_1'\n# where x' = [x | 1] and W_1' is the matrix W_1 appended with a new row with elements b_1's.\n# Similarly, for h * W_2 + b_2\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\nRANDOM_SEED = 42\ntf.set_random_seed(RANDOM_SEED)\n\n\ndef init_weights(shape):\n \"\"\" Weight initialization \"\"\"\n weights = tf.random_normal(shape, stddev=0.1)\n return tf.Variable(weights)\n\n\ndef forwardprop(X, w_1, w_2):\n \"\"\"\n Forward-propagation.\n IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.\n \"\"\"\n h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \\sigma function\n yhat = tf.matmul(h, w_2) # The \\varphi function\n return yhat\n\n\ndef get_iris_data():\n \"\"\" Read the iris data set and split them into training and test sets \"\"\"\n iris = datasets.load_iris()\n data = iris[\"data\"]\n target = iris[\"target\"]\n\n # Prepend the column of 1s for bias\n N, M = data.shape\n all_X = np.ones((N, M + 1))\n all_X[:, 1:] = data\n\n # Convert into one-hot vectors\n num_labels = len(np.unique(target))\n all_Y = np.eye(num_labels)[target] # One liner trick!\n return train_test_split(all_X, all_Y, test_size=0.33, random_state=RANDOM_SEED)\n\n\ndef main():\n\n train_X, test_X, train_y, test_y = get_iris_data()\n\n print('vendo os dados da base > ')\n print('train x = ', len(train_X), ' <> ', train_X)\n print('train y = ', len(train_y), ' <> ', train_y)\n\n print('train x = ', len(test_X), ' <> ', test_X)\n print('test y = ', len(test_y), ' <> ', test_y)\n\n\n # Layer's sizes\n x_size = train_X.shape[1] # Number of input nodes: 4 features and 1 bias\n h_size = 256 # Number of hidden nodes # 256\n y_size = train_y.shape[1] # Number of outcomes (3 iris flowers)\n\n # Symbols\n X = tf.placeholder(\"float\", shape=[None, x_size])\n y = tf.placeholder(\"float\", shape=[None, y_size])\n\n # Weight initializations\n w_1 = init_weights((x_size, h_size))\n w_2 = init_weights((h_size, y_size))\n\n # Forward propagation\n yhat = forwardprop(X, w_1, w_2)\n predict = tf.argmax(yhat, axis=1)\n\n # Backward propagation\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))\n updates = tf.train.GradientDescentOptimizer(0.01).minimize(cost)\n\n # Run SGD\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n\n for epoch in range(100):\n # Train with each example\n for i in range(len(train_X)):\n sess.run(updates, feed_dict={X: train_X[i: i + 1], y: train_y[i: i + 1]})\n\n train_accuracy = np.mean(np.argmax(train_y, axis=1) == sess.run(predict, feed_dict={X: train_X, y: train_y}))\n\n #test_accuracy = np.mean(np.argmax(test_y, axis=1) == sess.run(predict, feed_dict={X: test_X, y: test_y}))\n\n #print(\"Epoch = %d, train accuracy = %.2f%%, test accuracy = %.2f%%\" %(epoch + 1, (100. * train_accuracy), (100. * test_accuracy)))\n print(\"Epoch = %d, train accuracy = %.2f%%\" %(epoch + 1, (100. 
* train_accuracy)))\n\n sess.close()\n\n\ndef main_char():\n from data import loading_data_train\n\n matrix = loading_data_train()\n\n train_X = []\n train_y = []\n test_X = [0, 1, 3]\n test_y = [0, 1, 2]\n\n vetor_x = []\n\n for i in matrix:\n lis_tmp = []\n try:\n value = float((i[1]))\n train_y.append(int(i[0]))\n lis_tmp.append(value)\n train_X.append(lis_tmp)\n vetor_x.append((train_y, train_X))\n except:\n value = float((i[1]))\n lis_tmp.append(value)\n train_X.append(lis_tmp)\n train_y.append(int(ord(i[0])))\n\n\n\n print('vendo os dados da base > ')\n\n print('train x = ', len(train_X), ' <> ', train_X)\n print('train y = ', len(train_y), ' <> ', train_y)\n\n print('test x = ', len(test_X), ' <> ', test_X)\n print('test y = ', len(test_y), ' <> ', test_y)\n\n def func_quatra():\n import numpy as np\n import tensorflow as tf\n\n # Declare list of features, we only have one real-valued feature\n def model(features, labels, mode):\n # Build a linear model and predict values\n W = tf.get_variable(\"W\", [1], dtype=tf.float64)\n b = tf.get_variable(\"b\", [1], dtype=tf.float64)\n y = W * features['x'] + b\n # Loss sub-graph\n loss = tf.reduce_sum(tf.square(y - labels))\n # Training sub-graph\n global_step = tf.train.get_global_step()\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = tf.group(optimizer.minimize(loss),\n tf.assign_add(global_step, 1))\n # ModelFnOps connects subgraphs we built to the\n # appropriate functionality.\n return tf.contrib.learn.ModelFnOps(\n mode=mode, predictions=y,\n loss=loss,\n train_op=train)\n\n\n\n\n\n\nif __name__ == '__main__':\n main_char()", "repo_name": "Marcos001/SimpleNeuralNetwok", "sub_path": "tensorflow/b_simple_mlp_tensorflow.py", "file_name": "b_simple_mlp_tensorflow.py", "file_ext": "py", "file_size_in_byte": 5309, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.nn.sigmoid", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 47, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 81, "usage_type": "call"}, 
{"api_name": "tensorflow.train", "line_number": 81, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 93, "usage_type": "call"}, {"api_name": "data.loading_data_train", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.float64", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.float64", "line_number": 147, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 150, "usage_type": "call"}, {"api_name": "tensorflow.train.get_global_step", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tensorflow.group", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.assign_add", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.contrib.learn.ModelFnOps", "line_number": 158, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 158, "usage_type": "attribute"}]} +{"seq_id": "8297631693", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport os.path\nimport sys\n\nimport numpy as np\nimport cv2\n\n# https://stackoverflow.com/a/11541450\n\n\ndef main(filename, outfile):\n img = cv2.imread(filename, cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n edges = cv2.Canny(gray, 50, 150, apertureSize=3)\n lines = cv2.HoughLinesP(image=edges, rho=1, theta=np.pi / 180,\n threshold=100, lines=np.array([]),\n minLineLength=100, maxLineGap=80)\n lines = lines.reshape(lines.shape[0], lines.shape[2])\n vertical_lines = lines[np.abs(lines[:,2] - lines[:, 0]) < 5,:]\n horizontal_lines = lines[np.abs(lines[:,3] - lines[:, 1]) < 5,:]\n vertical_lines = vertical_lines[np.argsort(vertical_lines[:,0])]\n horizontal_lines = horizontal_lines[np.argsort(horizontal_lines[:,1])]\n\n x = vertical_lines[0, 0]\n y = horizontal_lines[0, 1]\n width = vertical_lines[8, 0] - x\n height = horizontal_lines[8, 1] - y\n print(x, y, width, height)\n\n cv2.imshow('edges', edges)\n cv2.imshow('img', img)\n while True:\n if cv2.waitKey(0) == ord('q'):\n return\n\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n def is_valid_file(parser, arg):\n if not os.path.exists(arg):\n parser.error(\"The file %s does not exist!\" % arg)\n else:\n return arg\n\n parser = argparse.ArgumentParser(\n description='Read a chess board from an image into FEN notation.')\n parser.add_argument(\"-i\", dest=\"filename\", required=True,\n help=\"Input image with chess board.\", metavar=\"\",\n type=lambda x: is_valid_file(parser, x))\n parser.add_argument(\"outfile\", nargs=\"?\", type=argparse.FileType(\"w\"),\n default=sys.stdout)\n args = parser.parse_args()\n main(args.filename, args.outfile)\n", "repo_name": "ebensh/chess_cv", "sub_path": "read_board.py", "file_name": "read_board.py", "file_ext": "py", "file_size_in_byte": 1804, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.Canny", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.HoughLinesP", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 42, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 47, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "36034931906", "text": "alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\ndef caesar(message, shift_amount, cipher_direction):\n end_text = \"\"\n if cipher_direction == \"decode\":\n shift_amount *= -1\n for letter in message:\n if letter in alphabet:\n position = alphabet.index(letter)\n new_position = position + shift_amount\n new_message = alphabet[new_position]\n end_text += new_message\n else:\n end_text += letter\n print(f\"Your {cipher_direction}d message is {end_text}\")\n \n \nfrom art import logo\nprint(logo)\nend_program = False\nwhile not end_program:\n direction = input(\"Enter 'decode to decrypt and 'encode' to encrypt.\\n\")\n text = input(\"Enter text message:\\n\")\n shift = int(input(\"Enter shift number:\\n\"))\n shift = shift % 26\n caesar(message=text, shift_amount=shift, cipher_direction=direction)\n restart = input(\"Try again(yes/no)\\n\")\n if restart == \"no\":\n end_program = True\n print(\"GoodBye!\")\n", "repo_name": "noobsixt9/30-days-of-python", "sub_path": "Day8/Caesar-Cipher/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1151, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "art.logo", "line_number": 19, "usage_type": "argument"}]} +{"seq_id": "32316300118", "text": "import datetime\nimport time\nimport pytz\n \n\n'''Time Formatting'''\nIST_TIMEZONE = pytz.timezone('Asia/Kolkata')\ndmY = \"%d-%m-%Y\"\nYmd = '%Y-%m-%d'\nIMp = \"%I:%M %p\"\nHMS = \"%H:%M:%S\"\nYmdHMS = \"%Y-%m-%d %H:%M:%S\"\ndmYHMS = \"%d-%m-%Y %H:%M:%S\"\nYmdTHMSf = 
\"%Y-%m-%dT%H:%M:%S.%f\"\nYmdHMSf = \"%Y-%m-%d %H:%M:%S.%f\"\nYmdTHMSfz = \"%Y-%m-%dT%H:%M:%S.%f%z\"\n\nclass TimeFormatException(Exception):\n \"\"\"\n UserDefined exception to hadle time format error\n\n \"\"\"\n def __int__(self):\n super().__init__(\"TimeFormatError...Time should be in HH:MM:SS format\")\n\n\n\ndef generate_current_date():\n\n timezone=pytz.timezone(\"Asia/Kolkata\")\n\n now_=datetime.datetime.now().astimezone(timezone)\n\n return now_\n\ndef update_availabilty(availabilty,current,days_int):\n \n now_=current\n dates_arr=[]\n for i in range(7):\n if now_.weekday() in days_int:\n availabilty[\"days\"][now_.weekday()][\"available\"]=True\n dates_arr.append(now_.strftime(\"%m/%d/%Y\"))\n availabilty[\"days\"][now_.weekday()][\"date\"]=now_.strftime(\"%m/%d/%Y\")\n \n now_=now_+datetime.timedelta(days=1)\n \n availabilty[\"days_arr\"]=days_int\n availabilty[\"dates_arr\"]=dates_arr\n \n return availabilty\n\ndef return_time_type(isotime):\n\n return datetime.time.fromisoformat(isotime)\n\n \ndef calculate_time_slots(start_time,end_time,duration,availabilty,time_slots=None):\n if time_slots:\n \n time_slots_arr=list(time_slots.keys())\n dates_arr=availabilty[\"dates_arr\"]\n\n final_=[]\n new_=[]\n old_=[]\n\n for i in dates_arr:\n if i in time_slots_arr:\n final_.append(i)\n old_.append(i)\n time_slots_arr.remove(i)\n else:\n final_.append(i)\n new_.append(i)\n \n \n slots_={}\n \n morning_={}\n afternoon_={}\n evening_={}\n slot_availabilty_={\n \"available\":True,\n \"count\":0\n }\n start_=datetime.date(1,1,1)\n end_=datetime.date(1,1,1)\n\n start_=datetime.datetime.combine(start_,start_time)\n end_=datetime.datetime.combine(end_,end_time)\n\n twelve_=datetime.datetime.combine(datetime.date(1,1,1),datetime.time(12,0,0))\n five_=datetime.datetime.combine(datetime.date(1,1,1),datetime.time(17,0,0))\n while start_\")\n return\n fname = argv[0]\n specPlot = initSpectrum( fname )\n\n # Tweak\n specPlot.voxelSize = 21.6\n fig, ax = specPlot.plot()\n fig2, ax2 = specPlot.plotDiff()\n fig3, ax3 = specPlot.scatteringCrossSection()\n try:\n fig4, ax4 = specPlot.reflection()\n except Exception as exc:\n print (str(exc))\n\n try:\n fig5, ax5 = specPlot.scatteringCrossSection( color=\"#e41a1c\", label=\"\\$\\sigma_s\\$\", asymmetry=True )\n except Exception as exc:\n print (str(exc))\n plt.show()\n\nif __name__ == \"__main__\":\n main( sys.argv[1:] )\n", "repo_name": "davidkleiven/OptiX", "sub_path": "Coccolith/Scripts/plotSpectrum.py", "file_name": "plotSpectrum.py", "file_ext": "py", "file_size_in_byte": 8168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "matplotlib.rcParams", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 5, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 6, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, 
{"api_name": "numpy.linspace", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.log", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 143, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 214, "usage_type": "attribute"}]} +{"seq_id": "33041112579", "text": "\"\"\"\nStarts a Websocket server, with 3 datasets:\n MNIST\n eICU mortality classification\n eICU length of stay regression\n\nEach server keeps a subset of these dataset and has a teparate test set too.\n\"\"\"\nimport logging\n\nimport syft as sy\nfrom syft.workers import WebsocketServerWorker\n\nimport torch\nimport argparse\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom sklearn.preprocessing import RobustScaler\nimport numpy as np\nimport pandas as pd\n\n\ndef get_mnist_dataset(keep_labels, training=True):\n \"\"\"\n Sets up MNIST dataset for training or testing.\n \"\"\"\n mnist_dataset = datasets.MNIST(\n root=\"./data\",\n train=training,\n download=True,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n )\n\n # create mnist training\n indices = np.isin(mnist_dataset.targets, 
keep_labels).astype(\"uint8\")\n logger.info(\"number of true indices: %s\", indices.sum())\n selected_data = (\n torch.masked_select(\n mnist_dataset.data.transpose(0, 2),\n torch.tensor(indices)\n ).view(28, 28, -1).transpose(2, 0)\n )\n logger.info(\"after selection: %s\", selected_data.shape)\n selected_targets = torch.masked_select(\n mnist_dataset.targets,\n torch.tensor(indices)\n )\n\n return sy.BaseDataset(\n data=selected_data,\n targets=selected_targets,\n transform=mnist_dataset.transform\n )\n\n\ndef get_eicu_dataset(hospitalid, outcome):\n \"\"\"\n Sets up the eICU dataset for training or testing.\n \"\"\"\n df_x = pd.read_csv('x.csv')\n df_y = pd.read_csv('y.csv')\n\n # delete rows where the outcome is missing\n to_keep = ~(pd.isnull(df_y).sum(axis=1) > 0)\n df_x = df_x[to_keep]\n df_y = df_y[to_keep]\n\n # restrict x and y to the required hospital or test set\n to_keep = df_x.hospitalid.values == hospitalid\n df_x.drop('hospitalid', axis=1, inplace=True)\n df_x = df_x[to_keep]\n scaler = RobustScaler(quantile_range=(10.0, 90.0))\n x = scaler.fit_transform(df_x.values)\n y = df_y[outcome][to_keep].values\n\n return sy.BaseDataset(\n data=torch.from_numpy(x.astype('float32')),\n targets=torch.from_numpy(y.astype('float32'))\n )\n\n\ndef start_websocket_server_worker(id, host, port, hook, verbose, keep_labels=None):\n \"\"\"\n Helper function for spinning up a websocket server and setting up the local\n datasets: MNIST, eICU for classification and for regression.\n \"\"\"\n\n server = WebsocketServerWorker(\n id=id,\n host=host,\n port=port,\n hook=hook,\n verbose=verbose\n )\n\n # add mnist train & test\n server.add_dataset(\n get_mnist_dataset(keep_labels, training=True),\n key='mnist_train'\n )\n server.add_dataset(\n get_mnist_dataset(list(range(10)), training=False),\n key='mnist_test'\n )\n\n # add eicu train & test for classification\n id2hospitalid = {\n 'h1': 1,\n 'h2': 2,\n 'h3': 3,\n }\n server.add_dataset(\n get_eicu_dataset(hospitalid=id2hospitalid[id], outcome='hosp_mort'),\n key='eicu_class_train'\n )\n server.add_dataset(\n get_eicu_dataset(hospitalid=4, outcome='hosp_mort'),\n key='eicu_class_test'\n )\n\n # add eicu train & test for regression\n server.add_dataset(\n get_eicu_dataset(hospitalid=id2hospitalid[id], outcome='icu_los_hours'),\n key='eicu_reg_train'\n )\n server.add_dataset(\n get_eicu_dataset(hospitalid=4, outcome='icu_los_hours'),\n key='eicu_reg_test'\n )\n\n server.start()\n return server\n\n\nif __name__ == \"__main__\":\n # Logging setup\n logger = logging.getLogger(\"run_websocket_server\")\n FORMAT = (\"%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) \"\n \"- %(message)s\")\n logging.basicConfig(format=FORMAT)\n logger.setLevel(level=logging.DEBUG)\n\n # Parse args\n parser = argparse.ArgumentParser(description=\"Run websocket server worker.\")\n parser.add_argument(\n \"--port\",\n \"-p\",\n type=int,\n help=\"port number of the websocket server worker, e.g. --port 8777\",\n )\n parser.add_argument(\n \"--host\", type=str,\n default=\"localhost\",\n help=\"host for the connection\"\n )\n parser.add_argument(\n \"--id\",\n type=str,\n help=\"name (id) of the websocket server worker, e.g. 
--id hospital1\"\n )\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"if set, websocket server worker will be started in verbose mode\",\n )\n\n args = parser.parse_args()\n\n # define which hospital gets which mnist examples for training\n mnist_keep_labels = {\n \"h1\": [0, 1, 2, 3],\n \"h2\": [4, 5, 6],\n \"h3\": [7, 8, 9],\n }\n\n # Hook and start server\n hook = sy.TorchHook(torch)\n server = start_websocket_server_worker(\n id=args.id,\n host=args.host,\n port=args.port,\n hook=hook,\n verbose=args.verbose,\n keep_labels=mnist_keep_labels[args.id]\n )\n", "repo_name": "danielhomola/federated_ml_platform", "sub_path": "src/neoglia/workers/websocket_start.py", "file_name": "websocket_start.py", "file_ext": "py", "file_size_in_byte": 5144, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.datasets.MNIST", "line_number": 27, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 27, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 31, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 31, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 32, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 32, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.isin", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.masked_select", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 48, "usage_type": "call"}, {"api_name": "syft.BaseDataset", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.isnull", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.RobustScaler", "line_number": 74, "usage_type": "call"}, {"api_name": "syft.BaseDataset", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 80, "usage_type": "call"}, {"api_name": "syft.workers.WebsocketServerWorker", "line_number": 90, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 139, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 142, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 143, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 146, "usage_type": "call"}, {"api_name": "syft.TorchHook", "line_number": 180, "usage_type": "call"}]} +{"seq_id": "2812350452", "text": "from flask import Flask, jsonify, request\nfrom uuid import uuid4\n\n# Create a Flask web application\napp = Flask(__name__)\n\n# Create a unique node ID\nnode_id = str(uuid4()).replace('-', '')\nfrom blockchain import Blockchain\n# Import the Blockchain class from the blockchain module\n\n# Create a Blockchain instance\nblockchain = Blockchain()\n\n# Define a route for mining a new block\n@app.route('/mine', methods=['GET'])\ndef mine():\n last_block = blockchain.last_block()\n last_proof = last_block['proof']\n proof = 
blockchain.proof_of_work(last_proof)\n\n# Create a new transaction for rewarding the miner (sender='0' represents the system)\n blockchain.new_transaction(\n sender='0',\n receiver=node_id,\n amount=1,\n )\n\n # Calculate the previous hash for the new block\n previous_hash = blockchain.hash(last_block)\n # Create a new block and add it to the blockchain\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message': \"New Block formed\",\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n }\n\n # Return the response as JSON with an HTTP status code 200 (OK)\n return jsonify(response), 200\n\n# Define a route for creating a new transaction\n@app.route('/transactions/new', methods=['POST'])\ndef new_transaction():\n # Get the transaction data from the request\n values = request.get_json()\n\n # Check if the required fields (sender, receiver, amount) are present\n required = ['sender', 'receiver', 'amount']\n if not all(k in values for k in required):\n return 'Missing values', 400\n\n index = blockchain.new_transaction(values['sender'], values['receiver'], values['amount'])\n response = {'message': f'Transaction will be added to Block {index}'}\n return jsonify(response), 201\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n # Get the current state of the blockchain and its length\n response = {\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain),\n }\n return jsonify(response), 200\n\n\n# Start the Flask app on host '0.0.0.0' (accessible from any IP) and port 5000\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n\n", "repo_name": "codeabuu/Simple_Blockchain", "sub_path": "BC_Api.py", "file_name": "BC_Api.py", "file_ext": "py", "file_size_in_byte": 2292, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 8, "usage_type": "call"}, {"api_name": "blockchain.Blockchain", "line_number": 13, "usage_type": "call"}, {"api_name": "blockchain.last_block", "line_number": 18, "usage_type": "call"}, {"api_name": "blockchain.proof_of_work", "line_number": 20, "usage_type": "call"}, {"api_name": "blockchain.new_transaction", "line_number": 23, "usage_type": "call"}, {"api_name": "blockchain.hash", "line_number": 30, "usage_type": "call"}, {"api_name": "blockchain.new_block", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "blockchain.new_transaction", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 58, "usage_type": "call"}, {"api_name": "blockchain.chain", "line_number": 64, "usage_type": "attribute"}, {"api_name": "blockchain.chain", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "34057870795", "text": "from __future__ import division, absolute_import\n\nfrom datetime import datetime\nfrom dateutil.tz import UTC\nfrom functools import wraps\nimport itertools\nimport random\n\nimport six\nimport click\n\n# Use numpy if available for fast decoding\ntry:\n import numpy as np\n _numpy = True\nexcept ImportError: # pragma: no cover\n 
_numpy = False\n\n__all__ = ['evaluate_ising', 'uniform_iterator', 'uniform_get',\n 'default_text_input', 'click_info_switch', 'datetime_to_timestamp']\n\n\ndef evaluate_ising(linear, quad, state):\n \"\"\"Calculate the energy of a state given the Hamiltonian.\n\n Args:\n linear: Linear Hamiltonian terms.\n quad: Quadratic Hamiltonian terms.\n state: Vector of spins describing the system state.\n\n Returns:\n Energy of the state evaluated by the given energy function.\n \"\"\"\n\n # If we were given a numpy array cast to list\n if _numpy and isinstance(state, np.ndarray):\n return evaluate_ising(linear, quad, state.tolist())\n\n # Accumulate the linear and quadratic values\n energy = 0.0\n for index, value in uniform_iterator(linear):\n energy += state[index] * value\n for (index_a, index_b), value in six.iteritems(quad):\n energy += value * state[index_a] * state[index_b]\n return energy\n\n\ndef active_qubits(linear, quadratic):\n \"\"\"Calculate a set of all active qubits. Qubit is \"active\" if it has\n bias or coupling attached.\n\n Args:\n linear (dict[variable, bias]/list[variable, bias]):\n Linear terms of the model.\n\n quadratic (dict[(variable, variable), bias]):\n Quadratic terms of the model.\n\n Returns:\n set:\n Active qubits' indices.\n \"\"\"\n\n active = {idx for idx,bias in uniform_iterator(linear)}\n for edge, _ in six.iteritems(quadratic):\n active.update(edge)\n return active\n\n\ndef generate_valid_random_problem(solver):\n \"\"\"Generates an Ising problem formulation valid for a particular solver,\n using all qubits and all couplings.\"\"\"\n\n h_range = solver.properties['h_range']\n j_range = solver.properties['j_range']\n\n lin = {qubit: random.uniform(*h_range) for qubit in solver.nodes}\n quad = {edge: random.uniform(*j_range) for edge in solver.undirected_edges}\n\n return lin, quad\n\n\ndef uniform_iterator(sequence):\n \"\"\"Uniform (key, value) iteration on a `dict`,\n or (idx, value) on a `list`.\"\"\"\n\n if isinstance(sequence, dict):\n return six.iteritems(sequence)\n else:\n return enumerate(sequence)\n\n\ndef uniform_get(sequence, index, default=None):\n \"\"\"Uniform `dict`/`list` item getter, where `index` is interpreted as a key\n for maps and as numeric index for lists.\"\"\"\n\n if isinstance(sequence, dict):\n return sequence.get(index, default)\n else:\n return sequence[index] if index < len(sequence) else default\n\n\ndef strip_head(sequence, values):\n \"\"\"Strips elements of `values` from the beginning of `sequence`.\"\"\"\n values = set(values)\n return list(itertools.dropwhile(lambda x: x in values, sequence))\n\n\ndef strip_tail(sequence, values):\n \"\"\"Strip `values` from the end of `sequence`.\"\"\"\n return list(reversed(list(strip_head(reversed(sequence), values))))\n\n\ndef default_text_input(prompt, default=None, optional=True):\n if default:\n prompt = \"{} [{}]: \".format(prompt, default)\n else:\n if optional:\n prompt = \"{} [skip]: \".format(prompt)\n else:\n prompt = \"{}: \".format(prompt)\n\n line = ''\n while not line:\n line = six.moves.input(prompt)\n if not line:\n line = default\n if not line:\n if optional:\n break\n click.echo(\"Input required, please try again.\")\n return line\n\n\ndef click_info_switch(f):\n \"\"\"Decorator to create eager Click info switch option, as described in:\n http://click.pocoo.org/6/options/#callbacks-and-eager-options.\n\n Takes a no-argument function and abstracts the boilerplate required by\n Click (value checking, exit on done).\n\n Example:\n\n @click.option('--my-option', 
is_flag=True, callback=my_option,\n expose_value=False, is_eager=True)\n def test():\n pass\n\n @click_info_switch\n def my_option():\n click.echo('some info related to my switch')\n \"\"\"\n\n @wraps(f)\n def wrapped(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n f()\n ctx.exit()\n return wrapped\n\n\ndef datetime_to_timestamp(dt):\n \"\"\"Convert timezone-aware `datetime` to POSIX timestamp and\n return seconds since UNIX epoch.\n\n Note: similar to `datetime.timestamp()` in Python 3.3+.\n \"\"\"\n\n epoch = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)\n return (dt - epoch).total_seconds()\n\n\ndef strtrunc(s, maxlen=60):\n s = str(s)\n return s[:(maxlen-3)]+'...' if len(s) > maxlen else s\n", "repo_name": "Elmistrana/dwave-cloud-client", "sub_path": "dwave/cloud/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 4922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.ndarray", "line_number": 36, "usage_type": "attribute"}, {"api_name": "six.iteritems", "line_number": 43, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 65, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 77, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 78, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 88, "usage_type": "call"}, {"api_name": "itertools.dropwhile", "line_number": 106, "usage_type": "call"}, {"api_name": "six.moves.input", "line_number": 125, "usage_type": "call"}, {"api_name": "six.moves", "line_number": 125, "usage_type": "attribute"}, {"api_name": "click.echo", "line_number": 131, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 170, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 170, "usage_type": "name"}, {"api_name": "dateutil.tz.UTC", "line_number": 170, "usage_type": "name"}]} +{"seq_id": "69897277597", "text": "\"\"\"Support for HWKettle.\"\"\"\nimport logging\nfrom .const import *\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import *\nimport homeassistant.helpers.event as ev\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom datetime import timedelta\nfrom .kettle_connection import KettleConnection\nimport json\n\n_LOGGER = logging.getLogger(__name__)\n\nPLATFORMS = [\n Platform.WATER_HEATER,\n Platform.SWITCH,\n Platform.NUMBER,\n Platform.SENSOR,\n]\n\n\nasync def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):\n \"\"\"Set up HWKettle integration from a config entry.\"\"\"\n if DOMAIN not in hass.data: hass.data[DOMAIN] = {}\n if entry.entry_id not in hass.data: hass.data[DOMAIN][entry.entry_id] = {}\n\n email = entry.data[CONF_EMAIL]\n password = entry.data[CONF_PASSWORD]\n kettle_id = entry.data[CONF_KETTLE_ID]\n\n kettle = KettleConnection(\n hass=hass,\n username=email,\n password=password,\n kettle_id=entry.data[CONF_KETTLE_ID],\n )\n await kettle.start_listening()\n\n hass.data[DOMAIN][entry.entry_id][DATA_CONNECTION] = kettle\n hass.data[DOMAIN][DATA_DEVICE_INFO] = lambda: device_info(entry)\n\n for component in PLATFORMS:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(entry, component)\n )\n\n return True\n\n\ndef device_info(entry):\n return 
DeviceInfo(\n name=(FRIENDLY_NAME + \" \" + entry.data.get(CONF_FRIENDLY_NAME, \"\")).strip(),\n manufacturer=MANUFACTORER,\n model=entry.data.get(CONF_KETTLE_ID, None),\n sw_version=\"?\",\n identifiers={\n (DOMAIN, entry.data[CONF_KETTLE_ID])\n },\n connections={\n (\"kettle_id\", entry.data[CONF_KETTLE_ID])\n }\n )\n\n", "repo_name": "lesleyxyz/hass-homewizard-kettle", "sub_path": "custom_components/homewizard_kitchen_kettle/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1889, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 24, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 24, "usage_type": "name"}, {"api_name": "kettle_connection.KettleConnection", "line_number": 33, "usage_type": "call"}, {"api_name": "homeassistant.helpers.entity.DeviceInfo", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "19161769751", "text": "import mxnet as mx\nfrom mxnet import gluon\nimport gluoncv as gcv\nfrom mxnet.gluon import nn\nimport numpy as np\nfrom feature import FPNFeatureExpander\nfrom mxnet import autograd\nfrom mxboard import SummaryWriter\n\n\nclass East(nn.HybridBlock):\n\n def __init__(self, base_model, outputs, text_scale=512, ctx=mx.cpu(), pretrained_base=True, **kwargs):\n super(East, self).__init__()\n self.text_scale = text_scale\n weight_init = mx.init.Xavier(factor_type=\"in\", magnitude=2.34)\n with self.name_scope():\n self.features = FPNFeatureExpander(network=base_model, outputs=outputs,pretrained=pretrained_base, ctx=ctx, **kwargs)\n\n self.score_branch = nn.Conv2D(1, 1, activation='sigmoid')\n self.geo_branch = nn.Conv2D(4, 1, activation='sigmoid')\n self.theta_branch = nn.Conv2D(1, 1, activation='sigmoid')\n\n def hybrid_forward(self, F, x, **kwargs):\n #x = F.Cast(x, dtype='float16')\n x = self.features(x)\n #x = F.Cast(x, dtype='float32')\n score_map = self.score_branch(x)\n geo_map = self.geo_branch(x) * self.text_scale\n\n angle_map = (self.theta_branch(x) - 0.5) * np.pi / 2.\n geometry_map = F.Concat(geo_map, angle_map, dim=1)\n\n return score_map, geometry_map\n\n\ndef get_east_resnet50(**kwargs):\n net = East(base_model='resnet50_v1d',\n outputs=['pool0_fwd','layers2_bottleneckv1b0__plus0','layers3_bottleneckv1b1__plus0' ,'layers4_relu8_fwd'], **kwargs) #layers4_bottleneckv1b2__plus0\n #outputs=['layers1_relu8_fwd', 'layers2_relu11_fwd', 'layers3_relu17_fwd', 'layers4_relu8_fwd'], **kwargs)\n\n return net\n\n\ndef get_east_mobilenet(**kwargs):\n net = East(base_model='mobilenetv2_1.0',\n outputs=['features_linearbottleneck3_relu60_relu6', 'features_linearbottleneck6_relu60_relu6',\n 'features_linearbottleneck9_relu60_relu6', 'features_linearbottleneck16_relu60_relu6'], **kwargs)\n return net\n\n\n_models = {\n 'resnet50': get_east_resnet50,\n 'mobilenet': get_east_mobilenet,\n}\n\n\ndef get_model(name, **kwargs):\n name = name.lower()\n if name not in _models:\n err_str = '\"%s\" is not among the following model list:\\n\\t' % (name)\n err_str += '%s' % ('\\n\\t'.join(sorted(_models.keys())))\n raise ValueError(err_str)\n net = _models[name](**kwargs)\n return net\n\n\nif __name__ == '__main__':\n net = get_model('resnet50', pretrained_base=True)\n net.hybridize()\n net.initialize()\n with autograd.train_mode():\n x = mx.nd.array([np.random.normal(size=(3, 512, 512))])\n 
net(mx.nd.random.uniform(low=0, high=1, shape=(1, 3, 512, 512)))\n with SummaryWriter(logdir='./logs') as sw:\n sw.add_graph(net)\n", "repo_name": "zzmcdc/east", "sub_path": "network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 2613, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mxnet.gluon.nn.HybridBlock", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "mxnet.cpu", "line_number": 13, "usage_type": "call"}, {"api_name": "mxnet.init.Xavier", "line_number": 16, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 16, "usage_type": "attribute"}, {"api_name": "feature.FPNFeatureExpander", "line_number": 18, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn.Conv2D", "line_number": 20, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Conv2D", "line_number": 21, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "mxnet.gluon.nn.Conv2D", "line_number": 22, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mxnet.autograd.train_mode", "line_number": 72, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 72, "usage_type": "name"}, {"api_name": "mxnet.nd.array", "line_number": 73, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 73, "usage_type": "attribute"}, {"api_name": "mxnet.nd.random.uniform", "line_number": 74, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 74, "usage_type": "attribute"}, {"api_name": "mxboard.SummaryWriter", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "426339398", "text": "from __future__ import absolute_import, unicode_literals\n\nfrom celery import shared_task, states\nfrom celery.exceptions import Ignore\nfrom celery_progress.backend import ProgressRecorder\n\nimport urllib.request\nimport json\nimport datetime\nimport io\nimport chess.pgn\n\n\nMAX_DEPTH = 4\n\n\ndef get_archive_links(username):\n link = f\"https://api.chess.com/pub/player/{username}/games/archives\"\n\n try:\n with urllib.request.urlopen(link) as url:\n data = json.loads(url.read().decode())\n return data[\"archives\"]\n except urllib.error.HTTPError as exception:\n if exception.code == 404:\n raise Exception(\"This username was not found on chess.com\")\n else:\n raise Exception(\"Something went wrong, please try later\")\n except Exception as exception:\n raise Exception(\"Something went wrong, please try later\")\n\n\ndef download_games(archive_links):\n try:\n for archive_link in archive_links:\n year, month = archive_link.split('/')[-2:]\n period = f\"{datetime.datetime.strptime(month, '%m').strftime('%b')} {year}\"\n with urllib.request.urlopen(archive_link) as url:\n data = json.loads(url.read().decode())\n yield period, data['games']\n except Exception as exception:\n raise Exception(\"Something went wrong, please try later\")\n\n\ndef update_game_tree(tree, game, result):\n current_node = tree\n \n for i, move in enumerate(game.mainline_moves()):\n if i > MAX_DEPTH: break\n move = str(move)\n if move in 
current_node['next_moves']:\n current_node['next_moves'][move]['w'] += 1 if result == +1 else 0\n current_node['next_moves'][move]['l'] += 1 if result == -1 else 0\n current_node['next_moves'][move]['d'] += 1 if result == 0 else 0\n else:\n current_node['next_moves'][move] = {\n 'w': 1 if result == +1 else 0,\n 'l': 1 if result == -1 else 0,\n 'd': 1 if result == 0 else 0,\n 'next_moves': {}\n }\n current_node = current_node['next_moves'][move]\n\n\n@shared_task(bind=True)\ndef generate_game_trees(self, username):\n progress_recorder = ProgressRecorder(self)\n\n tree_white = {'next_moves': {}}\n tree_black = {'next_moves': {}}\n result = {\n 'white': tree_white,\n 'black': tree_black,\n 'error': None\n }\n\n try:\n archive_links = get_archive_links(username)\n for i, (period, games_data) in enumerate(download_games(archive_links)):\n for game_data in games_data:\n game = chess.pgn.read_game(io.StringIO(game_data['pgn']))\n if game_data['white']['username'].lower() == username:\n if game_data['white']['result'] == 'win':\n # I won as white\n update_game_tree(tree_white, game, +1)\n elif game_data['black']['result'] == 'win':\n # I lost as white\n update_game_tree(tree_white, game, -1)\n else:\n # I drew as white\n update_game_tree(tree_white, game, 0)\n else:\n if game_data['black']['result'] == 'win':\n # I won as black\n update_game_tree(tree_black, game, +1)\n elif game_data['white']['result'] == 'win':\n # I lost as black\n update_game_tree(tree_black, game, -1)\n else:\n # I drew as black\n update_game_tree(tree_black, game, 0)\n progress_recorder.set_progress(i + 1, len(archive_links), description=f\"Downloading games from {period}\")\n return result\n except Exception as exception:\n result['error'] = str(exception)\n return result", "repo_name": "220usamaahmed/ChesscomGameExplorerBackend", "sub_path": "api/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 3937, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 21, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 21, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request.error", "line_number": 24, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "celery_progress.backend.ProgressRecorder", "line_number": 67, "usage_type": "call"}, {"api_name": "chess.pgn.pgn.read_game", "line_number": 81, "usage_type": "call"}, {"api_name": "chess.pgn.pgn", "line_number": 81, "usage_type": "attribute"}, {"api_name": "chess.pgn", "line_number": 81, "usage_type": "name"}, {"api_name": "io.StringIO", "line_number": 81, "usage_type": "call"}, {"api_name": "celery.shared_task", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "33919612689", "text": "import json\nimport os\n\nfrom django.http 
import response\nfrom .models import CarDealer, DealerReview\nfrom requests.auth import HTTPBasicAuth\nimport requests\nfrom ibm_watson import NaturalLanguageUnderstandingV1\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom ibm_watson.natural_language_understanding_v1 import (\n Features,\n EntitiesOptions,\n KeywordsOptions,\n SentimentOptions,\n)\n\n\n# Create a `get_request` to make HTTP GET requests\n# e.g., response = requests.get(url, params=params, headers={'Content-Type': 'application/json'},\n# auth=HTTPBasicAuth('apikey', api_key))\ndef get_request(url, apikey, **kwargs):\n print(\"GET from {} \".format(url))\n try:\n # Call get method of requests library with URL and parameters\n if apikey:\n response = requests.get(\n url,\n params=kwargs,\n headers={\"Content-Type\": \"application/json\"},\n auth=HTTPBasicAuth(\"apikey\", apikey),\n )\n else:\n response = requests.get(\n url, params=kwargs, headers={\"Content-Type\": \"application/json\"}\n )\n\n except:\n # If any error occurs\n print(\"Network exception occurred\")\n status_code = response.status_code\n print(\"With status {} \".format(status_code))\n json_data = json.loads(response.text)\n return json_data\n\n\n# Create a `post_request` to make HTTP POST requests\n# e.g., response = requests.post(url, params=kwargs, json=payload)\ndef post_request(url, payload, **kwargs):\n print(\"POST to {} \".format(url))\n try:\n # Call post method of requests library with URL and parameters\n response = requests.post(\n url,\n params=kwargs,\n headers={\"Content-Type\": \"application/json\"},\n json=payload,\n )\n except:\n # If any error occurs\n print(\"Network exception occurred\")\n status_code = response.status_code\n print(\"With status {} \".format(status_code))\n json_data = json.loads(response.text)\n return json_data\n\n\n# Create a get_dealers_from_cf method to get dealers from a cloud function\n# def get_dealers_from_cf(url, **kwargs):\n# - Call get_request() with specified arguments\n# - Parse JSON results into a CarDealer object list\ndef get_dealers_from_cf(url, **kwargs):\n results = []\n # Call get_request with a URL parameter\n json_result = get_request(url, 0)\n if json_result:\n # Get the row list in JSON as dealers\n dealers = json_result[\"res\"][\"rows\"] # For each dealer object\n for dealer in dealers:\n # Get its content in `doc` object\n\n dealer_doc = dealer[\"doc\"]\n # Create a CarDealer object with values in `doc` object\n dealer_obj = CarDealer(\n address=dealer_doc[\"address\"],\n city=dealer_doc[\"city\"],\n full_name=dealer_doc[\"full_name\"],\n id=dealer_doc[\"id\"],\n lat=dealer_doc[\"lat\"],\n long=dealer_doc[\"long\"],\n short_name=dealer_doc[\"short_name\"],\n st=dealer_doc[\"st\"],\n zip=dealer_doc[\"zip\"],\n )\n results.append(dealer_obj)\n\n return results\n\n\n# Create a get_dealer_reviews_from_cf method to get reviews by dealer id from a cloud function\n# def get_dealer_by_id_from_cf(url, dealerId):\n# - Call get_request() with specified arguments\n# - Parse JSON results into a DealerView object list\n\n\ndef get_dealer_by_id_from_cf(url, dealer_id):\n # Call get_request with a URL parameter\n json_result = get_request(url + f\"?id={int(dealer_id)}\", 0)\n if json_result:\n # Get the row list in JSON as dealers\n dealer = json_result[\"res\"]\n # Get its content in `doc` object\n\n dealer_doc = dealer[\"docs\"][0]\n # Create a CarDealer object with values in `doc` object\n dealer_obj = CarDealer(\n address=dealer_doc[\"address\"],\n city=dealer_doc[\"city\"],\n 
full_name=dealer_doc[\"full_name\"],\n id=dealer_doc[\"id\"],\n lat=dealer_doc[\"lat\"],\n long=dealer_doc[\"long\"],\n short_name=dealer_doc[\"short_name\"],\n st=dealer_doc[\"st\"],\n zip=dealer_doc[\"zip\"],\n )\n\n return dealer_obj\n\n\ndef get_dealer_reviews_from_cf(url, dealerId):\n results = []\n # call get_request using the url param and dealerId\n json_result = get_request(url + f\"?dealerId={dealerId}\", 0)\n if json_result:\n # test result\n reviews = json_result[\"reviews\"] # For each dealer object\n for review in reviews:\n\n # Create a CarDealer object with values in `doc` object\n try:\n review_obj = DealerReview(\n dealership=review[\"dealership\"],\n sentiment=analyze_review_sentiments(review[\"review\"]),\n review=review[\"review\"],\n name=review[\"name\"],\n id=review[\"id\"],\n purchase=review[\"purchase\"],\n car_year=review[\"car_year\"],\n purchase_date=review[\"purchase_date\"],\n car_model=review[\"car_model\"],\n car_make=review[\"car_make\"],\n )\n results.append(review_obj)\n except:\n review_obj = DealerReview(\n dealership=review[\"dealership\"],\n sentiment=analyze_review_sentiments(review[\"review\"]),\n review=review[\"review\"],\n name=review[\"name\"],\n id=review[\"id\"],\n purchase=review[\"purchase\"],\n car_year=None,\n purchase_date=None,\n car_model=None,\n car_make=None,\n )\n results.append(review_obj)\n\n return results\n\n\n# Create an `analyze_review_sentiments` method to call Watson NLU and analyze text\n# def analyze_review_sentiments(text):\n# - Call get_request() with specified arguments\n# - Get the returned sentiment label such as Positive or Negative\ndef analyze_review_sentiments(text):\n authenticator = IAMAuthenticator(os.environ[\"NLU_API_KEY\"])\n natural_language_understanding = NaturalLanguageUnderstandingV1(\n version=\"2021-03-25\", authenticator=authenticator\n )\n\n natural_language_understanding.set_service_url(\n \"https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/d6547bbc-cfb0-4db4-b9a9-269cd97f02f0\"\n )\n\n response = natural_language_understanding.analyze(\n language=\"en\", text=text, features=Features(sentiment=SentimentOptions())\n ).get_result()\n\n result = response[\"sentiment\"][\"document\"][\"label\"]\n return result\n", "repo_name": "shifterbit/agfzb-CloudAppDevelopment_Capstone", "sub_path": "server/djangoapp/restapis.py", "file_name": "restapis.py", "file_ext": "py", "file_size_in_byte": 6857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.http.response", "line_number": 26, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.auth.HTTPBasicAuth", "line_number": 30, "usage_type": "call"}, {"api_name": "django.http.response", "line_number": 33, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "django.http.response.status_code", "line_number": 40, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 40, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.response.text", "line_number": 42, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 42, "usage_type": "name"}, {"api_name": "django.http.response", "line_number": 52, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 52, "usage_type": "call"}, {"api_name": 
"django.http.response.status_code", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 61, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}, {"api_name": "django.http.response.text", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.http.response", "line_number": 63, "usage_type": "name"}, {"api_name": "models.CarDealer", "line_number": 83, "usage_type": "call"}, {"api_name": "models.CarDealer", "line_number": 115, "usage_type": "call"}, {"api_name": "models.DealerReview", "line_number": 141, "usage_type": "call"}, {"api_name": "models.DealerReview", "line_number": 155, "usage_type": "call"}, {"api_name": "ibm_cloud_sdk_core.authenticators.IAMAuthenticator", "line_number": 177, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 177, "usage_type": "attribute"}, {"api_name": "ibm_watson.NaturalLanguageUnderstandingV1", "line_number": 178, "usage_type": "call"}, {"api_name": "django.http.response", "line_number": 186, "usage_type": "name"}, {"api_name": "ibm_watson.natural_language_understanding_v1.Features", "line_number": 187, "usage_type": "call"}, {"api_name": "ibm_watson.natural_language_understanding_v1.SentimentOptions", "line_number": 187, "usage_type": "call"}, {"api_name": "django.http.response", "line_number": 190, "usage_type": "name"}]} +{"seq_id": "31294877451", "text": "import pygame\r\nfrom pygame.sprite import Sprite\r\n\r\nclass Bullet(Sprite):\r\n def __init__(self, rlm_game, direction = 'up'):\r\n super().__init__()\r\n self.screen = rlm_game.screen\r\n self.settings = rlm_game.settings\r\n self.direction = direction\r\n \r\n if self.direction == 'up':\r\n self.image = pygame.image.load('images/magic_bolt.png')\r\n elif self.direction == 'down':\r\n self.image = pygame.transform.rotate(pygame.image.load('images/magic_bolt.png'), 180)\r\n elif self.direction == 'left':\r\n self.image = pygame.transform.rotate(pygame.image.load('images/magic_bolt.png'), 90)\r\n elif self.direction == 'right':\r\n self.image = pygame.transform.rotate(pygame.image.load('images/magic_bolt.png'), -90)\r\n \r\n self.rect = self.image.get_rect()\r\n \r\n self.rect.midtop = rlm_game.playerCharacter.rect.midtop\r\n \r\n self.y = float(self.rect.y)\r\n self.x = float(self.rect.x)\r\n\r\n def update(self):\r\n if self.direction == 'up':\r\n self.y -= self.settings.bullet_speed\r\n self.rect.y = self.y\r\n elif self.direction == 'down':\r\n self.y += self.settings.bullet_speed\r\n self.rect.y = self.y\r\n elif self.direction == 'left':\r\n self.x -= self.settings.bullet_speed\r\n self.rect.x = self.x\r\n elif self.direction == 'right':\r\n self.x += self.settings.bullet_speed\r\n self.rect.x = self.x\r\n\r\n def draw_bullet(self):\r\n self.screen.blit(self.image, self.rect)", "repo_name": "starlis13/CIT228", "sub_path": "forest_rogue/bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 1616, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": 
"call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.transform.rotate", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}]} +{"seq_id": "31505160242", "text": "import pytest\n\nimport pennylane as qml\nfrom pennylane.ops import Hamiltonian, Prod, SProd, Sum\nfrom pennylane.pauli.pauli_arithmetic import PauliSentence\n\n\nclass TestDotSum:\n \"\"\"Unittests for the dot function when ``pauli=False``.\"\"\"\n\n def test_dot_returns_sum(self):\n \"\"\"Test that the dot function returns a Sum operator when ``pauli=False``.\"\"\"\n c = [1.0, 2.0, 3.0]\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n S = qml.dot(coeffs=c, ops=o)\n assert isinstance(S, Sum)\n for summand, coeff in zip(S.operands, c):\n if coeff == 1:\n assert isinstance(summand, qml.PauliX)\n else:\n assert isinstance(summand, SProd)\n assert summand.scalar == coeff\n\n def test_dot_returns_sprod(self):\n \"\"\"Test that the dot function returns a SProd operator when only one operator is input.\"\"\"\n O = qml.dot(coeffs=[2.0], ops=[qml.PauliX(0)])\n assert isinstance(O, SProd)\n assert O.scalar == 2\n\n def test_cast_tensor_to_prod(self):\n \"\"\"Test that `dot` casts all `Tensor` objects to `Prod`.\"\"\"\n result = qml.dot(\n coeffs=[1, 1, 1],\n ops=[\n qml.PauliX(0) @ qml.PauliY(0),\n qml.PauliX(0) @ qml.PauliY(0),\n qml.PauliX(0) @ qml.PauliY(0),\n ],\n )\n assert isinstance(result, Sum)\n for op in result:\n assert isinstance(op, Prod)\n\n def test_dot_groups_coeffs(self):\n \"\"\"Test that the `dot` function groups the coefficients.\"\"\"\n result = qml.dot(coeffs=[4, 4, 4], ops=[qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])\n assert isinstance(result, SProd)\n assert result.scalar == 4\n assert isinstance(result.base, Sum)\n assert len(result.base) == 3\n for op in result.base:\n assert isinstance(op, qml.PauliX)\n\n def test_dot_groups_coeffs_with_different_sign(self):\n \"\"\"test that the `dot` function groups coefficients with different signs.\"\"\"\n cs = [4, -4, 4]\n result = qml.dot(coeffs=cs, ops=[qml.PauliX(0), qml.PauliX(1), qml.PauliX(2)])\n assert isinstance(result, SProd)\n assert result.scalar == 4\n for op, c in zip(result.base, cs):\n if c == -4:\n assert isinstance(op, SProd)\n assert op.scalar == -1\n else:\n assert isinstance(op, qml.PauliX)\n\n def test_dot_different_number_of_coeffs_and_ops(self):\n \"\"\"Test that a ValueError is raised when the number of coefficients and operators does\n not match.\"\"\"\n with pytest.raises(\n ValueError,\n match=\"Number of coefficients and operators does not match\",\n ):\n qml.dot([1.0], [qml.PauliX(0), qml.PauliY(1)])\n\n def test_dot_empty_coeffs_or_ops(self):\n \"\"\"Test that a ValueError is raised when the number of coefficients and operators does\n not match.\"\"\"\n with pytest.raises(\n ValueError,\n match=\"Cannot compute the dot product of an empty sequence\",\n ):\n qml.dot([], [])\n\n @pytest.mark.autograd\n @pytest.mark.parametrize(\"dtype\", (float, complex))\n def 
test_dot_autograd(self, dtype):\n \"\"\"Test the dot function with the autograd interface.\"\"\"\n c = qml.numpy.array([1.0, 2.0, 3.0], dtype=dtype)\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n op_sum = qml.dot(c, o)\n op_sum_2 = Sum(\n qml.PauliX(0),\n SProd(qml.numpy.array(2.0, dtype=dtype), qml.PauliY(1)),\n SProd(qml.numpy.array(3.0, dtype=dtype), qml.PauliZ(2)),\n )\n assert qml.equal(op_sum, op_sum_2)\n\n @pytest.mark.tf\n @pytest.mark.parametrize(\"dtype\", (\"float64\", \"complex128\"))\n def test_dot_tf(self, dtype):\n \"\"\"Test the dot function with the tensorflow interface.\"\"\"\n import tensorflow as tf\n\n c = tf.constant([1.0, 2.0, 3.0], dtype=getattr(tf, dtype))\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n op_sum = qml.dot(c, o)\n op_sum_2 = Sum(\n qml.PauliX(0),\n SProd(tf.constant(2.0, dtype=getattr(tf, dtype)), qml.PauliY(1)),\n SProd(tf.constant(3.0, dtype=getattr(tf, dtype)), qml.PauliZ(2)),\n )\n assert qml.equal(op_sum, op_sum_2)\n\n @pytest.mark.torch\n @pytest.mark.parametrize(\"dtype\", (\"float64\", \"complex128\"))\n def test_dot_torch(self, dtype):\n \"\"\"Test the dot function with the torch interface.\"\"\"\n import torch\n\n c = torch.tensor([1.0, 2.0, 3.0], dtype=getattr(torch, dtype))\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n op_sum = qml.dot(c, o)\n op_sum_2 = Sum(\n qml.PauliX(0),\n SProd(torch.tensor(2.0, dtype=getattr(torch, dtype)), qml.PauliY(1)),\n SProd(torch.tensor(3.0, dtype=getattr(torch, dtype)), qml.PauliZ(2)),\n )\n assert qml.equal(op_sum, op_sum_2)\n\n @pytest.mark.jax\n @pytest.mark.parametrize(\"dtype\", (float, complex))\n def test_dot_jax(self, dtype):\n \"\"\"Test the dot function with the jax interface.\"\"\"\n import jax\n\n c = jax.numpy.array([1.0, 2.0, 3.0], dtype=dtype)\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n op_sum = qml.dot(c, o)\n op_sum_2 = Sum(\n qml.PauliX(0),\n SProd(jax.numpy.array(2.0, dtype=dtype), qml.PauliY(1)),\n SProd(jax.numpy.array(3.0, dtype=dtype), qml.PauliZ(2)),\n )\n assert qml.equal(op_sum, op_sum_2)\n\n\ncoeffs = [0.12345, 1.2345, 12.345, 123.45, 1234.5, 12345]\nops = [\n qml.PauliX(0),\n qml.PauliY(1),\n qml.PauliZ(2),\n qml.PauliX(3),\n qml.PauliY(4),\n qml.PauliZ(5),\n]\n\n\nclass TestDotPauliSentence:\n \"\"\"Unittest for the dot function when ``pauli=True``\"\"\"\n\n def test_dot_returns_pauli_sentence(self):\n \"\"\"Test that the dot function returns a PauliSentence class.\"\"\"\n ps = qml.dot(coeffs, ops, pauli=True)\n assert isinstance(ps, PauliSentence)\n\n def test_coeffs_and_ops(self):\n \"\"\"Test that the coefficients and operators of the returned PauliSentence are correct.\"\"\"\n ps = qml.dot(coeffs, ops, pauli=True)\n h = ps.hamiltonian()\n assert qml.math.allequal(h.coeffs, coeffs)\n assert all(qml.equal(op1, op2) for op1, op2 in zip(h.ops, ops))\n\n def test_dot_simplifies_linear_combination(self):\n \"\"\"Test that the dot function groups equal pauli words.\"\"\"\n ps = qml.dot(\n coeffs=[0.12, 1.2, 12], ops=[qml.PauliX(0), qml.PauliX(0), qml.PauliX(0)], pauli=True\n )\n assert len(ps) == 1\n h = ps.hamiltonian()\n assert len(h.ops) == 1\n assert qml.equal(h.ops[0], qml.PauliX(0))\n\n def test_dot_returns_hamiltonian_simplified(self):\n \"\"\"Test that the hamiltonian computed from the PauliSentence created by the dot function is equal\n to the simplified hamiltonian.\"\"\"\n ps = qml.dot(coeffs, ops, pauli=True)\n h_ps = ps.hamiltonian()\n h = Hamiltonian(coeffs, ops)\n h.simplify()\n assert qml.equal(h_ps, h)\n\n @pytest.mark.autograd\n def 
test_dot_autograd(self):\n \"\"\"Test the dot function with the autograd interface.\"\"\"\n c = qml.numpy.array([1.0, 2.0, 3.0])\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n ps = qml.dot(c, o, pauli=True)\n\n ps_2 = qml.pauli.PauliSentence(\n {\n qml.pauli.PauliWord({0: \"X\"}): 1.0,\n qml.pauli.PauliWord({1: \"Y\"}): 2.0,\n qml.pauli.PauliWord({2: \"Z\"}): 3.0,\n }\n )\n assert ps == ps_2\n\n @pytest.mark.tf\n def test_dot_tf(self):\n \"\"\"Test the dot function with the tensorflow interface.\"\"\"\n import tensorflow as tf\n\n c = tf.constant([1.0, 2.0, 3.0])\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n ps = qml.dot(c, o, pauli=True)\n\n ps_2 = qml.pauli.PauliSentence(\n {\n qml.pauli.PauliWord({0: \"X\"}): tf.constant(1.0),\n qml.pauli.PauliWord({1: \"Y\"}): tf.constant(2.0),\n qml.pauli.PauliWord({2: \"Z\"}): tf.constant(3.0),\n }\n )\n assert ps == ps_2\n\n @pytest.mark.torch\n def test_dot_torch(self):\n \"\"\"Test the dot function with the torch interface.\"\"\"\n import torch\n\n c = torch.tensor([1.0, 2.0, 3.0])\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n ps = qml.dot(c, o, pauli=True)\n\n ps_2 = qml.pauli.PauliSentence(\n {\n qml.pauli.PauliWord({0: \"X\"}): torch.tensor(1.0),\n qml.pauli.PauliWord({1: \"Y\"}): torch.tensor(2.0),\n qml.pauli.PauliWord({2: \"Z\"}): torch.tensor(3.0),\n }\n )\n assert ps == ps_2\n\n @pytest.mark.jax\n def test_dot_jax(self):\n \"\"\"Test the dot function with the jax interface.\"\"\"\n import jax\n\n c = jax.numpy.array([1.0, 2.0, 3.0])\n o = [qml.PauliX(0), qml.PauliY(1), qml.PauliZ(2)]\n ps = qml.dot(c, o, pauli=True)\n\n ps_2 = qml.pauli.PauliSentence(\n {\n qml.pauli.PauliWord({0: \"X\"}): jax.numpy.array(1.0),\n qml.pauli.PauliWord({1: \"Y\"}): jax.numpy.array(2.0),\n qml.pauli.PauliWord({2: \"Z\"}): jax.numpy.array(3.0),\n }\n )\n assert ps == ps_2\n", "repo_name": "PennyLaneAI/pennylane", "sub_path": "tests/ops/functions/test_dot.py", "file_name": "test_dot.py", "file_ext": "py", "file_size_in_byte": 9449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1965, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pennylane.PauliX", "line_number": 14, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 14, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 14, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 15, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 16, "usage_type": "argument"}, {"api_name": "pennylane.PauliX", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pennylane.ops.SProd", "line_number": 21, "usage_type": "argument"}, {"api_name": "pennylane.dot", "line_number": 26, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 26, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 27, "usage_type": "argument"}, {"api_name": "pennylane.dot", "line_number": 32, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 35, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 35, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 36, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 36, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 37, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 37, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 40, "usage_type": "argument"}, {"api_name": "pennylane.ops.Prod", 
"line_number": 42, "usage_type": "argument"}, {"api_name": "pennylane.dot", "line_number": 46, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 46, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 47, "usage_type": "argument"}, {"api_name": "pennylane.ops.Sum", "line_number": 49, "usage_type": "argument"}, {"api_name": "pennylane.PauliX", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pennylane.dot", "line_number": 57, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 57, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 58, "usage_type": "argument"}, {"api_name": "pennylane.ops.SProd", "line_number": 62, "usage_type": "argument"}, {"api_name": "pennylane.PauliX", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 70, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 74, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 74, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 74, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 79, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 83, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 90, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 90, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 90, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 91, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 92, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 93, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pennylane.PauliY", "line_number": 94, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 95, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pennylane.PauliZ", "line_number": 95, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 97, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 85, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 105, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 106, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 106, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 106, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 107, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 108, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 109, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 110, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 110, 
"usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 111, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 111, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 113, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 100, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 121, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 122, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 122, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 122, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 123, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 124, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 125, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 126, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 126, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 127, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 127, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 116, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 116, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 137, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 138, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 138, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 138, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 139, "usage_type": "call"}, {"api_name": "pennylane.ops.Sum", "line_number": 140, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 141, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 142, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 142, "usage_type": "attribute"}, {"api_name": "pennylane.PauliY", "line_number": 142, "usage_type": "call"}, {"api_name": "pennylane.ops.SProd", "line_number": 143, "usage_type": "call"}, {"api_name": "jax.numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 143, "usage_type": "attribute"}, {"api_name": "pennylane.PauliZ", "line_number": 143, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 145, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 132, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 150, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 151, "usage_type": "call"}, {"api_name": 
"pennylane.PauliZ", "line_number": 152, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 153, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 154, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 155, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 164, "usage_type": "call"}, {"api_name": "pennylane.pauli.pauli_arithmetic.PauliSentence", "line_number": 165, "usage_type": "argument"}, {"api_name": "pennylane.dot", "line_number": 169, "usage_type": "call"}, {"api_name": "pennylane.math.allequal", "line_number": 171, "usage_type": "call"}, {"api_name": "pennylane.math", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pennylane.equal", "line_number": 172, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 176, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 177, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 182, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 182, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 187, "usage_type": "call"}, {"api_name": "pennylane.ops.Hamiltonian", "line_number": 189, "usage_type": "call"}, {"api_name": "pennylane.equal", "line_number": 191, "usage_type": "call"}, {"api_name": "pennylane.numpy.array", "line_number": 196, "usage_type": "call"}, {"api_name": "pennylane.numpy", "line_number": 196, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 197, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 198, "usage_type": "call"}, {"api_name": "pennylane.pauli.PauliSentence", "line_number": 200, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 202, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 203, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 203, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 204, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 204, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 214, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 215, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 215, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 215, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 216, "usage_type": "call"}, {"api_name": "pennylane.pauli.PauliSentence", "line_number": 218, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 218, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 220, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 221, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 222, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 222, "usage_type": 
"attribute"}, {"api_name": "tensorflow.constant", "line_number": 220, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 221, "usage_type": "call"}, {"api_name": "tensorflow.constant", "line_number": 222, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 232, "usage_type": "call"}, {"api_name": "pennylane.PauliX", "line_number": 233, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 233, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 233, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 234, "usage_type": "call"}, {"api_name": "pennylane.pauli.PauliSentence", "line_number": 236, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 236, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 238, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 238, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 239, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 239, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 240, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 240, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 240, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 227, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 250, "usage_type": "attribute"}, {"api_name": "pennylane.PauliX", "line_number": 251, "usage_type": "call"}, {"api_name": "pennylane.PauliY", "line_number": 251, "usage_type": "call"}, {"api_name": "pennylane.PauliZ", "line_number": 251, "usage_type": "call"}, {"api_name": "pennylane.dot", "line_number": 252, "usage_type": "call"}, {"api_name": "pennylane.pauli.PauliSentence", "line_number": 254, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 254, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 256, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 256, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 257, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pennylane.pauli.PauliWord", "line_number": 258, "usage_type": "call"}, {"api_name": "pennylane.pauli", "line_number": 258, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 256, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 256, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 257, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 257, "usage_type": "attribute"}, {"api_name": "jax.numpy.array", "line_number": 258, "usage_type": "call"}, {"api_name": "jax.numpy", "line_number": 258, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 245, "usage_type": "attribute"}]} +{"seq_id": "34078422217", "text": "from scipy.linalg import svdvals\r\nfrom numpy import linalg\r\nimport numpy as ny\r\nfrom pylab import *\r\nimport matplotlib.pyplot as plt \r\nfrom matplotlib import cm \r\nimport matplotlib.animation as animation \r\nfrom 
math import *\r\nimport sys\r\nfrom tkinter import *\r\nimport subprocess as sp\r\nfrom easygui import *\r\nimport os\r\nfrom pseudoAbscisseRayon import *\r\n\r\n\r\n\r\n#détermine la portion du plan complexe sur laquelle appliquer l'algorithme GRID\r\ndef limite(M,eps):\r\n\r\n\tn = len(M)\r\n\tborne_X = [0,0]\r\n\tborne_Y = [0,0]\r\n\tx = 0\r\n\ty = 0\r\n\tr = 0\r\n\tcercle = ((x,y),r)\r\n\tliste_cercle = []\r\n\tliste_c = []\r\n\tfor i in range(n):\r\n\t\tx = M[i][i].real\r\n\t\ty = M[i][i].imag\r\n\t\tr = 0\r\n\t\tfor j in range(n):\r\n\t\t\tif(i != j):\r\n\t\t\t\tr = r+norm(M[i][j])\r\n\t\tcercle = ((x,y),r)\r\n\t\tliste_c.append(cercle)\r\n\t\tr = r+sqrt(n)*eps\r\n\t\tcercle = ((x,y),r)\r\n\t\tliste_cercle.append(cercle)\r\n\t\tif(borne_X[0] > x-r):\r\n\t\t\tborne_X[0] = x-r\r\n\t\tif(borne_X[1] < x+r):\r\n\t\t\tborne_X[1] = x+r\r\n\t\tif(borne_Y[0] > y-r):\r\n\t\t\tborne_Y[0] = y-r\r\n\t\tif(borne_Y[1] < y+r):\r\n\t\t\tborne_Y[1] = y+r\r\n\treturn (borne_X,borne_Y,liste_cercle,liste_c)\r\n\r\n\r\n\r\ndef grid1(M,list_eps,pas,b1,b2):\r\n\t\"\"\"\r\n\r\n\tpremière implémentation de l'algorithme Grid, calcul le pseudospectre de la matrice M\r\n\tpour les valeurs de epsilon contenues dans list_eps(par ordre croissant en faisant pas*pas SVD. \r\n\taffiche le pseudo absicce si b1 = True et le pseudo rayon si b2= True\r\n\t\r\n\trenvoie les coordonnées de la fenêtre matplotlib au moment de sa destruction (utile pour le zoom)\r\n\r\n\t\"\"\"\r\n\r\n\t#on définit un rectangle via les disques de Gerschgorin\r\n\tborne_X,borne_Y,liste_cercle,liste_c = limite(M,list_eps[len(list_eps)-1])\r\n\ti = complex(0,1)\r\n\tn = len(M)\r\n\t#on maille le plan\r\n\tx = ny.linspace(borne_X[0],borne_X[1],pas)\r\n\ty = ny.linspace(borne_Y[0],borne_Y[1],pas)\r\n\tsigmin = zeros((pas,pas))\r\n\tlist_sig = []\r\n\r\n\t#gestion de la barre de chargement\r\n\tproc = 0\r\n\twindow = Tk()\r\n\twindow.title(\"processing\")\r\n\twindow.minsize(480,160)\r\n\twindow.maxsize(480,160)\r\n\t#window.configure(bg = 'green')\r\n\tlabel_title1 = Label(window,text= \"Calcul en cours :\",font ='Times')\r\n\tlabel_title1.pack(expand = YES)\r\n\tlabel_subtitle = Label(window,text= \"0%\",font ='Times')\r\n\tlabel_subtitle.pack(expand = YES)\r\n\tcan = Canvas(window)\r\n\tcan.pack(expand=YES)\r\n\tcan.create_rectangle(89,39,291,61,outline=\"black\")\r\n\tc1 = can.create_rectangle(90,40,90,60,outline=\"lime green\",fill=\"lime green\")\r\n\r\n\tfor l in range(pas):\r\n\t\tfor j in range(pas):\r\n\t\t\tlist_sig = svdvals((x[l]+y[j]*i)*eye(n)-M)\r\n\t\t\tsigmin[j][l] = list_sig[len(list_sig)-1]\r\n\t\t\tproc=proc+1\r\n\t\t\tlabel_subtitle.config(text=str(round(proc/(pas*pas)*100,1))+\"%\",font ='Times')\r\n\t\t\tc2 = can.create_rectangle(90,40,90+2*int(proc/(pas*pas)*100),60,outline=\"lime green\",fill=\"lime green\")\r\n\t\t\tcan.delete(c1)\r\n\t\t\tc1 = c2\r\n\t\t\tcan.update()\r\n\t\t\twindow.update()\r\n\twindow.destroy()\r\n\r\n\t#norm = \tcm.colors.Normalize(vmax = list_eps[len(list_eps)-1],vmin = list_eps[0])\r\n \t\r\n \t#affichage du pseudospectre\r\n\tif(len(list_eps)>1):\r\n\t\tnorm = \tcm.colors.BoundaryNorm(boundaries = list_eps,ncolors = 256)\r\n\t\tcontour(x,y,sigmin,levels = list_eps,cmap = 'cool',norm = norm) #Set1 tab10 tab20\r\n\t\tplt.colorbar()\r\n\telse : \r\n\t\tcontour(x,y,sigmin,list_eps,cmap = 'cool') #Set1 tab10 tab20\t\tplt.colorbar()\r\n\tval_p,vecteurs=linalg.eig(M)\r\n\tfig = plt.gcf()\r\n\tax = fig.gca()\r\n\tfig.patch.set_facecolor('#c0c0c0')\r\n\tax.patch.set_facecolor('#9d9d9d')\r\n\r\n\t#affichage 
du pseudo abcisse\r\n\tif(b1):\r\n\t\tfor eps in list_eps:\r\n\t\t\tp = abscissae(M,eps,axi = ax)\r\n\t\t\t#plt.plot(p.real,p.imag,\"b:.\")\r\n\t\t\tp = radii(M,eps,axi = ax)\r\n\t\t\t#plt.plot(p.real,p.imag,\"r:.\")\r\n\t#affichage du pseudo rayon\r\n\tif(b2):\r\n\r\n\t\tfor i in range (len(liste_cercle)):\r\n\t\t\tif(b1):\r\n\t\t\t\tco,r1 = liste_cercle[i]\r\n\t\t\t\tax.add_artist(plt.Circle(co,radius =r1,color ='k',fill=False,label=\"d2\"))\r\n\t\t\tif(b2):\r\n\t\t\t\tco,r1 = liste_c[i]\r\n\t\t\t\tax.add_artist(plt.Circle(co,ls = \"--\",radius =r1,color ='b',fill=False,label=\"d1\"))\r\n\t\t\t\t\r\n\tfor i in range(len(val_p)):\r\n\t\tplt.plot(val_p[i].real,val_p[i].imag,'k:.',)\r\n\ttitle(\"Pseudospectre GRID\")\r\n\taxis('equal')\r\n\tplt.show()\r\n\t\r\n\treturn list(ax.axis())\r\n\r\n\t\r\n\r\n\r\ndef gridbis(M,list_eps,pas,b1,b2,x_min,x_max,y_min,y_max):\r\n\t\"\"\"\r\n\tfonction de zoom, fonctionnement similaire à grid1.\r\n\t\"\"\"\r\n\tx = ny.linspace(x_min,x_max,pas)\r\n\ty = ny.linspace(y_min,y_max,pas)\r\n\ti = complex(0,1)\r\n\tn = len(M)\r\n\tsigmin = zeros((pas,pas))\r\n\tlist_sig = []\r\n\tproc = 0\r\n\twindow = Tk()\r\n\twindow.title(\"processing\")\r\n\twindow.minsize(480,160)\r\n\twindow.maxsize(480,160)\r\n\tlabel_title1 = Label(window,text= \"Calcul en cours :\")\r\n\tlabel_title1.pack(expand = YES)\r\n\tlabel_subtitle = Label(window,text= \"0%\",font = 'Times')\r\n\tlabel_subtitle.pack(expand = YES)\r\n\tcan = Canvas(window)\r\n\tcan.pack(expand=YES)\r\n\tcan.create_rectangle(89,39,291,61,outline=\"black\")\r\n\tc1 = can.create_rectangle(90,40,90,60,outline=\"lime green\",fill=\"lime green\")\r\n\tfor l in range(pas):\r\n\t\tfor j in range(pas):\r\n\t\t\tlist_sig = svdvals((x[l]+y[j]*i)*eye(n)-M)\r\n\t\t\tsigmin[j][l] = list_sig[len(list_sig)-1]\r\n\t\t\tproc=proc+1\r\n\t\t\tlabel_subtitle.config(text=str(round(proc/(pas*pas)*100,1))+\"%\",font ='Times')\r\n\t\t\tc2 = can.create_rectangle(90,40,90+2*int(proc/(pas*pas)*100),60,outline=\"lime green\",fill=\"lime green\")\r\n\t\t\tcan.delete(c1)\r\n\t\t\tc1 = c2\r\n\t\t\tcan.update()\r\n\t\t\twindow.update()\r\n\twindow.destroy()\r\n\t\r\n\r\n\r\n\tif(len(list_eps)>1):\r\n\t\tnorm = \tcm.colors.BoundaryNorm(boundaries = list_eps,ncolors = 256)\r\n\t\tcontour(x,y,sigmin,list_eps,cmap = 'cool',norm = norm) #Set1 tab10 tab20\r\n\t\tplt.colorbar()\r\n\telse : \r\n\t\tcontour(x,y,sigmin,list_eps,cmap = 'cool') \r\n\r\n\tval_p,vecteurs=linalg.eig(M)\r\n\tfig = plt.gcf()\r\n\tax = fig.gca()\r\n\tfig.patch.set_facecolor('#c0c0c0')\r\n\tax.patch.set_facecolor('#9d9d9d')\r\n\r\n\tif(b1):\r\n\t\tfor eps in list_eps:\r\n\t\t\tp = abscissae(M,eps,axi = ax)\r\n\t\t\t#plt.plot(p.real,p.imag,\"b:.\")\r\n\t\t\tp = radii(M,eps,axi = ax)\r\n\t\t\t#plt.plot(p.real,p.imag,\"r:.\")\r\n\t\r\n\tif(b2):\r\n\t\tfor i in range (len(liste_cercle)):\r\n\t\t\tif(b1):\r\n\t\t\t\tco,r1 = liste_cercle[i]\r\n\t\t\t\tax.add_artist(plt.Circle(co,radius =r1,color ='k',fill=False,label=\"d2\"))\r\n\t\t\tif(b2):\r\n\t\t\t\tco,r1 = liste_c[i]\r\n\t\t\t\tax.add_artist(plt.Circle(co,ls = \"--\",radius =r1,color ='b',fill=False,label=\"d1\"))\r\n\t\t\t\t\r\n\tfor i in range(len(val_p)):\r\n\t\tif(val_p[i].real>=x_min and val_p[i].real<=x_max and val_p[i].imag<= y_max and val_p[i].imag>=y_min):\r\n\t\t\tplt.plot(val_p[i].real,val_p[i].imag,'k:.',)\r\n\ttitle(\"Pseudospectre GRID\")\r\n\taxis('equal')\r\n\r\n\tplt.show()\r\n\t#plt.savefig(\"fig0\",format = \"png\")\r\n\treturn list(ax.axis())\r\n\r\n\r\n\r\n\r\n\r\n", "repo_name": "Paxheim/Projet-Pseudospectre", 
"sub_path": "grid.py", "file_name": "grid.py", "file_ext": "py", "file_size_in_byte": 6438, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.linalg.svdvals", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.cm.colors.BoundaryNorm", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.cm.colors", "line_number": 108, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.linalg.eig", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 153, "usage_type": "call"}, {"api_name": "scipy.linalg.svdvals", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.cm.colors.BoundaryNorm", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.cm.colors", "line_number": 187, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "numpy.linalg.eig", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "6324511293", "text": "import demistomock as demisto\n\n\nclass TestParseWordDoc:\n @staticmethod\n def mock_results(mocker):\n mocker.patch.object(demisto, 
\"results\")\n\n @staticmethod\n def mock_context(mocker, args_value=None):\n if not args_value:\n args_value = {\n \"entryID\": \"entry_id\",\n }\n mocker.patch.object(demisto, \"args\", return_value=args_value)\n\n @staticmethod\n def mock_file_path(mocker, path, name):\n mocker.patch.object(demisto, \"getFilePath\", return_value={\n \"path\": path,\n \"name\": name\n })\n\n @staticmethod\n def mock_demisto(mocker, args_value=None, file_obj=None):\n TestParseWordDoc.mock_results(mocker)\n TestParseWordDoc.mock_context(mocker, args_value)\n if file_obj:\n TestParseWordDoc.mock_file_path(mocker, **file_obj)\n\n @staticmethod\n def get_demisto_results():\n return demisto.results.call_args[0][0]\n\n @staticmethod\n def create_file_object(file_path):\n return {\n \"path\": file_path,\n \"name\": file_path.split(\"/\")[-1]\n }\n\n def test_parse_word_doc(self, mocker):\n \"\"\"\n Given:\n - A docx file\n\n When:\n - Run the ParseWordDoc script\n\n Then:\n - Verify that the docx file has now returned as .txt\n\n \"\"\"\n from ParseWordDoc import main\n self.mock_demisto(mocker, file_obj=self.create_file_object(\"./test_data/file-sample.docx\"))\n main()\n result = self.get_demisto_results()\n assert result.get('File') == 'file-sample.txt'\n\n\ndef test_extract_urls_xml_with_hyperlink():\n \"\"\"\n Given:\n - A docx file with hyperlink\n\n When:\n - Run the extract_urls_xml method\n\n Then:\n - Verify that the method extracting the url from the document\n\n \"\"\"\n from ParseWordDoc import extract_urls_xml\n urls = extract_urls_xml('./test_data/file-sample2.docx')\n assert urls == ['https://typora.io']\n\n\ndef test_extract_urls_xml_without_hyperlink():\n \"\"\"\n Given:\n - A docx file without hyperlink\n\n When:\n - Run the extract_urls_xml method\n\n Then:\n - Verify that the method extracting none urls from the document\n\n \"\"\"\n from ParseWordDoc import extract_urls_xml\n urls = extract_urls_xml('./test_data/file-sample.docx')\n assert urls == []\n\n\ndef test_extract_urls_docx_without_hyperlink():\n \"\"\"\n Given:\n - A docx file without hyperlink\n\n When:\n - Run the extract_urls_docx method\n\n Then:\n - Verify that the method extracting none urls from the document\n\n \"\"\"\n from docx import Document\n from ParseWordDoc import extract_urls_docx\n document = Document('./test_data/file-sample2.docx')\n urls = extract_urls_docx(document)\n assert urls == []\n\n\ndef test_extract_urls_docx_with_hyperlinks():\n \"\"\"\n Given:\n - A docx file with hyperlinks\n\n When:\n - Run the extract_urls_docx method\n\n Then:\n - Verify that the method extracting the urls from the document\n\n \"\"\"\n from docx import Document\n from ParseWordDoc import extract_urls_docx\n document = Document('./test_data/MS-DOCX-190319.docx')\n urls = extract_urls_docx(document)\n assert 'https://go.microsoft.com/fwlink/?LinkId=90607' in urls\n", "repo_name": "demisto/content", "sub_path": "Packs/CommonScripts/Scripts/ParseWordDoc/ParseWordDoc_test.py", "file_name": "ParseWordDoc_test.py", "file_ext": "py", "file_size_in_byte": 3330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1023, "dataset": "github-code", "pt": "50", "api": [{"api_name": "demistomock.results", "line_number": 33, "usage_type": "attribute"}, {"api_name": "ParseWordDoc.main", "line_number": 56, "usage_type": "call"}, {"api_name": "ParseWordDoc.extract_urls_xml", "line_number": 74, "usage_type": "call"}, {"api_name": "ParseWordDoc.extract_urls_xml", "line_number": 91, "usage_type": "call"}, {"api_name": "docx.Document", 
"line_number": 109, "usage_type": "call"}, {"api_name": "ParseWordDoc.extract_urls_docx", "line_number": 110, "usage_type": "call"}, {"api_name": "docx.Document", "line_number": 128, "usage_type": "call"}, {"api_name": "ParseWordDoc.extract_urls_docx", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "10894818618", "text": "import vsketch\nfrom shapely import geometry, affinity\nimport numpy as np\n\nimport sys, os\nsys.path.append(os.path.join(os.path.abspath(__file__), \"../../\"))\nimport utils \n\nclass Day06TradeStylesSketch(vsketch.SketchClass):\n \n rand_x = vsketch.Param(75)\n rand_y = vsketch.Param(55)\n points = vsketch.Param(3)\n\n line_step = vsketch.Param(0.5)\n def draw(self, vsk: vsketch.Vsketch) -> None:\n vsk.size(\"205x130mm\", landscape=False)\n vsk.scale(\"mm\")\n widthmm = utils.css_to_mm(vsk.width)\n\n line_coords = []\n line_count = widthmm / self.line_step\n\n for y in range(int(line_count)):\n y = vsk.map(y, 0, line_count , 0, widthmm)\n line_coords.append(((- widthmm/2,y - widthmm/2),(widthmm/2,y - widthmm/2)))\n \n lines = geometry.MultiLineString(line_coords)\n\n first_pos = (vsk.random(self.rand_x), vsk.random(self.rand_y) )\n prev_pos = first_pos\n\n for i in range(self.points):\n \n if i == self.points - 1:\n new_pos = first_pos\n else:\n sign = (-1, 1)[i % 2]\n new_pos = (vsk.random(self.rand_x) * sign , vsk.random(self.rand_y) * 2 - self.rand_y )\n\n triangle = geometry.Polygon([(0,0), new_pos, prev_pos])\n\n # I don't know why this angle finding works, took a lot of trial and error\n # I need to get better at trigonometry I guess :()\n angle = utils.find_angle((np.array(prev_pos) - np.array(new_pos), np.array(new_pos) - np.array(prev_pos))[prev_pos[1] < new_pos[1]], (0,0), (1,0))\n translated_lines = affinity.translate(lines, 0, vsk.random(self.line_step))\n rotated_lines = affinity.rotate(translated_lines, angle, use_radians=True)\n\n vsk.geometry(rotated_lines.intersection(triangle))\n\n prev_pos = new_pos\n\n\n def finalize(self, vsk: vsketch.Vsketch) -> None:\n vsk.vpype(\"rotate 180 linemerge linesimplify reloop linesort gwrite last.gcode\")\n\n\nif __name__ == \"__main__\":\n Day06TradeStylesSketch.display()\n", "repo_name": "hapiel/Genuary-2022", "sub_path": "day06_trade_styles/sketch_day06_trade_styles.py", "file_name": "sketch_day06_trade_styles.py", "file_ext": "py", "file_size_in_byte": 2092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "vsketch.SketchClass", "line_number": 9, "usage_type": "attribute"}, {"api_name": "vsketch.Param", "line_number": 11, "usage_type": "call"}, {"api_name": "vsketch.Param", "line_number": 12, "usage_type": "call"}, {"api_name": "vsketch.Param", "line_number": 13, "usage_type": "call"}, {"api_name": "vsketch.Param", "line_number": 15, "usage_type": "call"}, {"api_name": "vsketch.Vsketch", "line_number": 16, "usage_type": "attribute"}, {"api_name": "utils.css_to_mm", "line_number": 19, "usage_type": "call"}, {"api_name": "shapely.geometry.MultiLineString", "line_number": 28, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 28, "usage_type": "name"}, 
{"api_name": "shapely.geometry.Polygon", "line_number": 41, "usage_type": "call"}, {"api_name": "shapely.geometry", "line_number": 41, "usage_type": "name"}, {"api_name": "utils.find_angle", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "shapely.affinity.translate", "line_number": 46, "usage_type": "call"}, {"api_name": "shapely.affinity", "line_number": 46, "usage_type": "name"}, {"api_name": "shapely.affinity.rotate", "line_number": 47, "usage_type": "call"}, {"api_name": "shapely.affinity", "line_number": 47, "usage_type": "name"}, {"api_name": "vsketch.Vsketch", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "71651256154", "text": "from collections import deque\ndef solution(maps):\n N, M = len(maps), len(maps[0])\n visit = [[0 for i in range(M)] for i in range(N)]\n Q = deque()\n Q.append([0,0])\n visit[0][0] = 1\n dx, dy = [1,0,-1,0], [0,1,0,-1]\n\n while Q:\n x, y = Q.popleft()\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n if nx < 0 or nx >= N or ny < 0 or ny >= M: continue\n if maps[nx][ny] == 0 or visit[nx][ny]: continue\n Q.append([nx, ny])\n visit[nx][ny] = visit[x][y] + 1\n\n\n return visit[-1][-1] if visit[-1][-1] else -1", "repo_name": "Jungdomo/PCT", "sub_path": "2/1844/문정민.py", "file_name": "문정민.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "collections.deque", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "27331625130", "text": "from behave.model import Feature, Scenario\n\nfrom feature_tests.domain.steps.context import Context\nfrom feature_tests.feature_test_helpers import TestMode\n\n\ndef before_feature(context: Context, feature: Feature):\n # At present we only run domain tests in 'local' mode\n test_mode = TestMode.parse(config=context.config)\n if test_mode is not TestMode.LOCAL:\n feature.mark_skipped()\n\n\ndef before_scenario(context: Context, scenario: Scenario):\n context.questionnaires = {}\n context.ods_organisations = {}\n context.product_teams = {}\n context.events = []\n context.error = None\n context.result = None\n context.subject = None\n", "repo_name": "NHSDigital/connecting-party-manager", "sub_path": "feature_tests/domain/environment.py", "file_name": "environment.py", "file_ext": "py", "file_size_in_byte": 657, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "50", "api": [{"api_name": "feature_tests.domain.steps.context.Context", "line_number": 7, "usage_type": "name"}, {"api_name": "behave.model.Feature", "line_number": 7, "usage_type": "name"}, {"api_name": "feature_tests.feature_test_helpers.TestMode.parse", "line_number": 9, "usage_type": "call"}, {"api_name": "feature_tests.feature_test_helpers.TestMode", "line_number": 9, "usage_type": "name"}, {"api_name": "feature_tests.feature_test_helpers.TestMode.LOCAL", "line_number": 10, "usage_type": "attribute"}, {"api_name": "feature_tests.feature_test_helpers.TestMode", "line_number": 10, "usage_type": "name"}, {"api_name": "feature_tests.domain.steps.context.Context", "line_number": 14, "usage_type": "name"}, {"api_name": "behave.model.Scenario", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "26177991842", "text": "import base64\nimport json\nimport os\nimport time\nimport random\n\nimport jwt\nfrom django.conf import settings\nfrom django.conf.global_settings import MEDIA_URL\nfrom django.http 
import HttpResponse, JsonResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom data.models import Teacher, Image, Course, Grade\n\nimport hashlib\ndef register(request):\n    # check the request method sent by the front end\n    if request.method ==\"POST\":\n        #print(request.body)\n        # the data format is a JSON byte string: b'{\"teacheraccount\":\"\",\"teacherpswd1\":\"\",\"teacherpswd2\":\"\",\"teacheremail\":\"\",\"phone\":\"\"}'\n        # request.POST only exposes POST form data\n        # request.body exposes non-form payloads\n        teacher_data = request.body\n        if not teacher_data:\n            result = {\"code\":10101,\"error\":\"Please give me data\"}\n            return JsonResponse(result)\n        # parse the JSON data\n        json_obj = json.loads(teacher_data)\n        # validate the fields\n        teacher_account = json_obj.get(\"teacheraccount\")\n        teacherpswd1 = json_obj.get(\"teacherpswd1\")\n        teacherpswd2 = json_obj.get(\"teacherpswd2\")\n        teacheremail = json_obj.get(\"teacheremail\")\n        teacherphone = json_obj.get(\"phone\")\n        if not teacher_account:\n            result = {\"code\":10102,\"error\":\"请输入账号\"}\n            return JsonResponse(result)\n        if not teacherpswd1:\n            result = {\"code\":10103,\"error\":\"请输入密码\"}\n            return JsonResponse(result)\n        if not teacherpswd2:\n            result = {\"code\": 10103, \"error\":\"请输入密码\"}\n            return JsonResponse(result)\n        if teacherpswd1 != teacherpswd2:\n            result ={\"code\":10106,\"error\":\"两次密码输入不一致\"}\n            return JsonResponse(result)\n        if not teacheremail:\n            result = {\"code\": 10104, \"error\":\"请输入邮箱\"}\n            return JsonResponse(result)\n        if not teacherphone :\n            result = {\"code\": 10105, \"error\":\"请输入手机号\"}\n            return JsonResponse(result)\n        # check whether the teacher account is available\n        old_teacher = Teacher.objects.filter(teacher_account=teacher_account)\n        if old_teacher:\n            result = {\"code\":10107,\"error\":\"该用户名已经存在!\"}\n            return JsonResponse(result)\n        # hash the password\n        m = hashlib.md5()\n        m.update(teacherpswd2.encode())\n        # create the user:\n        try:\n            Teacher.objects.create(teacher_account=teacher_account,teacher_password=m.hexdigest(),\n                                   teacher_email=teacheremail,teacher_phone=teacherphone)\n        except Exception as e :\n            result ={\"code\":10108,\"error\":\"该用户已存在!!!\"}\n            return JsonResponse(result)\n        # issue a token\n        token = make_token(teacher_account)\n        result = {\"code\":200,\"teacher_account\":teacher_account,\"data\":{\"token\":token.decode()}}\n        return JsonResponse(result)\n# helper that builds a token\ndef make_token(teacher_account,exp=3600*24):\n    now = time.time()\n    #key\n    key = settings.TOKEN_KEY\n    # official PyJWT\n    return jwt.encode({\"teacher_account\":teacher_account,\"exp\":now+exp},key,algorithm=\"HS256\")\n\n# teacher login\ndef login(request):\n    if request.method == \"POST\":\n        # fetch the non-form request payload\n        teacher_data = request.body\n        # validate the data\n        if not teacher_data:\n            result = {\"code\":10109,\"error\":\"Please give me data\"}\n            return JsonResponse(result)\n        # parse the JSON data\n        json_obj = json.loads(teacher_data)\n        teacher_account = json_obj.get(\"teacher_account\")\n        teacher_password = json_obj.get(\"pwd\")\n        # validate the fields\n        if not teacher_account:\n            result = {\"code\":10110,\"error\":\"请填写账号\"}\n            return JsonResponse(result)\n        if not teacher_password:\n            result ={\"code\":10111,\"error\":\"请填写密码\"}\n            return JsonResponse(result)\n        # check whether the account exists in the database\n        teacher = Teacher.objects.filter(teacher_account=teacher_account)\n        if not teacher:\n            result = {\"code\":10112,\"error\":\"账号或者密码错误\"}\n            return JsonResponse(result)\n        # hash the password sent by the front end\n        m = hashlib.md5()\n        m.update(teacher_password.encode())\n        # account exists; verify the password\n        if m.hexdigest() != teacher[0].teacher_password:\n            result = {\"code\":10113,\"error\":\"账号或者密码错误\"}\n            return JsonResponse(result)\n        # issue a token\n        token = make_token(teacher_account)\n        result = {\"code\":
200,\"teacher_account\":teacher_account,\"data\":{\"token\":token.decode()}}\n return JsonResponse(result)\n if request.method != \"POST\":\n result = {\"code\": 10114, \"error\": \"PLease use POST request\"}\n return JsonResponse(result)\n#验证登录装饰器\ndef login_check1(fn):\n def swarp(request,*args,**kwargs):\n # 校验token\n teacher_data = request.body\n json_obj = json.loads(teacher_data)\n teacher_token = json_obj.get(\"teacher_token\")\n if not teacher_token:\n result = {'code': 10115, 'error': 'Please login'}\n print('logging check no token')\n return JsonResponse(result)\n try:\n res = jwt.decode(teacher_token, settings.TOKEN_KEY, algorithms='HS256')\n # print(res)\n except Exception as e:\n print('---jwt error is %s'%(e))\n result = {'code': 10116, 'error': 'Please login'}\n return JsonResponse(result)\n teacher_account = res['teacher_account']\n try:\n teacher = Teacher.objects.get(teacher_account=teacher_account)\n except Exception as e:\n result = {\"code\":10118,\"error\":\"该用户未曾登录或者注册\"}\n return JsonResponse(result)\n request.teacher = teacher\n request.teacher_account = teacher_account\n return fn(request,*args,**kwargs)\n return swarp\n#保存老师信息\n@login_check1\ndef save(request):\n if request.method ==\"POST\":\n teacher_account = request.teacher_account\n teacher_data = request.body\n #解析前端json串\n json_obj = json.loads(teacher_data)\n # print(json_obj)\n teacher = Teacher.objects.get(teacher_account=teacher_account)\n teacher.teacher_name = json_obj.get(\"name\")\n teacher.teacher_sex = json_obj.get(\"sex\")\n teacher_bir = json_obj.get(\"bir\")\n year = teacher_bir[:4]\n month = teacher_bir[4:6]\n data_list = [year,month,\"01\"]\n teacher.teacher_age1 = (\"-\").join(data_list)\n print(teacher.teacher_age1)\n teacher.teacher_address = json_obj.get(\"address\")\n school_name = json_obj.get(\"school_name\")\n print(school_name)\n cate = json_obj.get(\"cate\")\n teacher.teacher_education = school_name + \" \"+cate\n teacher.teacher_address_now = json_obj.get('address')\n teacher.teacher_certificate =json_obj.get(\"certificate\")\n teacher.teacher_teaching_area =json_obj.get(\"teaching_area\")\n teacher.teacher_teaching_way =json_obj.get(\"teaching_way\")\n teacher.teacher_self_assessment = json_obj.get(\"self_assessment\")\n teacher.teacher_tutor_type = json_obj.get(\"tutor_type\")\n available_subjects = json_obj.get(\"available_subjects\")\n available_grade = json_obj.get(\"available_grad\")\n #查看该老师是否重复存入了相同的科目和年级\n try:\n teacher_course = teacher.fk_teacher_course_id.get(course_name=available_subjects)\n teacher_grade = teacher.fk_teacher_grade_id.get(grade_name = available_grade)\n except Exception as e:\n print(e)\n # 老师创建了一门课程 和一个年级\n try:\n course = Course.objects.create(course_name=available_subjects)\n except Exception as e:\n print(e)\n result = {\"code\": 10228, \"error\": \"该科目已经存在!!\"}\n return JsonResponse(result)\n # 给科目增加一个老师\n course.teacher_set.add(teacher)\n try:\n grade = Grade.objects.create(grade_name=available_grade)\n except Exception as e:\n print(e)\n result = {\"code\": 10229, \"error\": \"该年级已经存在!!\"}\n return JsonResponse(result)\n # 给科目增加\n grade.teacher_set.add(teacher)\n # 给科目增加对应年级\n course.grade_set.add(grade)\n else:\n #判断课程和年级是否已经关联\n if teacher_grade.fk_course_id.course_name == available_subjects:\n result = {\"code\":10330,\"error\":\"课程和年级已经绑定\"}\n return JsonResponse(result)\n #对身份证号加密\n cardId = json_obj.get(\"id\")\n m = hashlib.md5()\n m.update(cardId.encode())\n teacher.teacher_cardID = m.hexdigest()\n #对数据进行更新\n try:\n teacher.save()\n 
except Exception as e:\n            print(e)\n            result = {\"code\":10119,\"error\":\"请检查数据\"}\n            return JsonResponse(result)\n        result = {\"code\":200,\"data\":\"保存成功\"}\n        return JsonResponse(result)\n\n# publish teacher achievements\n@login_check1\ndef send_result(request):\n    if request.method == \"POST\":\n        # get the logged-in teacher object\n        teacher = request.teacher\n        teacher_data = request.body\n        # validate the data\n        if not teacher_data:\n            result = {\"code\":10120,\"error\":\"请输入数据!!\"}\n            return JsonResponse(result)\n        json_obj = json.loads(teacher_data)\n        customerMessage = json_obj.get(\"customerMessage\")\n        print(customerMessage)\n        if not customerMessage:\n            result = {\"code\": 10121, \"error\": \"请输入数据!!\"}\n            return JsonResponse(result)\n        teacher.teacher_achievement = customerMessage\n        # persist the update\n        try:\n            teacher.save()\n        except Exception as e:\n            print(e)\n            result = {\"code\":10122,\"error\":\"请检查数据\"}\n            return JsonResponse(result)\n        result = {\"code\": 200, \"data\":\"展示成功\"}\n        return JsonResponse(result)\n\ndef sort(request):\n    if request.method == \"POST\":\n\n        result = {\"code\":200,\"data\":\"成功\"}\n        return JsonResponse(result)\ndef login_check(request):\n    if request.method == \"POST\":\n        teacher_data = request.body\n        json_obj = json.loads(teacher_data)\n        teacher_token = json_obj.get(\"teacher_token\")\n        if not teacher_token:\n            result = {'code': 10115, 'error': 'Please login'}\n            print('logging check no token')\n            return JsonResponse(result)\n        try:\n            res = jwt.decode(teacher_token, settings.TOKEN_KEY, algorithms='HS256')\n            # print(res)\n        except Exception as e:\n            print('---jwt error is %s' % (e))\n            result = {'code': 10116, 'error': 'Please login'}\n            return JsonResponse(result)\n        teacher_account = res['teacher_account']\n        try:\n            teacher = Teacher.objects.get(teacher_account=teacher_account)\n        except Exception as e:\n            result = {\"code\": 10118, \"error\": \"该用户未曾登录或者注册\"}\n            return JsonResponse(result)\n        result = {\"code\":200,\"data\":\"验证成功\"}\n        return JsonResponse(result)\ndef updateimg(request):\n    if request.method ==\"POST\":\n        file_img = request.FILES['img']\n        file_name = \"images/\"+ file_img.name\n        is_img = file_img.name.split('.')[-1]\n        if is_img not in ('jpeg', 'jpg', 'png', 'gif', 'tmp'):\n            result = {\"code\": 10223, \"error\": \"图片格式不对!\"}\n            return JsonResponse(result)\n        teacher_token = request.POST.get(\"teacher_token\")\n        # get the teacher account from the token\n        res = jwt.decode(teacher_token,key=settings.TOKEN_KEY,algorithms=\"HS256\")\n        teacher_account = res[\"teacher_account\"]\n        teacher = Teacher.objects.get(teacher_account=teacher_account)\n        filename = os.path.join(settings.MEDIA_ROOT, file_img.name)\n        with open(filename, 'wb') as f:\n            data = file_img.file.read()\n            f.write(data)\n        # look up the photo\n        try:\n            img = Image.objects.create(fk_teacher_id=teacher,image_url=file_name)\n        except Exception as e:\n            print(e)\n            img = Image.objects.get(fk_teacher_id=teacher)\n            img.image_url = file_name\n            img.save()\n            result = {\"code\":10224,\"error\":\"该老师已存图片\",\"data\":file_name}\n            return JsonResponse(result)\n\n        result = {\"code\":200,\"data\":file_name}\n        return JsonResponse(result)\n\n# fetch the teacher's basic info\n@login_check1\ndef get_base_info(request):\n    if request.method == \"POST\":\n        # get the teacher object\n        teacher = request.teacher\n        teacher_name = teacher.teacher_name\n        if not teacher_name:\n            result = {\"code\":10300,\"error\":\"该老师还未填写详细信息,请先填写详细信息\"}\n            return JsonResponse(result)\n        teacher_name = teacher.teacher_name[0]\n        # reverse one-to-one lookup\n        try:\n            teacher_img = teacher.image.image_url\n        except Exception as e:\n            print(e)\n            result = {\"code\":10225,\"error\":\"该老师还未存证件照\"}\n            return JsonResponse(result)\n        result = 
{\"code\":200,\"data\":{\"img_url\":str(teacher_img),\"teacher_name\":teacher_name}}\n return JsonResponse(result)\n#获取老师的数据\ndef getinfo(request):\n if request.method ==\"GET\":\n teacher_account = request.GET.get(\"teacher_account\")\n #查询老师的信息\n teacher_data = Teacher.objects.filter(teacher_account=teacher_account)\n teacher_name = teacher_data[0].teacher_name[0]\n teacher_sex = teacher_data[0].teacher_sex\n if teacher_sex == \"1\":\n teacher_sex = \"男\"\n else:\n teacher_sex = \"女\"\n #出生日期\n teacher_age1 = teacher_data[0].teacher_age1\n #地址\n teacher_address = teacher_data[0].teacher_address\n #目前地址\n teacher_address_now = teacher_data[0].teacher_address_now\n #学历\n teacher_education = teacher_data[0].teacher_education\n #联系电话\n teacher_phone = teacher_data[0].teacher_phone\n #成果\n teacher_achievement = teacher_data[0].teacher_achievement\n #身份\n teacher_tutor_type = teacher_data[0].teacher_tutor_type\n #证书\n teacher_certificate = teacher_data[0].teacher_certificate\n #授课区域\n teacher_teaching_area = teacher_data[0].teacher_teaching_area\n #辅导方式\n teacher_teaching_way = teacher_data[0].teacher_teaching_way\n #自我评价\n teacher_self_assessment = teacher_data[0].teacher_self_assessment\n # 反向一对一查询数据\n try:\n teacher = Teacher.objects.get(teacher_account=teacher_account)\n teacher_img = teacher.image.image_url\n except Exception as e:\n print(e)\n result = {\"code\": 10226, \"error\": \"该老师还未存证件照\"}\n return JsonResponse(result)\n #通过课程查询老师 和年级\n teacher_course = (\",\").join([teacher_course.course_name for teacher_course in teacher.fk_teacher_course_id.all()])\n teacher_grade = (\",\").join([teacher_grade.grade_name for teacher_grade in teacher.fk_teacher_grade_id.all()])\n data = {\"teacher_name\":teacher_name,\"teacher_sex\":teacher_sex,\"teacher_age1\":teacher_age1,\n \"teacher_address\":teacher_address,\"teacher_education\":teacher_education[0:-2],\n \"teacher_cate\":teacher_education[-2:],\"teacher_phone\":teacher_phone,\n \"teacher_achievement\":teacher_achievement,\"teacher_img\":str(teacher_img),\n \"teacher_tutor_type\":teacher_tutor_type,\"teacher_address_now\":teacher_address_now,\n \"teacher_course\":teacher_course,\"teacher_grade\":teacher_grade,\"teacher_certificate\":teacher_certificate,\n \"teacher_teaching_area\":teacher_teaching_area,\"teacher_teaching_way\":teacher_teaching_way,\n \"teacher_self_assessment\":teacher_self_assessment}\n result = {\"code\":200,\"data\":data}\n return JsonResponse(result)\n\n\ndef search(request):\n #查询老师数据库前十二条信息发给前段 示例如下\n if request.method == \"GET\":\n data = []\n\n #根据最新时间\n teachers = Teacher.objects.order_by(\"-teacher_createtime\")\n\n for teacher in teachers:\n #判断看是否有图片存入\n try:\n img_url = teacher.image.image_url\n except Exception as e:\n print(e)\n #如果有错误就将退出循环\n continue\n else:\n dict = {\"id\":\"1000\"+str(teacher.id),\"teacher_account\":teacher.teacher_account,\"teacher_name\":teacher.teacher_name,\"img_url\":str(img_url),\n \"teacher_type\":teacher.teacher_tutor_type,\"teacher_info\":teacher.teacher_self_assessment}\n data.append(dict)\n continue\n result = {\"code\":200,\"data\":data}\n return JsonResponse(result)\n", "repo_name": "facethesun/Nowubai_website", "sub_path": "nowubai_website/teacher/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 17504, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.http.JsonResponse", "line_number": 27, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 29, 
"usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 44, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 47, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 53, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 55, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 58, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 60, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.create", "line_number": 64, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 64, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 68, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "django.conf.settings.TOKEN_KEY", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 77, "usage_type": "name"}, {"api_name": "jwt.encode", "line_number": 79, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 89, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 91, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 97, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 100, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.filter", "line_number": 102, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 102, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 105, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 107, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 112, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 116, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 119, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 125, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 130, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 132, "usage_type": "call"}, {"api_name": "django.conf.settings.TOKEN_KEY", "line_number": 132, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 132, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 137, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.get", "line_number": 140, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 140, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 143, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 155, "usage_type": 
"call"}, {"api_name": "data.models.Teacher.objects.get", "line_number": 157, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 157, "usage_type": "name"}, {"api_name": "data.models.Course.objects.create", "line_number": 187, "usage_type": "call"}, {"api_name": "data.models.Course.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "data.models.Course", "line_number": 187, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 191, "usage_type": "call"}, {"api_name": "data.models.Grade.objects.create", "line_number": 195, "usage_type": "call"}, {"api_name": "data.models.Grade.objects", "line_number": 195, "usage_type": "attribute"}, {"api_name": "data.models.Grade", "line_number": 195, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 199, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 208, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 211, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 220, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 222, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 234, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 235, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 240, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 248, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 250, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 256, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 260, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 265, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 267, "usage_type": "call"}, {"api_name": "django.conf.settings.TOKEN_KEY", "line_number": 267, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 267, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 272, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.get", "line_number": 275, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 275, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 275, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 278, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 280, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 288, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 291, "usage_type": "call"}, {"api_name": "django.conf.settings.TOKEN_KEY", "line_number": 291, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 291, "usage_type": "name"}, {"api_name": "data.models.Teacher.objects.get", "line_number": 293, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 293, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 293, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 294, "usage_type": "call"}, {"api_name": "os.path", "line_number": 294, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 294, "usage_type": "attribute"}, {"api_name": "django.conf.settings", 
"line_number": 294, "usage_type": "name"}, {"api_name": "data.models", "line_number": 296, "usage_type": "name"}, {"api_name": "data.models", "line_number": 297, "usage_type": "argument"}, {"api_name": "data.models.Image.objects.create", "line_number": 300, "usage_type": "call"}, {"api_name": "data.models.Image.objects", "line_number": 300, "usage_type": "attribute"}, {"api_name": "data.models.Image", "line_number": 300, "usage_type": "name"}, {"api_name": "data.models.Image.objects.get", "line_number": 303, "usage_type": "call"}, {"api_name": "data.models.Image.objects", "line_number": 303, "usage_type": "attribute"}, {"api_name": "data.models.Image", "line_number": 303, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 307, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 310, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 321, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 329, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 331, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects.filter", "line_number": 337, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 337, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 337, "usage_type": "name"}, {"api_name": "data.models.Teacher.objects.get", "line_number": 368, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 368, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 368, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 373, "usage_type": "call"}, {"api_name": "data.models", "line_number": 377, "usage_type": "name"}, {"api_name": "data.models", "line_number": 385, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 386, "usage_type": "call"}, {"api_name": "data.models", "line_number": 392, "usage_type": "name"}, {"api_name": "data.models.Teacher.objects.order_by", "line_number": 395, "usage_type": "call"}, {"api_name": "data.models.Teacher.objects", "line_number": 395, "usage_type": "attribute"}, {"api_name": "data.models.Teacher", "line_number": 395, "usage_type": "name"}, {"api_name": "data.models.append", "line_number": 408, "usage_type": "call"}, {"api_name": "data.models", "line_number": 408, "usage_type": "name"}, {"api_name": "data.models", "line_number": 410, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 411, "usage_type": "call"}]} +{"seq_id": "16095067502", "text": "import cgi\nimport re\nimport sys\nimport os\n\nKEYWORDS = {\n 'class',\n 'constructor',\n 'function',\n 'method',\n 'field',\n 'static',\n 'var',\n 'int',\n 'char',\n 'boolean',\n 'void',\n 'true',\n 'false',\n 'null',\n 'this',\n 'let',\n 'do',\n 'if',\n 'else',\n 'while',\n 'return',\n}\n\nSYMBOLS = {\n '{', '}', '(', ')', '[', ']', '.',\n ',', ';', '+', '-', '*', '/', '&', '|',\n '<', '>', '=', '~'\n}\n\nKEYWORD = 'keyword'\nSYMBOL = 'symbol'\nIDENTIFIER = 'identifier'\nINT_CONST = 'integerConstant'\nSTRING_CONST = 'stringConstant'\n\nTOKEN_TYPES = (\n KEYWORD,\n SYMBOL,\n IDENTIFIER,\n INT_CONST,\n STRING_CONST\n)\n\n\nREGEX_MAPPING = {\n KEYWORD: '(' + r'|'.join(KEYWORDS) + ')\\\\W',\n SYMBOL: r'([&)\\]+\\*\\-,\\/<.}([{;~=|>])',\n INT_CONST: r'(\\d+)',\n STRING_CONST: r'(\".*?\")',\n IDENTIFIER: r'([a-zA-Z_]+[a-zA-Z_\\d]*)'\n}\n\n\ndef remove_comments(line):\n start_from = 0\n while start_from 
!= -1:\n comment = line.find('//', start_from)\n if comment == -1:\n return line\n # terminate if there is an even\n # number of quotes before comment\n if line[:comment].count('\"') % 2 == 0:\n start_from = -1\n else:\n start_from = comment + 1\n\n return line[:comment].rstrip()\n\n\nclass JackTokenizer(object):\n cleaned_input = None\n\n def __init__(self, f):\n # read whole input into a stream\n input_stream = ''.join(\n remove_comments(line)\n for line in f\n ).replace('\\n', '').replace('\\r', '')\n # get rid of the block comments\n regex = r'(\\/\\*.*?\\*\\/)'\n input_stream += '/**/'\n spans = [m.span() for m in re.finditer(regex, input_stream)]\n cleaned_input = ''\n last_hi = 0\n for lo, hi in spans:\n cleaned_input += input_stream[last_hi:lo]\n last_hi = hi\n\n self.cleaned_input = cleaned_input.lstrip()\n\n def match_and_extract(self, token_type, advance=True):\n regex = REGEX_MAPPING[token_type]\n match = re.match(\n regex,\n self.cleaned_input\n )\n if match:\n word = match.groups()[0]\n token = cgi.escape(word).strip('\"')\n if advance:\n self.cleaned_input = self.cleaned_input.replace(\n word, '', 1\n ).lstrip()\n return (token, token_type)\n\n def _next(self, advance=True):\n for token_type in TOKEN_TYPES:\n match = self.match_and_extract(token_type, advance)\n if match:\n return match\n raise RuntimeError\n\n def next(self):\n if not self.cleaned_input:\n raise StopIteration\n\n return self._next()\n\n def peek_next(self):\n return self._next(advance=False)\n\n\nclass ParseTree(object):\n \"\"\"\n Class to store write-only Parse tree as an XML-formatted string\n \"\"\"\n TAB_SIZE = 2\n representation = None\n level = 0\n\n def __init__(self):\n self.representation = ''\n self.level = 0\n\n def append_tag_with_text(self, tag, text):\n self.representation += self.indentation\n self.representation += '<%s> %s \\n' % (tag, text, tag)\n\n def open_tag(self, tag):\n self.representation += self.indentation\n self.representation += '<%s>\\n' % tag\n self.level += 1\n\n def close_tag(self, tag):\n self.level -= 1\n self.representation += self.indentation\n self.representation += '\\n' % tag\n\n @property\n def indentation(self):\n assert self.level >= 0\n return (self.level * self.TAB_SIZE) * ' '\n\n def __str__(self):\n return self.representation\n\n def __repr__(self):\n return self.representation\n \n\nclass CompilationEngine(object):\n tokenizer = None\n\n token = None\n token_type = None\n parse_tree = None\n\n def __init__(self, f):\n self.tokenizer = JackTokenizer(f)\n self.token, self.token_type = next(self.tokenizer)\n self.parse_tree = ParseTree()\n # assign tree methods to current class\n # to avoid self.parse_tree boilerplate\n self.append_tag_with_text = self.parse_tree.append_tag_with_text\n self.open_tag = self.parse_tree.open_tag\n self.close_tag = self.parse_tree.close_tag\n self.compile_class()\n\n def eat_and_append_token(self, token_type, token):\n self.append_tag_with_text(\n token_type,\n token\n )\n self.eat(token_type, token)\n\n def compile_class(self):\n self.open_tag('class')\n self.eat_and_append_token(\n KEYWORD,\n 'class'\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n self.eat_and_append_token(\n SYMBOL,\n '{'\n )\n while self.token in ('static', 'field'):\n self.compile_class_var_dec()\n\n while self.token in ('function', 'method', 'constructor'):\n self.compile_subroutine()\n\n self.eat_and_append_token(SYMBOL, '}')\n self.close_tag('class')\n\n def compile_class_var_dec(self):\n self.open_tag('classVarDec')\n 
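# Hedged usage sketch, not part of the original file: it assumes the
# JackTokenizer defined above is in scope and a Python version where
# cgi.escape still exists (it was removed in 3.8). Feeding one Jack
# statement through the tokenizer shows the ordered regex dispatch --
# KEYWORD is tried before IDENTIFIER, so 'let' is never misread as a name.
import io

source = io.StringIO("let x = x + 1;\n")
tokenizer = JackTokenizer(source)
while tokenizer.cleaned_input:
    token, token_type = tokenizer.next()
    print(token_type, token)
# expected: keyword let / identifier x / symbol = / identifier x /
#           symbol + / integerConstant 1 / symbol ;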
self.eat_and_append_token(\n self.token_type,\n self.token\n )\n self.eat_and_append_token(\n self.token_type,\n self.token\n )\n # identifier\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n while self.token == ',':\n self.eat_and_append_token(\n SYMBOL,\n ','\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n self.eat_and_append_token(\n SYMBOL,\n ';'\n )\n self.close_tag('classVarDec')\n\n def compile_subroutine(self):\n self.open_tag('subroutineDec')\n self.eat_and_append_token(\n self.token_type,\n self.token\n )\n # return type\n self.eat_and_append_token(\n self.token_type,\n self.token\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n self.eat_and_append_token(\n SYMBOL,\n '('\n )\n self.compile_parameter_list()\n self.eat_and_append_token(\n SYMBOL,\n ')'\n )\n\n self.open_tag('subroutineBody')\n\n self.eat_and_append_token(\n SYMBOL,\n '{'\n )\n\n while self.token == 'var':\n self.compile_var_dec()\n\n self.compile_statements()\n\n self.eat_and_append_token(\n SYMBOL,\n '}'\n )\n self.close_tag('subroutineBody')\n self.close_tag('subroutineDec')\n\n def compile_parameter_list(self):\n self.open_tag('parameterList')\n while self.token_type in (IDENTIFIER, KEYWORD):\n self.eat_and_append_token(\n self.token_type, self.token\n )\n self.eat_and_append_token(IDENTIFIER, self.token)\n if self.token == ',':\n self.eat_and_append_token(SYMBOL, ',')\n\n self.close_tag('parameterList')\n\n def compile_var_dec(self):\n self.open_tag('varDec')\n self.eat_and_append_token(\n KEYWORD,\n 'var'\n )\n\n self.eat_and_append_token(\n self.token_type,\n self.token\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n while self.token == ',':\n self.eat_and_append_token(\n SYMBOL,\n ','\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n self.eat_and_append_token(\n SYMBOL, ';'\n )\n self.close_tag('varDec')\n\n def compile_statements(self):\n self.open_tag('statements')\n statement_map = {\n 'let': self.compile_let,\n 'if': self.compile_if,\n 'while': self.compile_while,\n 'do': self.compile_do,\n 'return': self.compile_return\n }\n while self.token in statement_map:\n statement_map[self.token]()\n\n self.close_tag('statements')\n\n def compile_subroutine_call(self):\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n # . 
or (\n self.eat_and_append_token(\n SYMBOL,\n self.token\n )\n if self.token_type == IDENTIFIER:\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n self.eat_and_append_token(\n SYMBOL, '('\n )\n self.compile_expression_list()\n self.eat_and_append_token(\n SYMBOL, ')'\n )\n\n def compile_do(self):\n self.open_tag('doStatement')\n\n self.eat_and_append_token(\n KEYWORD, 'do'\n )\n self.compile_subroutine_call()\n self.eat_and_append_token(SYMBOL, ';')\n self.close_tag('doStatement')\n\n def compile_let(self):\n self.open_tag('letStatement')\n\n self.eat_and_append_token(\n KEYWORD,\n 'let'\n )\n\n self.eat_and_append_token(\n IDENTIFIER,\n self.token\n )\n\n if self.token == '[':\n self.eat_and_append_token(\n SYMBOL, '['\n )\n self.compile_expression()\n self.eat_and_append_token(\n SYMBOL, ']'\n )\n\n self.eat_and_append_token(\n SYMBOL,\n '='\n )\n self.compile_expression()\n\n self.eat_and_append_token(\n SYMBOL,\n ';'\n )\n\n self.close_tag('letStatement')\n\n def compile_while(self):\n self.open_tag('whileStatement')\n self.eat_and_append_token(KEYWORD, 'while')\n self.eat_and_append_token(SYMBOL, '(')\n self.compile_expression()\n self.eat_and_append_token(SYMBOL, ')')\n self.eat_and_append_token(SYMBOL, '{')\n self.compile_statements()\n self.eat_and_append_token(SYMBOL, '}')\n self.close_tag('whileStatement')\n\n def compile_return(self):\n self.open_tag('returnStatement')\n self.eat_and_append_token(KEYWORD, 'return')\n if self.token != ';':\n self.compile_expression()\n self.eat_and_append_token(SYMBOL, ';')\n self.close_tag('returnStatement')\n\n def compile_if(self):\n self.open_tag('ifStatement')\n self.eat_and_append_token(KEYWORD, 'if')\n self.eat_and_append_token(SYMBOL, '(')\n self.compile_expression()\n self.eat_and_append_token(SYMBOL, ')')\n\n self.eat_and_append_token(SYMBOL, '{')\n self.compile_statements()\n self.eat_and_append_token(SYMBOL, '}')\n\n if self.token == 'else':\n self.eat_and_append_token(KEYWORD, 'else')\n self.eat_and_append_token(SYMBOL, '{')\n self.compile_statements()\n self.eat_and_append_token(SYMBOL, '}')\n\n self.close_tag('ifStatement')\n\n def compile_expression(self):\n self.open_tag('expression')\n\n self.compile_term()\n operators = map(cgi.escape, (\n '+',\n '-',\n '*',\n '/',\n '&',\n '|',\n '<',\n '>',\n '='\n ))\n while self.token in operators:\n self.eat_and_append_token(\n SYMBOL,\n self.token\n )\n self.compile_term()\n\n self.close_tag('expression')\n\n def compile_term(self):\n self.open_tag('term')\n\n ll_one_types = (\n INT_CONST,\n STRING_CONST\n )\n ll_one_constants = (\n 'true',\n 'false',\n 'null',\n 'this'\n )\n unary_operators = ('-', '~',)\n\n if self.token_type in ll_one_types or self.token in ll_one_constants:\n self.eat_and_append_token(\n self.token_type,\n self.token\n )\n elif self.token in unary_operators:\n self.eat_and_append_token(\n SYMBOL, self.token\n )\n self.compile_term()\n elif self.token_type == IDENTIFIER: # LL(2)\n token, _ = self.tokenizer.peek_next()\n if token == '[':\n self.eat_and_append_token(\n IDENTIFIER, self.token\n )\n self.eat_and_append_token(\n SYMBOL, '['\n )\n self.compile_expression()\n self.eat_and_append_token(\n SYMBOL, ']'\n )\n elif token in ('.', '('):\n self.compile_subroutine_call()\n else:\n self.eat_and_append_token(\n IDENTIFIER, self.token\n )\n elif self.token == '(':\n self.eat_and_append_token(\n SYMBOL, '('\n )\n self.compile_expression()\n self.eat_and_append_token(\n SYMBOL, ')'\n )\n else:\n raise NotImplementedError\n self.close_tag('term')\n\n def 
compile_expression_list(self):\n self.open_tag('expressionList')\n while self.token != ')':\n self.compile_expression()\n if self.token == ',':\n self.eat_and_append_token(\n SYMBOL,\n ','\n )\n self.close_tag('expressionList')\n\n def eat(self, token_type, token):\n if self.token != token:\n raise RuntimeError(\n 'Unexpected token '\n 'self.token: %s, argument token: %s' % (self.token, token)\n )\n if self.token_type != token_type:\n raise RuntimeError(\n 'Unexpected token_type '\n 'self.token_type: %s, argument token_type: %s' % (\n self.token_type, token_type\n )\n )\n try:\n self.token, self.token_type = next(self.tokenizer)\n except StopIteration:\n self.token = self.token_type = None\n\n\nfile_list = []\npath = sys.argv[1]\nis_dir = os.path.isdir(path)\nif not is_dir:\n file_list.append(path)\nelse:\n if not path.endswith('/'):\n path = path + '/'\n for filename in os.listdir(path):\n if filename.endswith('.jack'):\n path_to_file = os.path.join(path, filename)\n file_list.append(path_to_file)\n\n\nfor filename in file_list:\n with open(filename.replace('.jack', '.xml'), 'w') as f:\n current_file = open(filename)\n compiled = CompilationEngine(current_file)\n f.write(str(compiled.parse_tree))\n current_file.close()\n", "repo_name": "isaevpd/nand2tetris", "sub_path": "project_10/JackAnalyzer.py", "file_name": "JackAnalyzer.py", "file_ext": "py", "file_size_in_byte": 14997, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "re.finditer", "line_number": 88, "usage_type": "call"}, {"api_name": "re.match", "line_number": 99, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 105, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 464, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 571, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 578, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path", "line_number": 580, "usage_type": "attribute"}]} +{"seq_id": "26201335662", "text": "\"\"\"Provide utility function for the LivingPark notebook for paper replication.\"\"\"\nimport datetime\nimport glob\nimport math\nimport os.path\nimport pkgutil\nimport subprocess\nimport sys\nimport warnings\nfrom pprint import pprint\n\nimport nilearn.plotting as nplt\nimport numpy as np\nimport pandas as pd\nimport ppmi_downloader\nimport pytz # type: ignore\nfrom boutiques.descriptor2func import function as descriptor2func\nfrom dateutil.parser import parse # type: ignore\nfrom dateutil.relativedelta import relativedelta # type: ignore\nfrom IPython.display import HTML\nfrom IPython.display import Image as ImageDisplay\nfrom matplotlib import axes\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n\nclass LivingParkUtils:\n \"\"\"Contain functions to be reused across LivingPark notebooks.\"\"\"\n\n def __init__(\n self,\n data_cache_path: str = \".cache\",\n ) -> None:\n \"\"\"Initialize a LivingPark notebook.\n\n Parameters\n ----------\n data_cache_path: str, default \".cache\"\n Local path where to store the dataset cache.\n Keep default value unless you know what you're doing.\n \"\"\"\n self.data_cache_path = data_cache_path\n self.study_files_dir = os.path.abspath(os.path.join(\"inputs\", \"study_files\"))\n\n self.setup_notebook_cache()\n 
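# Hedged notebook-side sketch, not from the original file (the cell below is
# hypothetical; the study-file name is one this class itself references):
utils = LivingParkUtils()            # creates .cache plus the inputs/outputs symlinks
utils.notebook_init()                # pip-installs requirements, returns the toggle HTML
utils.download_ppmi_metadata(["MDS_UPDRS_Part_III.csv"])  # skipped when already cached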
os.makedirs(self.study_files_dir, exist_ok=True)\n\n    def setup_notebook_cache(self) -> None:\n        \"\"\"Create, install, and update the cache directory, if needed.\n\n        Notes\n        -----\n        Aggregate the inputs and outputs into a single dataset by creating symlinks.\n        \"\"\"\n        for x in (\"\", \"inputs\", \"outputs\"):\n            os.makedirs(os.path.join(self.data_cache_path, x), exist_ok=True)\n\n        # Make or update links to cache\n        for x in [\"inputs\", \"outputs\"]:\n            if os.path.islink(x):\n                print(f\"removing link {x}\")\n                os.remove(x)\n            elif os.path.exists(x):\n                raise Exception(f\"Directory {x} exists and is not a symlink.\")\n            else:\n                print(f\"{x} doesn't exist\")\n            os.symlink(os.path.join(self.data_cache_path, x), x)\n\n    def notebook_init(self) -> HTML:\n        \"\"\"Initialize a paper replication notebook.\n\n        It ignores cell warnings, installs dependencies, shows execution time, and creates\n        a toggle button for displaying/hiding code cells.\n\n        Returns\n        -------\n        HTML\n            An HTML button to hide/show code cells in the notebooks.\n        \"\"\"\n        warnings.filterwarnings(\"ignore\")\n\n        print(\"Installing notebook dependencies (see log in install.log)... \")\n        with open(\"install.log\", \"wb\") as fout:\n            subprocess.check_call(\n                [\n                    sys.executable,\n                    \"-m\",\n                    \"pip\",\n                    \"install\",\n                    \"-U\",\n                    \"-r\",\n                    \"requirements.txt\",\n                ],\n                stdout=fout,\n                stderr=fout,\n            )\n\n        now = datetime.datetime.now(pytz.utc).strftime(\"%Y-%m-%d %H:%M:%S %Z %z\")\n        print(f\"This notebook was run on {now}\")\n\n        return HTML(\n            filename=os.path.join(\n                os.path.dirname(os.path.abspath(__file__)),\n                \"toggle_button.html\",\n            )\n        )\n\n    def download_ppmi_metadata(\n        self,\n        required_files: list,\n        force: bool = False,\n        headless: bool = True,\n        timeout: int = 600,\n    ) -> None:\n        \"\"\"Download PPMI required study files, if not available.\n\n        Parameters\n        ----------\n        required_files : list\n            Required PPMI study files (csv files) supported by ppmi_downloader.\n        force : bool, optional\n        headless : bool, default True\n            If True, prevent a browser window from opening during download.\n        timeout : int, default 600\n            Number of seconds before the download times out.\n\n        Raises\n        ------\n        Exception:\n            If failure occurs during download.\n        \"\"\"\n        if force:\n            missing_files = required_files\n        else:\n            missing_files = [\n                x\n                for x in required_files\n                if not os.path.exists(os.path.join(self.study_files_dir, x))\n            ]\n\n        if len(missing_files) > 0:\n            pprint(f\"Downloading files: {missing_files}\")\n            try:\n                ppmi = ppmi_downloader.PPMIDownloader()\n                ppmi.download_metadata(\n                    missing_files,\n                    destination_dir=self.study_files_dir,\n                    headless=headless,\n                    timeout=timeout,\n                )\n            except Exception as e:\n                print(\"Download failed!\")\n                raise (e)\n\n            print(\"Download completed!\")\n\n        else:\n            print(\"Download skipped: No missing files!\")\n\n    # def __install_datalad_cache(self) -> None:\n    #     \"\"\"Install the DataLad dataset.\n\n    #     Notes\n    #     -----\n    #     Requires a functional ssh connection to `self.ssh_username`@`self.host`.\n    #     Located at `self.host_dir`/`self.notebook_name`/`self.data_cache_path`.\n    #     \"\"\"\n    #     if os.path.exists(self.data_cache_path):\n    #         # noqa: TODO check if path is a valid DataLad dataset without doing d.status because it's too long.\n    #         d = datalad.api.Dataset(self.data_cache_path)\n    #         d.update(how=\"merge\")\n    #     else:\n    #         datalad.api.install(\n    #             source=(\n    #                 f\"{self.ssh_username}@{self.ssh_host}:\"\n    #                 f\"{self.ssh_host_dir}/{self.notebook_name}\"\n    #             ),\n    #             path=self.data_cache_path,\n    #         )\n\n    def clean_protocol_description(self, desc: str) -> str:\n        
\"\"\"Create valid protocol description for file names (as done by PPMI).\n\n Parameters\n ----------\n str\n Protocol description. Example: \"MPRAGE GRAPPA\"\n \"\"\"\n return (\n desc.replace(\" \", \"_\").replace(\"(\", \"_\").replace(\")\", \"_\").replace(\"/\", \"_\")\n )\n\n def find_nifti_file_in_cache(\n self,\n subject_id: str,\n event_id: str,\n protocol_description: str,\n base_dir: str = \"inputs\",\n ) -> str | None:\n \"\"\"Return cached nifti files, if any.\n\n Search for nifti file matching `subject_id`, `event_id` and\n `protocol_description` in the cache directory.\n If not found, search for nifti file matching `subject_id` and `event_id` only,\n and return it if a single file is found.\n\n Parameters\n ----------\n subject_id: str\n Subject ID\n event_id: str\n Event ID. Example: BL\n protocol_description: str\n Protocol description. Example: \"MPRAGE GRAPPA\"\n base_dir: str, default \"inputs\"\n TODO Describe this. Not sure what it is exactly.\n\n Returns\n -------\n str or None\n File name matching the `subject_id`, `event_id`, and if possible\n `protocol_description`. None if no matching file is found.\n \"\"\"\n expression = os.path.join(\n self.data_cache_path,\n base_dir,\n f\"sub-{subject_id}\",\n f\"ses-{event_id}\",\n \"anat\",\n f\"PPMI_*{self.clean_protocol_description(protocol_description)}*.nii\",\n )\n files = glob.glob(expression)\n assert len(files) <= 1, f\"More than 1 Nifti file matched by {expression}\"\n if len(files) == 1:\n return files[0]\n # print(\n # \"Warning: no nifti file found for: \"\n # f\"{(subject_id, event_id, protocol_description)} with strict glob \"\n # \"expression. Trying with lenient glob expression.\"\n # )\n expression = os.path.join(\n self.data_cache_path,\n base_dir,\n f\"sub-{subject_id}\",\n f\"ses-{event_id}\",\n \"anat\",\n \"PPMI_*.nii\",\n )\n files = glob.glob(expression)\n assert len(files) <= 1, f\"More than 1 Nifti file matched by {expression}\"\n if len(files) == 1:\n return files[0]\n # print(\n # f\"Warning: no nifti file found for: \"\n # f\"{(subject_id, event_id, protocol_description)} \"\n # \"with lenient expression, returning None\"\n # )\n return None\n\n def disease_duration(self) -> pd.DataFrame:\n \"\"\"Return a DataFrame containing disease durations.\n\n Returns\n -------\n pd.DataFrame\n DataFrame containing disease durations for each (patient,event) pair found\n in \"MDS_UPDRS_Part_III.csv\".\n \"\"\"\n # Download required files\n self.download_ppmi_metadata(\n [\"MDS_UPDRS_Part_III.csv\", \"PD_Diagnosis_History.csv\"]\n )\n\n pddxdt = pd.read_csv(\n os.path.join(self.study_files_dir, \"PD_Diagnosis_History.csv\")\n )[[\"PATNO\", \"EVENT_ID\", \"PDDXDT\"]]\n pddxdt = pddxdt[(pddxdt[\"EVENT_ID\"] == \"SC\") & pddxdt[\"PDDXDT\"].notna()]\n pdxdur = pd.read_csv(\n os.path.join(self.study_files_dir, \"MDS_UPDRS_Part_III.csv\"),\n low_memory=False,\n )[[\"PATNO\", \"EVENT_ID\", \"INFODT\"]]\n\n PDDXDT_map = dict(zip(pddxdt[\"PATNO\"].values, pddxdt[\"PDDXDT\"].values))\n pdxdur[\"PDDXDT\"] = pdxdur[\"PATNO\"].map(PDDXDT_map)\n\n pdxdur[\"PDXDUR\"] = pdxdur.apply(\n lambda row: relativedelta(parse(row[\"INFODT\"]), parse(row[\"PDDXDT\"])).months\n if row[\"PDDXDT\"] is not np.nan\n else np.nan,\n axis=1,\n )\n pdxdur.drop(labels=[\"INFODT\", \"PDDXDT\"], inplace=True, axis=1)\n\n return pdxdur\n\n def moca2mmse(self, moca_score: int) -> int:\n \"\"\"Return a MMSE score given a MoCA score.\n\n Parameters\n ----------\n moca_score: int\n MoCA score\n\n Returns\n -------\n int\n MMSE score corresponding to 
the MoCA score\n Conversion made using Table 2 in\n https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4371590\n\n \"\"\"\n mapping = {\n 1: 6,\n 2: 9,\n 3: 11,\n 4: 12,\n 5: 13,\n 6: 14,\n 7: 15,\n 8: 15,\n 9: 16,\n 10: 17,\n 11: 18,\n 12: 18,\n 13: 19,\n 14: 20,\n 15: 21,\n 16: 22,\n 17: 22,\n 18: 23,\n 19: 24,\n 20: 25,\n 21: 26,\n 22: 26,\n 23: 27,\n 24: 28,\n 25: 28,\n 26: 29,\n 27: 29,\n 28: 30,\n 29: 30,\n 30: 30,\n }\n\n try:\n if math.isnan(moca_score):\n return np.nan\n else:\n return mapping[moca_score]\n except Exception as e:\n print(e)\n return moca_score\n\n def reformat_plot_labels(self, dist: pd.Series, ax: axes.Axes, freq: int) -> None:\n \"\"\"Reformat tick locations and labels of the x-axis on a plot.\n\n Parameters\n ----------\n dist: pd.Series\n Series representing the number of elements\n for each distinct values of a column\n ax: axes.Axes\n Matplotlib's Axes class to access figure\n elements and set the coordinate system\n freq: int\n interval between labels\n\n Returns\n -------\n None\n \"\"\"\n ax.set_xticklabels([x.removesuffix(\".0\") for x in dist.index.astype(str)])\n for label in ax.xaxis.get_ticklabels():\n try:\n if int(label.get_text()) % freq != 0:\n label.set_visible(False)\n except Exception:\n pass\n\n def download_missing_nifti_files(\n self, cohort: pd.DataFrame, link_in_outputs=False\n ) -> None:\n \"\"\"Download missing nifti files required by cohort.\n\n For each subject in cohort, look for T1-weighted nifti image file in\n notebook cache. Download all the missing files from PPMI, move them\n to notebook cache (inputs directory), and add their names to cohort.\n\n Parameters\n ----------\n cohort: pd.DataFrame\n A Pandas DataFrame containing columns PATNO (PPMI patient id), EVENT_ID\n (MRI visit, for instance 'V06'), and Description (for instance\n 'MPRAGE GRAPPA'). Can be built from the file produced by\n 'MRI metadata.ipynb'. A column 'File name' will be added to the DataFrame\n if not already present. This column\n will contain the paths of the T1-weighted nifti files associated with the\n patient, MRI visit, and protocol description.\n\n link_in_outputs: bool\n If True, create symbolic links to input nifti files in\n outputs/pre-processing. 
Useful for processing tools that\n write next to input files, such as SPM.\n\n Returns\n -------\n None\n \"\"\"\n # Find nifti file names in cohort\n cohort[\"File name\"] = cohort.apply(\n lambda x: self.find_nifti_file_in_cache(\n x[\"PATNO\"], x[\"EVENT_ID\"], x[\"Description\"]\n ),\n axis=1,\n )\n print(\n f\"Number of available subjects: {len(cohort[cohort['File name'].notna()])}\"\n )\n print(f\"Number of missing subjects: {len(cohort[cohort['File name'].isna()])}\")\n\n # Download missing file names\n try:\n ppmi_dl = ppmi_downloader.PPMIDownloader()\n missing_subject_ids = cohort[cohort[\"File name\"].isna()][\"PATNO\"]\n print(f\"Downloading image data of {len(missing_subject_ids)} subjects\")\n ppmi_dl.download_imaging_data(\n missing_subject_ids,\n type=\"nifti\",\n timeout=120 * len(missing_subject_ids),\n headless=False,\n )\n except Exception as e:\n print(\"Download failed!\")\n raise (e)\n\n # Find cohort file names among downloaded files\n results_path = \"outputs\"\n ppmi_fd = ppmi_downloader.PPMINiftiFileFinder()\n for _, row in cohort.iterrows():\n if row[\"File name\"] is None:\n filename = ppmi_fd.find_nifti(\n row[\"PATNO\"], row[\"EVENT_ID\"], row[\"Description\"]\n )\n if filename is None:\n print(\n \"Not found: \"\n + f\"{row['PATNO'], row['EVENT_ID'], row['Description']}\"\n )\n else: # copy file to dataset\n dest_dir = os.path.join(\n \"inputs\",\n f'sub-{row[\"PATNO\"]}',\n f'ses-{row[\"EVENT_ID\"]}',\n \"anat\",\n )\n os.makedirs(dest_dir, exist_ok=True)\n dest_file = os.path.join(dest_dir, os.path.basename(filename))\n os.rename(filename, dest_file)\n row[\"File name\"] = dest_file\n\n # Update file names in cohort\n cohort[\"File name\"] = cohort.apply(\n lambda x: self.find_nifti_file_in_cache(\n x[\"PATNO\"], x[\"EVENT_ID\"], x[\"Description\"]\n ),\n axis=1,\n )\n\n # Create symlinks to inputs if necessary\n if link_in_outputs:\n for file_name in cohort[\"File name\"].values:\n dest_dir = os.path.dirname(file_name).replace(\n os.path.join(self.data_cache_path, \"inputs\"),\n os.path.join(results_path, \"pre_processing\"),\n )\n dest_file = os.path.join(\n dest_dir,\n os.path.basename(file_name.replace(self.data_cache_path, \"\")),\n )\n if not os.path.exists(dest_file):\n # print(dest_dir, file_name, dest_file)\n os.makedirs(dest_dir, exist_ok=True)\n os.symlink(\n os.path.relpath(os.path.abspath(file_name), start=dest_file),\n dest_file,\n )\n\n def cohort_id(self, cohort: pd.DataFrame) -> str:\n \"\"\"Return a unique id for the cohort.\n\n The id is built as the hash of the sorted list of patient ids in the cohort.\n Since cohort_ids may be used to create file names, negative signs ('-')\n are replaced with underscore characters ('_') since SPM crashes on file names\n containing negative signs. Therefore, the cohort id is a string that cannot\n be cast to an integer.\n\n Parameters\n ----------\n cohort: pd.DataFrame\n A Pandas DataFrame with a column named 'PATNO'.\n\n Return\n ------\n string\n A string containing the unique id of the cohort.\n \"\"\"\n return str(hash(tuple(sorted(cohort[\"PATNO\"])))).replace(\"-\", \"_\")\n\n def write_spm_batch_files(\n self,\n template_job_filename: str,\n replaced_keys: dict,\n executable_job_file_name: str,\n ) -> None:\n \"\"\"Write SPM batch files from a template by replacing placeholder keys in it.\n\n Open the SPM batch file in template_job_filename, search and replace keys found\n in replaced_keys, and write the result in two files, a \"batch\" file and a \"job\"\n file. 
Output file names are built from executable_job_file_name. Job file names must end\n with '_job.m'\n\n Parameters\n ----------\n template_job_filename: str\n File name of template SPM job. Contains placeholder keys to be replaced to\n create an executable batch. No format is specified for the keys, make sure\n that they are uniquely identified in the template file!\n\n replaced_keys: dict\n Dictionary containing keys to be replaced by values in the template job\n file. Example: {'[IMAGE]': 'inputs/sub-1234/ses-1/anat/image.nii'}. Make\n sure that the keys are present in the template job file!\n\n executable_job_file_name: str\n File name where to write the executable job file. Must end in '_job.m'.\n An SPM batch file calling this job file will also be written with a\n '_batch.m' ending.\n\n Returns\n -------\n None\n \"\"\"\n\n def replace_keys(string, replace_keys):\n for k in replace_keys:\n string = string.replace(k, replace_keys[k])\n return string\n\n # Read template file\n with open(template_job_filename) as f:\n content = f.read()\n\n assert template_job_filename.endswith(\"_job.m\")\n assert executable_job_file_name.endswith(\"_job.m\")\n\n with open(executable_job_file_name, \"w\") as f:\n f.write(replace_keys(content, replaced_keys))\n\n print(f\"Job batch file written in {os.path.basename(executable_job_file_name)}\")\n\n # Batch file\n content_batch = pkgutil.get_data(\n __name__, os.path.join(\"templates\", \"call_batch.m\")\n )\n assert content_batch is not None, \"Cannot read batch template file.\"\n content_batch_str = content_batch.decode(\"utf-8\")\n tempfile_name_batch = executable_job_file_name.replace(\"_job\", \"_batch\")\n\n with open(tempfile_name_batch, \"w\") as f:\n job_dir = os.path.dirname(os.path.abspath(executable_job_file_name))\n f.write(\n replace_keys(\n content_batch_str,\n {\n \"[BATCH]\": f\"addpath('{job_dir}')\"\n + os.linesep\n + os.path.basename(executable_job_file_name.replace(\".m\", \"\"))\n },\n )\n )\n\n print(f\"Batch file written in {os.path.basename(tempfile_name_batch)}\")\n\n def run_spm_batch_file(\n self,\n executable_job_file_name: str,\n boutiques_descriptor: str = \"zenodo.6881412\",\n force: bool = False,\n ):\n \"\"\"Run an SPM batch file using Boutiques.\n\n Requires Docker or Singularity container engines (Singularity untested yet in\n this context). Download the Boutiques descriptor from Zenodo or use the local\n file passed as argument. Download the Docker container, create a Boutiques\n invocation and run it. Write logs in log file created from\n executable_job_file_name (example: pre_processing_1234.log). If log file\n already exists, skip execution unless force is set to True.\n\n Parameters\n ----------\n executable_job_file_name: str\n An SPM job file ready to be executed.\n Example: 'code/batches/pre_processing_1234_job.m'.\n See self.write_spm_batch_files for a possible way to create such a file.\n\n\n boutiques_descriptor: str\n A Boutiques descriptor in the form of a Zenodo id, local file name, or\n JSON string. Don't modify the default value unless you know what you are\n doing.\n\n force: bool\n Force execution even if log file already exists for this execution.\n Default: False.\n\n Returns\n -------\n boutiques.ExecutionOutput or None\n Boutiques execution output object containing exit code and various logs,\n or None if execution was skipped because the log file already exists.\n \"\"\"\n log_dir = os.path.join(\"outputs\", \"logs\")\n os.makedirs(log_dir, exist_ok=True)\n\n log_file_name = os.path.abspath(\n os.path.join(\n log_dir,\n os.path.basename(executable_job_file_name.replace(\"_job.m\", \".log\")),\n )\n )\n spm_batch_file = executable_job_file_name.replace(\"_job\", \"_batch\")\n\n if not force:\n if os.path.exists(log_file_name):\n print(\n f\"Log file {os.path.basename(log_file_name)} exists, \"\n + \"skipping batch execution (remove file or use force=True \"\n + \"to force execution)\"\n )\n return\n else:\n print(\n f\"Log file {os.path.basename(log_file_name)} does not exist, \"\n + \"running batch\"\n )\n\n # Initialize Boutiques Python function for descriptor\n spm_batch = descriptor2func(boutiques_descriptor)\n\n output = spm_batch(\n \"launch\",\n \"-s\",\n \"-u\",\n spm_batch_file=spm_batch_file,\n log_file_name=log_file_name,\n )\n\n assert (\n output.exit_code == 0\n ), f\"Execution error, inspect output object for logs: {output}\"\n\n print(\"Execution was successful.\")\n\n return output\n\n def smwc_scan(\n self,\n tissue_class: int,\n patno: int,\n visit: str,\n pre_processing_dir: str = \"pre_processing\",\n ) -> str:\n \"\"\"Find the SPM tissue class file of patient at visit with given protocol.\n\n Scans the outputs directory for an SPM tissue class file obtained from nifti\n file of patient at visit using protocol description. To find a tissue file,\n matches glob expression\n outputs/{pre_processing_dir}/sub-{patno}/ses-{visit}/anat/smwc{tissue_class}PPMI*.nii\n Raises an assertion error unless exactly one file matches this expression.\n\n Parameters\n ----------\n tissue_class: int\n 1 (grey matter) or 2 (white matter)\n\n patno: int\n PPMI patient identifier\n\n visit: str\n PPMI visit name. Example: 'V04'.\n\n pre_processing_dir: str\n Directory in 'outputs' where pre-processing results are stored.\n\n Returns\n -------\n str: path of a tissue class file\n \"\"\"\n if tissue_class not in (1, 2):\n raise Exception(f\"Unrecognized tissue class: {tissue_class}\")\n dirname = os.path.join(\"outputs\", pre_processing_dir)\n expression = (\n f\"{dirname}/sub-{patno}/ses-{visit}/anat/smwc{tissue_class}PPMI*.nii\"\n )\n files = glob.glob(expression)\n assert (\n len(files) == 1\n ), f\"Zero or more than 1 files were matched by expression: {expression}\"\n return os.path.abspath(files[0])\n\n def export_spm_segmentations(\n self,\n cohort: pd.DataFrame,\n folder: str,\n cut_coords: tuple = (-28, -7, 17),\n force: bool = False,\n extension: str = \"png\",\n ) -> None:\n \"\"\"Export segmentation images as 2D image files.\n\n Meant to be used for quality control. make_gif can assemble these images\n into an animated gif.\n\n Parameters\n ----------\n cohort: pd.DataFrame\n LivingPark cohort to export. Must have a column called 'File name'.\n\n folder: str\n Folder where to export the segmentation images.\n\n cut_coords: tuple\n Passed to Nilearn viewer. The MNI coordinates of the cutting plane.\n\n force: bool\n If True, force export to existing folder. Removes all the files in folder\n before writing new ones.\n\n extension: str\n Image file extension supported by Matplotlib. Example: 'png'.\n \"\"\"\n alpha = 0.5\n\n if (not force) and os.path.exists(folder):\n print(\n f\"Folder {folder} already exists, skipping image export \"\n + \"(remove folder or use force=True to force).\"\n )\n return\n\n if os.path.exists(folder): # force is True\n print(f\"Folder {folder} already exists, removing its content\")\n for f in glob.glob(os.path.join(folder, \"*\")):\n os.remove(f)\n\n os.makedirs(folder, exist_ok=True)\n\n for i in range(len(cohort)):\n\n input_file = cohort[\"File name\"].values[i]\n subj_id = cohort[\"PATNO\"].values[i]\n\n output_file_name = input_file.replace(\n os.path.join(self.data_cache_path, \"inputs\"),\n os.path.join(\"outputs\", \"pre_processing\"),\n )\n output_file_c1 = output_file_name.replace(\"PPMI\", \"smwc1PPMI\")\n output_file_c2 = output_file_name.replace(\"PPMI\", \"smwc2PPMI\")\n\n fig = plt.figure()\n display = nplt.plot_anat(\n cut_coords=list(cut_coords), figure=fig, title=f\"#{i}/{len(cohort)}\"\n )\n display.add_overlay(output_file_c1, cmap=\"Reds\", threshold=0.1, alpha=alpha)\n display.add_overlay(\n output_file_c2, cmap=\"Blues\", threshold=0.1, alpha=alpha\n )\n\n os.makedirs(folder, exist_ok=True)\n plt.savefig(\n os.path.join(\n folder, f\"qc_{self.cohort_id(cohort)}_{subj_id}.{extension}\"\n )\n )\n plt.close(fig) # so as to not display the figure\n\n def make_gif(self, frame_folder: str, output_name: str = \"animation.gif\") -> None:\n \"\"\"Make a gif from a set of images located in the same folder.\n\n Parameters\n ----------\n frame_folder : str\n Folder where frames are stored. Frames must be in a format supported by PIL.\n\n output_name : str\n Base name of the gif file. Will be written in frame_folder.\n \"\"\"\n frames = [\n Image.open(image)\n for image in glob.glob(os.path.join(f\"{frame_folder}\", \"*.png\"))\n ]\n frame_one = frames[0]\n frame_one.save(\n os.path.join(frame_folder, output_name),\n format=\"GIF\",\n append_images=frames,\n save_all=True,\n duration=1000,\n loop=0,\n )\n print(f\"Wrote {os.path.join(frame_folder, output_name)}\")\n\n def qc_spm_segmentations(self, cohort):\n \"\"\"Display a gif file with SPM segmentation results from the cohort.\n\n Parameters\n ----------\n cohort: pd.DataFrame\n LivingPark cohort to QC. 
Must have a column called 'File name'.\n \"\"\"\n qc_dir = f\"qc_{self.cohort_id(cohort)}\"\n\n self.export_spm_segmentations(cohort, qc_dir)\n self.make_gif(qc_dir)\n return ImageDisplay(url=os.path.join(qc_dir, \"animation.gif\"))\n", "repo_name": "chelsieng/livingpark-utils", "sub_path": "livingpark_utils/livingpark_utils.py", "file_name": "livingpark_utils.py", "file_ext": "py", "file_size_in_byte": 28559, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.path.abspath", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.makedirs", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.path.islink", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 63, "usage_type": "name"}, {"api_name": "os.path.symlink", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "warnings.filterwarnings", "line_number": 80, "usage_type": "call"}, {"api_name": "subprocess.check_call", "line_number": 84, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 98, "usage_type": "attribute"}, {"api_name": "IPython.display.HTML", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 103, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 103, "usage_type": "call"}, {"api_name": "IPython.display.HTML", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 
138, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 142, "usage_type": "call"}, {"api_name": "ppmi_downloader.PPMIDownloader", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 224, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 224, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 232, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 241, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 249, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 275, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 275, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 275, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 279, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 279, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 279, "usage_type": "name"}, {"api_name": "numpy.nan", "line_number": 288, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 287, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 260, "usage_type": "attribute"}, {"api_name": "math.isnan", "line_number": 346, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 347, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 354, "usage_type": "attribute"}, {"api_name": "matplotlib.axes.Axes", "line_number": 354, "usage_type": "attribute"}, {"api_name": "matplotlib.axes", "line_number": 354, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 381, "usage_type": "attribute"}, {"api_name": "ppmi_downloader.PPMIDownloader", "line_number": 423, "usage_type": "call"}, {"api_name": "ppmi_downloader.PPMINiftiFileFinder", "line_number": 438, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 450, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 450, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 450, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 456, "usage_type": "call"}, {"api_name": "os.path", "line_number": 456, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 457, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 457, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 457, "usage_type": "call"}, {"api_name": "os.path.rename", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path", "line_number": 458, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 472, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 472, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 472, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 473, "usage_type": "call"}, {"api_name": "os.path.path", 
"line_number": 473, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 473, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 474, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 474, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 474, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 476, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 476, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 476, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 478, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 478, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 478, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 480, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 480, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 480, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path", "line_number": 482, "usage_type": "name"}, {"api_name": "os.path.symlink", "line_number": 483, "usage_type": "call"}, {"api_name": "os.path", "line_number": 483, "usage_type": "name"}, {"api_name": "os.path.path.relpath", "line_number": 484, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 484, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 484, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 484, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 488, "usage_type": "attribute"}, {"api_name": "os.path.path.basename", "line_number": 559, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 559, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 559, "usage_type": "name"}, {"api_name": "pkgutil.get_data", "line_number": 562, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 563, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 563, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 563, "usage_type": "name"}, {"api_name": "os.path.path.dirname", "line_number": 570, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 570, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 570, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 570, "usage_type": "call"}, {"api_name": "os.path.linesep", "line_number": 576, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 576, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 577, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 577, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 577, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 582, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 621, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 621, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 621, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 622, "usage_type": "call"}, {"api_name": "os.path", "line_number": 622, "usage_type": "name"}, {"api_name": "os.path.path.abspath", "line_number": 624, "usage_type": "call"}, {"api_name": 
"os.path.path", "line_number": 624, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 624, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 625, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 625, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 625, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 627, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 627, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 627, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 633, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 633, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 633, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 635, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 635, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 635, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 642, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 642, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 642, "usage_type": "name"}, {"api_name": "boutiques.descriptor2func.function", "line_number": 647, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 700, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 700, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 700, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 704, "usage_type": "call"}, {"api_name": "os.path.path.abspath", "line_number": 708, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 708, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 708, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 712, "usage_type": "attribute"}, {"api_name": "os.path.path.exists", "line_number": 743, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 743, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 743, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 750, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 750, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 750, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 752, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 752, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 752, "usage_type": "name"}, {"api_name": "os.path.remove", "line_number": 753, "usage_type": "call"}, {"api_name": "os.path", "line_number": 753, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 755, "usage_type": "call"}, {"api_name": "os.path", "line_number": 755, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 763, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 763, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 763, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 764, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 764, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 764, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 769, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 769, "usage_type": 
"name"}, {"api_name": "nilearn.plotting.plot_anat", "line_number": 770, "usage_type": "call"}, {"api_name": "nilearn.plotting", "line_number": 770, "usage_type": "name"}, {"api_name": "os.path.makedirs", "line_number": 778, "usage_type": "call"}, {"api_name": "os.path", "line_number": 778, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 779, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 779, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 780, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 780, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 780, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 784, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 784, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 798, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 798, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 799, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 799, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 799, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 799, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 803, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 803, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 803, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 810, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 810, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 810, "usage_type": "name"}, {"api_name": "IPython.display.Image", "line_number": 824, "usage_type": "call"}, {"api_name": "os.path.path.join", "line_number": 824, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 824, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 824, "usage_type": "name"}]} +{"seq_id": "18413325665", "text": "import os\nfrom itertools import count\n\nimport requests\nfrom dotenv import load_dotenv\n\nfrom cli_parser import parse_arguments\nfrom cli_tables import make_table\nfrom salaries_calculations import predict_rub_salary_sj, calculate_average\n\n\ndef fetch_all_salaries_sj(params: dict, headers: dict) -> dict:\n url = 'https://api.superjob.ru/2.0/vacancies'\n all_salaries = []\n for page in count():\n params.update(page=page)\n page_response = requests.get(url=url, headers=headers, params=params)\n page_response.raise_for_status()\n decoded_page_response = page_response.json()\n vacancies = decoded_page_response['objects']\n for vacancy in vacancies:\n all_salaries.append(predict_rub_salary_sj(vacancy))\n if not decoded_page_response['more']:\n break\n return {\n 'vacancies_found': decoded_page_response['total'],\n 'vacancies_processed': len(all_salaries),\n 'average_salary': calculate_average(all_salaries)\n }\n\n\ndef set_sj_parameters(language: str, town: str) -> tuple[dict, dict]:\n params = {'town': town, 'keyword': f'{language} разработчик', 'count': 100}\n headers = {'X-Api-App-Id': os.getenv('SJ_API_KEY')}\n return params, headers\n\n\ndef main():\n load_dotenv()\n sj_vacancies = {}\n keywords, town = parse_arguments()\n for language in keywords:\n params, headers = set_sj_parameters(language, town)\n sj_vacancies[language] = fetch_all_salaries_sj(params, headers)\n print(make_table(sj_vacancies, title='SuperJob Analytics'))\n\n\nif __name__ == 
'__main__':\n main()\n", "repo_name": "triple-s-rank/developers_salary_app", "sub_path": "sj_statistics.py", "file_name": "sj_statistics.py", "file_ext": "py", "file_size_in_byte": 1609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.count", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "salaries_calculations.predict_rub_salary_sj", "line_number": 22, "usage_type": "call"}, {"api_name": "salaries_calculations.calculate_average", "line_number": 28, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 34, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 39, "usage_type": "call"}, {"api_name": "cli_parser.parse_arguments", "line_number": 41, "usage_type": "call"}, {"api_name": "cli_tables.make_table", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "1613296510", "text": "import stripe\nfrom flask import Flask, render_template, request, jsonify, redirect, url_for\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm.session import sessionmaker\n\n\nfrom local import SECRET_KEY, PUBLISHABLE_KEY\n\n\napp = Flask(__name__, template_folder=\"templates\", static_folder=\"static\")\nstripe_keys = {\n 'secret_key': SECRET_KEY,\n 'publishable_key': PUBLISHABLE_KEY\n}\nstripe.api_key = stripe_keys['secret_key']\n\nBase = declarative_base()\n\n\nclass Affiliate(Base):\n __tablename__ = 'affiliates'\n name = Column(String(250), nullable=False)\n email = Column(String(250), nullable=False, primary_key=True)\n address = Column(String(500), nullable=False)\n code = Column(String(8), nullable=False, unique=True)\n count = Column(Integer, default=0)\n\n\nengine = create_engine('sqlite:///vitesse.db')\ntry:\n Base.metadata.create_all(engine)\nexcept:\n pass\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', key=stripe_keys['publishable_key'])\n\n\n@app.route('/thank-you/<order_number>')\ndef thank_you(order_number):\n return render_template('thank_you.html', order_number=order_number)\n\n\n@app.route('/charge', methods=['POST'])\ndef charge():\n amount = 62900\n shipping = {\n 'address': {\n 'line1': request.form['shipping_address_line1'],\n 'city': request.form['shipping_address_city'],\n 'state': request.form['shipping_address_state'],\n 'postal_code': request.form['shipping_address_zip'],\n 'country': request.form['shipping_address_country'],\n },\n 'name': request.form['shipping_name'],\n }\n customer = stripe.Customer.create(\n email=request.form['card[name]'],\n card=request.form['id'],\n shipping=shipping,\n )\n charge = stripe.Charge.create(\n customer=customer.id,\n amount=amount,\n currency='usd',\n description='Qty 1: Vitesse Electric Longboard'\n )\n\n code = request.form.get('code')\n if code:\n a = session.query(Affiliate).filter_by(code=code).first()\n if a:\n a.count += 1\n session.add(a)\n session.commit()\n\n return jsonify(order_number=charge.id)\n\n\n@app.route('/affiliates')\ndef affiliates():\n affiliates = session.query(Affiliate).order_by('count desc').order_by('name').all()\n return render_template('affiliate.html', affiliates=affiliates)\n\n\n@app.route('/affiliates/create', methods=['POST'])\ndef affiliate_make():\n if request.form.get('password') == \"JesusIsLord\":\n data = {field: 
request.form[field]\n for field in ('name', 'email', 'address', 'code')}\n a = Affiliate(**data)\n session.add(a)\n try:\n session.commit()\n except Exception as e:\n session.rollback()\n return str(e)\n return redirect(url_for('affiliates'))\n", "repo_name": "exit99/longboardsite", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3037, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "local.SECRET_KEY", "line_number": 14, "usage_type": "name"}, {"api_name": "local.PUBLISHABLE_KEY", "line_number": 15, "usage_type": "name"}, {"api_name": "stripe.api_key", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 28, "usage_type": "argument"}, {"api_name": "sqlalchemy.create_engine", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.session.sessionmaker", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "stripe.Customer.create", "line_number": 63, "usage_type": "call"}, {"api_name": "stripe.Customer", "line_number": 63, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "stripe.Charge.create", "line_number": 68, "usage_type": "call"}, {"api_name": "stripe.Charge", "line_number": 68, "usage_type": "attribute"}, 
{"api_name": "flask.request.form.get", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 94, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 94, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 104, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "74778592154", "text": "\r\nimport sys\r\nimport os\r\nimport pysam\r\nimport inspect\r\nimport re\r\nimport pickle\r\nimport gzip\r\n\r\n# pysam is for reading sam, bam files.\r\n\r\n# in the bash script, \r\n# data = /mnt/c/data...\r\n# result = /mnt/c/results\r\n# we call python script out with\r\n# python script.py $data/fast.fa $results/fastres.fa\r\n# so $data.. will be the first argument -> sys.argv.[1], assigned to variable infasta\r\n\r\n\r\ndef main(genomefasta,inbam,inJunc):\r\n # input genome fasta file, return genome dictionary \r\n print(\"Read genome fasta\")\r\n gen_dict = readGenome(genomefasta)\r\n #print(gen_dict[\"SA11_S5\"]) \r\n picklefile = os.path.join(os.path.split(os.path.abspath(inbam))[0], os.path.split(os.path.splitext(os.path.abspath(inbam))[0])[1]) #creating a pcl file in the same directory as inbam, and with the same name as inbam file. 
First part = path, second path = name\r\n try: \r\n interLRI_list, intraLRI_listdr, SRI_list,exp_name,bam_cnt,junc_cnt,totread_bp, totread_ap, per_inter, per_intra, per_SRI = loadData(picklefile)\r\n except:\r\n print(\"Read bam file\")\r\n SRI_list, intraLRI_listdr,bam_cnt, exp_name = readBam(inbam,gen_dict)\r\n #print(\"SRI_list\", SRI_list[:20])\r\n #print(\"intraLRI_listdr\", intraLRI_listdr[:20])\r\n #print(bam_list[:1][0:])\r\n\r\n print(\"Read chimeric junction file\")\r\n interLRI_list, intraLRI_listdr,SRI_list,junc_cnt = readChimJunc(inJunc,intraLRI_listdr,SRI_list)\r\n\r\n print(\"classification summary\")\r\n totread_bp, totread_ap, per_inter, per_intra, per_SRI = summary(bam_cnt, junc_cnt, interLRI_list, intraLRI_listdr, SRI_list)\r\n\r\n saveData((interLRI_list, intraLRI_listdr, SRI_list,exp_name,bam_cnt,junc_cnt,totread_bp, totread_ap, per_inter, per_intra, per_SRI), picklefile)\r\n\r\n\r\n print(f\"For {exp_name} '\\n'total read before processing: {totread_bp} '\\n'total interactions after processing: {totread_ap} '\\n'% of inter-LRI: {per_inter} '\\n'% of intra-LRI: {per_intra}'\\n'% of SRI: {per_SRI}\")\r\n\r\n #print(f\"interlist {interLRI_list[:10]}'\\n'intralist {intraLRI_listdr[:10]}'\\n' SRI {SRI_list[:10]}\")\r\n\r\n #print(\"seq_name\",seq_name[0:5])\r\n #print(\"chim_name\",chim_name[0:5])\r\n \r\n #print(\"Find equal names\")\r\n #s = findrep(seq_name,chim_name)\r\n #print(\"s\",s)\r\n \r\n #print(type(chim_name),chim_name[0:5],type(seq_name),seq_name[0:5])\r\n #for counting the number of chimeric CIGAR\r\n #interLRI_list, intraLRI_listdr, cnt = readChimJunc(inJunc,intraLRI_listdr)\r\n #print(\"cnt\",cnt)\r\n #print(chimCIGAR)\r\n #print(\"interLRI_list\", interLRI_list)\r\n #print(\"intraLRI_listdr\", intraLRI_listdr)\r\n\r\n \r\ndef readGenome(genomefasta):\r\n # load genome fasta file, and return a dictionary\r\n with open(genomefasta, \"r\") as genfasta:\r\n gen_dict = dict()\r\n seg_name = \"\"\r\n for i,line in enumerate(genfasta):\r\n # this command convert the name \">SA11_S5 WT LC178570.1\" into \"'>SA11_s5', 'WT', LC..'\", i.e. 
cut at space\r\n # \">SA11_S3 WT 65433667.7\\n\"\r\n line = line.strip()\r\n # \">SA11_S3 WT 65433667.7\"\r\n if i % 2 == 0:\r\n # this command removes the > symbol in front of genome segment name (from [1:])\r\n # \">SA11_S3 WT 65433667.7\"\r\n line_list = line.split()\r\n # [\">SA11_S3\" \"WT\" \"65433667.7\"]\r\n seg_name_arrow = line_list[0]\r\n # \">SA11_S3\"\r\n seg_name = seg_name_arrow[1:]\r\n # \"SA11_S3\"\r\n else:\r\n gen_dict[seg_name] = line\r\n #seg_name is a temporary variable which holds name from the previous line\r\n return gen_dict\r\n \r\ndef readBam(inbam,gen_dict):\r\n exp_name = os.path.split(os.path.splitext(os.path.abspath(inbam))[0])[1]\r\n with pysam.AlignmentFile(inbam,\"rb\") as ib:\r\n SRI_list = []\r\n intraLRI_listdr = []\r\n seq_name = []\r\n rname = \"\"\r\n comp = ()\r\n CIGAR = \"\"\r\n bam_cnt = 0\r\n for it,line in enumerate(ib):\r\n #seq_name.append(line.query_name)\r\n bam_cnt += 1\r\n rname = line.reference_name\r\n i = int(line.reference_start) # starting position as integer 0-index\r\n i,j,k,l = readCIGAR(i,line.cigarstring)\r\n if k == 0 and l == 0:\r\n comp = (rname,i,j)\r\n SRI_list.append(comp)\r\n #print(\"bam sequence\",line.query_alignment_sequence)\r\n #print(\"indexed sequence\", comp, gen_dict[rname][i:j])\r\n elif k-j <20:\r\n comp = (rname, i,l)\r\n SRI_list.append(comp)\r\n elif j-i >= 20 and l-k >= 20:\r\n comp = (rname, i, j, k, l)\r\n intraLRI_listdr.append(comp)\r\n elif j-i >= 20 and l-k < 20:\r\n comp = (rname, i,j)\r\n SRI_list.append(comp)\r\n elif j-i < 20 and l-k >= 20:\r\n comp = (rname, k,l)\r\n SRI_list.append(comp)\r\n #print(\"bam\",bam_cnt)\r\n return SRI_list, intraLRI_listdr, bam_cnt, exp_name\r\n\r\ndef readChimJunc(inJunc, intraLRI_listdr,SRI_list):\r\n with open(inJunc, \"r\") as ij:\r\n interLRI_list = []\r\n #chim_name =[]\r\n rname1 = \"\"\r\n rname2 = \"\"\r\n comp = ()\r\n comp2 = ()\r\n comp3 = ()\r\n CIGAR1 = \"\"\r\n CIGAR2= \"\"\r\n cnt = 0\r\n junc_cnt = 0\r\n for it,line in enumerate(ij):\r\n junc_cnt += 1\r\n line = line.split()\r\n if line[0] == \"#\":\r\n continue\r\n rname1 = line[0]\r\n rname2 = line[3]\r\n #chim_name.append(line[9])\r\n rstart1 = int(line[10])\r\n rstart2 = int(line[12])\r\n CIGAR1 = line[11]\r\n CIGAR2 = line[13]\r\n ai,aj,ak,al = readCIGAR(rstart1,CIGAR1)\r\n bi,bj,bk,bl = readCIGAR(rstart2,CIGAR2)\r\n #print (rname1, CIGAR1,rstart1)\r\n if rname1 == rname2:#intraLRI & SRI\r\n if (ak,al) != (0,0) and (bk,bl) != (0,0):\r\n pass\r\n elif (ak,al) != (0,0):\r\n if ak-aj > 20: #gap >20\r\n if aj-ai >= 20 and al-ak >=20: #both match >20\r\n comp = (rname1, ai, aj, bi, bj)\r\n comp2 = (rname1, ak, al, bi,bj)\r\n comp3 = (rname1, ai, aj, ak, al)\r\n intraLRI_listdr.append(comp)\r\n intraLRI_listdr.append(comp2)\r\n intraLRI_listdr.append(comp3)\r\n elif aj-ai <20:\r\n comp = (rname1, ak,al,bi,bj)\r\n intraLRI_listdr.append(comp)\r\n elif al-ak <20:\r\n comp = (rname1,ai,aj,bi,bj)\r\n intraLRI_listdr.append(comp)\r\n else:\r\n comp = (rname1,ai,al,bi,bj)\r\n intraLRI_listdr.append(comp)\r\n SRI_list.append((rname1, ai,al))\r\n elif (bk,bl) != (0,0):\r\n if bk-bj > 20:\r\n if bj-bi >= 20 and bl-bk >=20:\r\n comp = (rname1, bi, bj, ai, aj)\r\n comp2 = (rname1, bk, bl, ai,aj)\r\n comp3 = (rname1, bi, bj, bk, bl)\r\n intraLRI_listdr.append(comp)\r\n intraLRI_listdr.append(comp2)\r\n intraLRI_listdr.append(comp3)\r\n elif bj-bi <20:\r\n comp = (rname1, bk,bl,ai,aj)\r\n intraLRI_listdr.append(comp)\r\n elif bl-bk <20:\r\n comp = (rname1,bi,bj,ai,aj)\r\n intraLRI_listdr.append(comp)\r\n else:\r\n comp = (rname1,bi,bl,ai,aj)\r\n intraLRI_listdr.append(comp)\r\n SRI_list.append((rname1, bi,bl))\r\n else:\r\n intraLRI_listdr.append((rname1,ai,aj,bi,bj))\r\n else: #interLRI\r\n if (ak,al) != (0,0) and (bk,bl) != (0,0):\r\n pass\r\n elif (ak,al) != (0,0):\r\n comp3 = (rname2, bi, bj)\r\n if ak-aj > 20 :\r\n if aj-ai >= 20 and al-ak >= 20:\r\n comp = (rname1, ai, aj)\r\n comp2 = (rname1,ak, al)\r\n intraLRI_listdr.append((rname1,ai,aj,ak,al))\r\n interLRI_list.append((comp,comp3))\r\n interLRI_list.append((comp2,comp3))\r\n elif aj-ai <20:\r\n comp = (rname1, ak,al)\r\n interLRI_list.append((comp,comp3))\r\n elif al-ak <20:\r\n comp = (rname1,ai,aj)\r\n interLRI_list.append((comp,comp3))\r\n else: \r\n comp = (rname1,ai,al)\r\n interLRI_list.append((comp,comp3))\r\n SRI_list.append(comp)\r\n elif (bk,bl) != (0,0):\r\n comp3 = (rname1, ai, aj)\r\n if bk-bj > 20 :\r\n if bj-bi >= 20 and bl-bk >= 20:\r\n comp = (rname2, bi, bj)\r\n comp2 = (rname2,bk, bl)\r\n intraLRI_listdr.append((rname2,bi,bj,bk,bl))\r\n interLRI_list.append((comp,comp3))\r\n interLRI_list.append((comp2,comp3))\r\n elif bj-bi <20:\r\n comp = (rname2, bk,bl)\r\n interLRI_list.append((comp,comp3))\r\n elif bl-bk <20:\r\n comp = (rname2,bi,bj)\r\n interLRI_list.append((comp,comp3))\r\n else: \r\n comp = (rname2,bi,bl)\r\n interLRI_list.append((comp,comp3))\r\n SRI_list.append(comp)\r\n else:\r\n comp = (rname1,ai,aj)\r\n comp3 = (rname2,bi,bj)\r\n interLRI_list.append((comp,comp3))\r\n #print(\"ju\",junc_cnt)\r\n return interLRI_list, intraLRI_listdr, SRI_list, junc_cnt\r\n\r\ndef summary(bam_cnt,junc_cnt, interLRI_list, intraLRI_listdr, SRI_list):\r\n totread_bp = int(bam_cnt)+int(junc_cnt)\r\n leninter = len(interLRI_list)\r\n lenintra = len(intraLRI_listdr)\r\n lenSRI = len(SRI_list)\r\n totread_ap = leninter + lenintra+ lenSRI\r\n per_inter = leninter*100/totread_ap\r\n per_intra = lenintra*100/totread_ap\r\n per_SRI = lenSRI*100/totread_ap\r\n return totread_bp, totread_ap, per_inter, per_intra, per_SRI\r\n\r\n\r\ndef findrep(seq_name,chim_name):\r\n seq_name.sort()\r\n chim_name.sort()\r\n c = 0\r\n s = 0\r\n l = len(seq_name)\r\n for k,name in enumerate(seq_name):\r\n print(f\"{k} of {l}\", end=\"\\r\")\r\n for i in range(c,len(chim_name)):\r\n if name < chim_name[i]:\r\n continue\r\n elif name == chim_name[i]:\r\n s += 1\r\n else:\r\n c = i\r\n break\r\n print(\"\")\r\n return s\r\n# no overlap found between bam and chimJunction\r\n\r\n\r\ndef readCIGAR(i,CIGAR): \r\n ## read CIGAR string\r\n # M = match, S = soft clipping, D = deletion, N = intron (longer deletion), I = insertion\r\n reCig = re.compile(r\"([0-9]+)(M|S|D|N|I)\")\r\n j, k, l = i, 0, 0\r\n for cig in reCig.finditer(CIGAR):\r\n val,key = int(cig.group(1)), cig.group(2)\r\n #if key == \"S\" and i == j: i += val; j += val\r\n if key in \"MD\" and k == 0: j += val\r\n elif key in \"MD\" and k != 0: l += val\r\n elif key == \"N\" and k == 0: k = j + val; l = j + val\r\n return i,j,k,l\r\n\r\ndef loadData(fname): \r\n ## load data with pickle\r\n with open(f\"{fname}.pcl\", \"r+b\") as pcl_in:\r\n pcl_data = pickle.load(pcl_in)\r\n return pcl_data\r\n\r\ndef saveData(pcl_data, fname):\r\n ## save data with pickle\r\n with open(f\"{fname}.pcl\", \"w+b\") as pcl_out:\r\n pickle.dump(pcl_data, pcl_out , protocol=4)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n genomefasta = sys.argv[1]\r\n inbam = sys.argv[2]\r\n inJunc = sys.argv[3]\r\n #print(infastq)\r\n #exit()\r\n main(genomefasta, inbam, inJunc)\r\n\r\nexit \r\n#some_dictionary[\"new key\"] = 
700\r\n\r\n#line.strip() #for removing characters at the start and end", "repo_name": "AlexisTseng/SPLASH-analysis", "sub_path": "ana_draft_2.py", "file_name": "ana_draft_2.py", "file_ext": "py", "file_size_in_byte": 12708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 89, "usage_type": "call"}, {"api_name": "pysam.AlignmentFile", "line_number": 90, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 279, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 292, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 298, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 302, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 303, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 304, "usage_type": "attribute"}]} +{"seq_id": "74602872804", "text": "import json\nfrom time import time\n\n\nclass IOUnit:\n @staticmethod\n def get_name_from_topic(topic):\n return topic.split(\"/\")[1]\n\n @staticmethod\n def find():\n return \"home/+/info/status\"\n\n def __init__(self, name, mqtt):\n self.name = name\n self.mqtt = mqtt\n self.info = dict()\n self.io_state = dict()\n self.error_log = dict()\n self.info_change_cb = []\n self.io_change_cb = []\n\n def get_name(self):\n return self.name\n\n def get_type(self):\n return self.__class__.__name__\n\n def start_update(self):\n self.mqtt.subscribe(\"home/{}/info/+\".format(self.name), self._mqtt_update_info)\n self.mqtt.subscribe(\"commands/home/{}/+/output\".format(self.name), self._mqtt_update_io_request)\n self.mqtt.subscribe(\"home/{}/+/output\".format(self.name), self._mqtt_update_io_state)\n\n def set_io(self, io, state=True):\n data = {\"commands/home/{}/{}/output\".format(self.name, io): \"on\" if state else \"off\"}\n self.mqtt.publish(data)\n\n def set_info_change_callback(self, callback, property):\n self.info_change_cb.append((callback, property))\n\n def set_io_change_callback(self, callback):\n self.io_change_cb.append(callback)\n\n def _mqtt_update_info(self, topic, payload):\n prop = topic.split(\"/\")[-1]\n self.info[prop] = payload\n\n for tup in self.info_change_cb:\n if tup[1] == prop or tup[1] == \"*\":\n (tup[0])()\n\n def _mqtt_update_io_state(self, topic, payload):\n io = topic.split(\"/\")[-2]\n\n try:\n res = json.loads(payload)\n except ValueError:\n raise RuntimeError(\"Malformed JSON from node {}: {}\".format(self.name, payload))\n\n try:\n if res[\"status\"].lower() == \"ok\":\n value = res[\"value\"]\n else:\n s = \"Received a failed set-io command on io '{}' from node '{}'\".format(io, self.name)\n self.error_log[time()] = s\n print(s)\n return # a failed command carries no value to record\n except KeyError:\n raise RuntimeError(\"Missing key in IOUnit output result: {}\".format(payload))\n\n self._set_io_state(io, value)\n\n def _mqtt_update_io_request(self, topic, payload):\n io = 
topic.split(\"/\")[-2]\n self._set_io_state(io, payload, synced=False)\n\n '''\n Set requested value if synced = 0,\n actual value of synced = True\n '''\n def _set_io_state(self, io, value, synced=True):\n field = \"actual\" if synced else \"requested\"\n\n if not hasattr(self.io_state, io):\n self.io_state[io] = dict()\n\n if value == \"on\":\n self.io_state[io][field] = True\n elif value == \"off\":\n self.io_state[io][field] = False\n else:\n raise RuntimeError(\"Got unknown IO state '{}' from IOUnit {}\".format(value, io))\n\n for cb in self.io_change_cb:\n cb()\n", "repo_name": "daniel-falk/home-automation-central", "sub_path": "controll-panel/devices/device/IOUnit.py", "file_name": "IOUnit.py", "file_ext": "py", "file_size_in_byte": 3203, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "34455191691", "text": "from functools import partial\nfrom typing import Any, Optional\n\nfrom torchvision.prototype.transforms import ImageNetEval\nfrom torchvision.transforms.functional import InterpolationMode\n\nfrom ...models.mobilenetv2 import MobileNetV2\nfrom ._api import WeightsEnum, Weights\nfrom ._meta import _IMAGENET_CATEGORIES\nfrom ._utils import handle_legacy_interface, _ovewrite_named_param\n\n\n__all__ = [\"MobileNetV2\", \"MobileNet_V2_Weights\", \"mobilenet_v2\"]\n\n\nclass MobileNet_V2_Weights(WeightsEnum):\n ImageNet1K_V1 = Weights(\n url=\"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth\",\n transforms=partial(ImageNetEval, crop_size=224),\n meta={\n \"task\": \"image_classification\",\n \"architecture\": \"MobileNetV2\",\n \"publication_year\": 2018,\n \"num_params\": 3504872,\n \"size\": (224, 224),\n \"categories\": _IMAGENET_CATEGORIES,\n \"interpolation\": InterpolationMode.BILINEAR,\n \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2\",\n \"acc@1\": 71.878,\n \"acc@5\": 90.286,\n },\n )\n default = ImageNet1K_V1\n\n\n@handle_legacy_interface(weights=(\"pretrained\", MobileNet_V2_Weights.ImageNet1K_V1))\ndef mobilenet_v2(\n *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any\n) -> MobileNetV2:\n weights = MobileNet_V2_Weights.verify(weights)\n\n if weights is not None:\n _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"]))\n\n model = MobileNetV2(**kwargs)\n\n if weights is not None:\n model.load_state_dict(weights.get_state_dict(progress=progress))\n\n return model\n", "repo_name": "isLinXu/DL_Frame_Models", "sub_path": "Pytorch_vision/torchvision/prototype/models/mobilenetv2.py", "file_name": "mobilenetv2.py", "file_ext": "py", "file_size_in_byte": 1711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "_api.WeightsEnum", "line_number": 16, "usage_type": "name"}, {"api_name": "_api.Weights", "line_number": 17, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.prototype.transforms.ImageNetEval", "line_number": 19, "usage_type": "argument"}, {"api_name": "_meta._IMAGENET_CATEGORIES", "line_number": 26, "usage_type": "name"}, {"api_name": "torchvision.transforms.functional.InterpolationMode.BILINEAR", "line_number": 27, "usage_type": "attribute"}, {"api_name": 
"torchvision.transforms.functional.InterpolationMode", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 38, "usage_type": "name"}, {"api_name": "_utils._ovewrite_named_param", "line_number": 43, "usage_type": "call"}, {"api_name": "models.mobilenetv2.MobileNetV2", "line_number": 45, "usage_type": "call"}, {"api_name": "_utils.handle_legacy_interface", "line_number": 36, "usage_type": "call"}, {"api_name": "models.mobilenetv2.MobileNetV2", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "75366540003", "text": "\"\"\"\r\n@author: onion-nikolay\r\n\"\"\"\r\nimport numpy as np\r\nimport skimage.filters as skf\r\nimport cv2 as cv\r\nfrom inspect import getargspec\r\nfrom helpers import calculateWindow, chooseArgs\r\n\r\nBIN_METHODS_GLOBAL = ['custom', 'mean', 'median', 'otsu', 'triangle', 'li',\r\n 'isodata', 'yen']\r\nBIN_METHODS_ADAPTIVE = ['ad_mean', 'ad_gaussian', 'niblack', 'sauvola',\r\n 'bradley']\r\nBIN_METHODS = BIN_METHODS_ADAPTIVE + BIN_METHODS_GLOBAL\r\n\r\n\r\ndef binarize(image, bin_method, **kwargs):\r\n \"\"\"\\n Returns binarized image.\r\n\r\n Parameters\r\n ----------\r\n image : ndarray, image to be binarized\r\n bin_method : str, name of thresholding method. available methods:\r\n ------------\r\n Globals:\r\n custom : constant threshold (default threshold = 0.5)\r\n args: 'threshold'\r\n mean : threshold = np.mean(image)\r\n args: None\r\n median : threshold = np.median(image)\r\n args: None\r\n otsu : threshold = skimage.filters.threshold_otsu(image, **kwargs)\r\n args: 'nbins'\r\n triangle : threshold = skimage.filters.threshold_triangle(image,\r\n **kwargs)\r\n args: 'nbins'\r\n li : threshold = skimage.filters.threshold_li(image)\r\n args: None\r\n isodata : threshold = skimage.filters.threshold_isodata(image,\r\n **kwargs)\r\n args: 'nbins', 'return_all'\r\n yen: threshold = skimage.filters.threshold_yen(image, **kwargs)\r\n args: 'nbins'\r\n -----------\r\n Locals:\r\n ad_mean : threshold for each pixel is mean of window. For more\r\n imformation see:\r\n https://docs.opencv.org/3.4/d7/d4d/tutorial_py_thresholding.html\r\n args: 'window_size', 'c'\r\n ad_gaussian : in work...\r\n niblack : threshold = skimage.filters.threshold_niblack(image,\r\n **kwargs)\r\n args: 'window_size', 'k'\r\n sauvola : threshold = skimage.filters.threshold_sauvola(image,\r\n **kwargs)\r\n args: 'window_size', 'k', 'r'\r\n bradley : see https://github.com/rmtheis/bradley-adaptive-thresholding\r\n Will be removed by skimage.filters.threshold_bradley in future.\r\n args: 't'\r\n\r\n Returns\r\n -------\r\n binary_image : ndarray (dtype=np.uint8)\r\n \"\"\"\r\n# Should be improved! 
I've done it, because I haven't found another way.\r\n# Current problem is thresholding works well only with uint8 images and\r\n# float images with pixels distributed from 0 to 1.\r\n    def __threshold_custom(image, threshold=0.5):\r\n        if type(image[0, 0]) == np.uint8:\r\n            threshold *= 255\r\n        return threshold\r\n\r\n# Soon it will be replaced by a function from skf.\r\n    def __threshold_bradley(image, t=0.15):\r\n        [width, height] = np.shape(image)\r\n        s1 = width//8\r\n        s2 = s1//2\r\n        integral_image = np.zeros((width*height,))\r\n        threshold_mask = np.zeros([width, height])\r\n\r\n        for i in range(width):\r\n            _sum = 0\r\n            for j in range(height):\r\n                index = j * width + i\r\n                _sum += image[i, j]\r\n                integral_image[index] = integral_image[index-1] + _sum\r\n\r\n        for i in range(width):\r\n            for j in range(height):\r\n                index = j * width + i\r\n                x1 = i - s2\r\n                x2 = i + s2\r\n                y1 = j - s2\r\n                y2 = j + s2\r\n                x1 = 0 if x1 < 0 else x1\r\n                x2 = width-1 if x2 >= width else x2\r\n                y1 = 0 if y1 < 0 else y1\r\n                y2 = height-1 if y2 >= height else y2\r\n                count = (x2-x1)*(y2-y1)\r\n                _sum = integral_image[y2*width+x2] - \\\r\n                    integral_image[y1*width+x2] - \\\r\n                    integral_image[y2*width+x1] + \\\r\n                    integral_image[y1*width+x1]\r\n                threshold_mask[i, j] = _sum*(1.0-t)//count\r\n        return threshold_mask\r\n\r\n# There is an implementation in cv2, but it returns image, not mask, so\r\n# the slower one is used now.\r\n    def __threshold_admean(image, window_size=11, c=0):\r\n        shp = np.shape(image)\r\n        threshold_mask = np.zeros(shp, dtype=np.uint8)\r\n        for x in range(shp[0]):\r\n            for y in range(shp[1]):\r\n                windowed_image = calculateWindow(image, [x, y], window_size)\r\n                threshold_mask[x, y] = np.mean(windowed_image) - c\r\n        return threshold_mask\r\n\r\n    __BIN_METHODS_FUNC = {'mean': np.mean,\r\n                          'median': np.median,\r\n                          'otsu': skf.threshold_otsu,\r\n                          'triangle': skf.threshold_triangle,\r\n                          'li': skf.threshold_li,\r\n                          'isodata': skf.threshold_isodata,\r\n                          'yen': skf.threshold_yen,\r\n                          'niblack': skf.threshold_niblack,\r\n                          'sauvola': skf.threshold_sauvola,\r\n                          'bradley': __threshold_bradley,\r\n                          'ad_mean': __threshold_admean,\r\n                          'ad_gaussian': __threshold_admean,\r\n                          'custom': __threshold_custom}\r\n\r\n    if bin_method not in BIN_METHODS:\r\n        # unknown method: fail fast instead of silently returning the input\r\n        raise ValueError(\"method '{}' is not found.\".format(bin_method))\r\n    else:\r\n        bin_func = __BIN_METHODS_FUNC[bin_method]\r\n        try:\r\n            bin_func_args = dict(zip(getargspec(bin_func)[0][1:],\r\n                                     getargspec(bin_func)[3]))\r\n            threshold = bin_func(image, **chooseArgs(bin_func_args, kwargs))\r\n        except TypeError:\r\n            threshold = bin_func(image)\r\n        return np.uint8(image > threshold)\r\n\r\n\r\ndef quantize(input_image, color_depth=8):\r\n    \"\"\"\\n Returns image quantized to a constant color depth.\r\n\r\n    Parameters\r\n    ----------\r\n    input_image : ndarray (with positive only elements)\r\n    color_depth : int, output color depth (from 2 to 8 bits, default = 8)\r\n\r\n    Returns\r\n    -------\r\n    output_image : ndarray (dtype=np.uint8)\r\n    \"\"\"\r\n    input_image = input_image.astype(float)\r\n    output_image = input_image/input_image.max()*(2**color_depth-1)\r\n    return output_image.astype(np.uint8)\r\n\r\n\r\n# Remove dummy_sizing\r\ndef cfPreprocessing(input_images, field_color=0, **kwargs):\r\n    \"\"\"\\n Returns image, preprocessed for synthesis of correlation filter or\r\n    for correlation pattern recognition. 
The main idea is to find max size of\r\n    all images and CF and place all images on equal square fields with sizes\r\n    2^n.\r\n\r\n    Parameters\r\n    ----------\r\n    input_images : list of ndarray\r\n    field_color : int or 'mean', default=0\r\n        Default field color is black. It can be a number from black to white,\r\n        or 'mean' of input_image.\r\n    **kwargs\r\n        We use it to pass CF to the function if preprocessing is used for CPR\r\n        and size of CF can be bigger than max size of test images.\r\n\r\n    Returns\r\n    -------\r\n    output_images : list of ndarray\r\n    \"\"\"\r\n    dummy_sizing = False\r\n    try:\r\n        corr_filter = kwargs['corr_filter']\r\n        max_size = np.max(np.shape(corr_filter))\r\n    except KeyError:\r\n        max_size = 0\r\n    for set_of_images in input_images:\r\n        for image in set_of_images:\r\n            size = np.max(np.shape(image))\r\n            if size > max_size:\r\n                max_size = size\r\n    sizes = [2**num for num in range(17)]\r\n    max_size = min(num for num in sizes if num >= max_size)\r\n\r\n# It is used for some tests, don't remove it now, until it can be useful or\r\n# some better way of testing will be used.\r\n    if dummy_sizing:\r\n        max_size *= 2\r\n\r\n    output_images = input_images\r\n    for index, set_of_images in enumerate(input_images):\r\n        output_images[index] = square(set_of_images, max_size, field_color)\r\n\r\n    return output_images\r\n\r\n\r\ndef square(input_images, field_size, field_color=0, centered=False):\r\n    \"\"\"\\n Returns images placed on square fields.\r\n\r\n    Parameters\r\n    ----------\r\n    input_image : list of ndarray\r\n    field_size : int\r\n        input_image is placed on field (field_size, field_size).\r\n        Should be 2**n.\r\n    field_color : int or 'mean', default=0\r\n        Default field color is black. It can be a number from black to white,\r\n        or 'mean' of input_image.\r\n    centered : bool, default=False\r\n        If True, place image to the center of field.\r\n\r\n    Returns\r\n    -------\r\n    output_images : list of ndarray\r\n    \"\"\"\r\n    output_images = []\r\n    for input_image in input_images:\r\n\r\n        if field_color == 'mean':\r\n            field_color = np.mean(input_image)\r\n        output_image = np.ones((field_size, field_size))*field_color\r\n        [size1, size2] = np.shape(input_image)\r\n        if field_size < max([size1, size2]):\r\n            raise ValueError(\"field size is less than input image size.\")\r\n        if centered:\r\n            x1 = int(field_size//2-size1//2)\r\n            x2 = int(field_size//2+size1//2) - (size1 % 2)\r\n            y1 = int(field_size//2-size2//2)\r\n            y2 = int(field_size//2+size2//2) - (size2 % 2)\r\n            output_image[x1:x2, y1:y2] = input_image\r\n        else:\r\n            output_image[:size1, :size2] = input_image\r\n        output_images.append(cv.equalizeHist(output_image.astype(np.uint8)))\r\n    return output_images\r\n\r\n\r\ndef cfProcessing(raw_image, processing_method, **kwargs):\r\n    \"\"\"\\n This function turns raw_image of CF into an image that can be used for\r\n    SLM output or simulation. Now it includes quantization, binarization and\r\n    phase addition.\r\n    In progress: phase output, phase-on-amplitude, noises.\r\n\r\n    Parameters\r\n    ----------\r\n    raw_image : ndarray\r\n    processing_method : int, str or list\r\n        If int, should be in range from 2 to 8. 
Use for basic amplitude images.\r\n        If str, should be in BIN_METHODS.\r\n        If list, first element should be int or str like above, second one\r\n        should be in phase.PHASE_SHAPES.\r\n    **kwargs\r\n        Can be used for arguments of thresholding or phase surface.\r\n\r\n    Returns\r\n    -------\r\n    processed_image : ndarray\r\n    \"\"\"\r\n    from phase import phase_surface\r\n    if type(processing_method) is int:\r\n        processed_image = quantize(raw_image, processing_method)\r\n    elif type(processing_method) is str:\r\n        preproc_image = quantize(raw_image, 8)\r\n        processed_image = binarize(preproc_image, processing_method, **kwargs)\r\n    elif type(processing_method) is list:\r\n        try:\r\n            processing_method_0 = int(processing_method[0])\r\n        except ValueError:\r\n            processing_method_0 = processing_method[0]\r\n        if type(processing_method_0) is int:\r\n            processed_image = quantize(raw_image, processing_method_0)\r\n        else:\r\n            processed_image = quantize(raw_image, 8)\r\n            processed_image = binarize(processed_image, processing_method_0,\r\n                                       **kwargs)\r\n        phase = phase_surface(np.shape(processed_image), processing_method[1])\r\n        processed_image = processed_image * np.exp(1j*phase)\r\n    else:\r\n        print(\"Error! Processing method not recognised.\")\r\n        processed_image = raw_image\r\n    return processed_image\r\n", "repo_name": "onion-nikolay/pattern-recognition-basics", "sub_path": "image_processing.py", "file_name": "image_processing.py", "file_ext": "py", "file_size_in_byte": 11517, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.uint8", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 111, "usage_type": "attribute"}, {"api_name": "helpers.calculateWindow", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 119, "usage_type": "attribute"}, {"api_name": "skimage.filters.threshold_otsu", "line_number": 120, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 120, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_triangle", "line_number": 121, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 121, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_li", "line_number": 122, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 122, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_isodata", "line_number": 123, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 123, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_yen", "line_number": 124, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 124, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_niblack", "line_number": 125, "usage_type": "attribute"}, {"api_name": "skimage.filters", "line_number": 125, "usage_type": "name"}, {"api_name": "skimage.filters.threshold_sauvola", "line_number": 126, "usage_type": "attribute"}, {"api_name": "skimage.filters", 
"line_number": 126, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 134, "usage_type": "call"}, {"api_name": "inspect.getargspec", "line_number": 138, "usage_type": "call"}, {"api_name": "inspect.getargspec", "line_number": 139, "usage_type": "call"}, {"api_name": "helpers.chooseArgs", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 235, "usage_type": "call"}, {"api_name": "cv2.equalizeHist", "line_number": 246, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 246, "usage_type": "attribute"}, {"api_name": "phase.phase_surface", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 289, "usage_type": "call"}]} +{"seq_id": "25883999021", "text": "from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom . import models, serializers\nfrom honugram.users import models as user_models\nfrom honugram.users import serializers as user_serializers\nfrom honugram.notifications import views as notification_views\n\n# viewing all image in database.\n# this practic test. 
service dev is not adjust to it.\n\nclass ListAllImages(APIView):\n\n def get(self, request, format=None):\n\n print(request.scheme)\n print(request.body)\n\n all_images = models.Image.objects.all()\n\n serializer = serializers.ImageSerializer(all_images, many=True)\n\n return Response(data=serializer.data)\n\nlist_all_images_view = ListAllImages.as_view()\n\nclass ListAllComments(APIView):\n\n def get(self, request, format=None):\n\n all_comments = models.Comment.objects.all()\n\n serializer = serializers.CommentSerializer(all_comments, many=True)\n\n return Response(data=serializer.data)\n\nlist_all_comments_view = ListAllComments.as_view()\n\n\nclass ListAllLikes(APIView):\n\n def get(self, request, format=None):\n\n all_likes = models.Like.objects.all()\n\n print(request.user.website)\n\n serializer = serializers.LikeSerializer(all_likes, many=True)\n\n return Response(data=serializer.data)\n\nlist_all_likes_view = ListAllLikes.as_view()\n\nclass Images(APIView):\n def get(self, request, format=None):\n\n user = request.user\n\n following_users = user.following.all()\n followers_users = user.followers.all()\n\n print(following_users)\n print(followers_users)\n\n image_list = []\n\n for following_user in following_users:\n\n user_image = following_user.images.all()[:2]\n\n for image in user_image:\n image_list.append(image)\n \n my_images = models.Image.objects.all()[:2]\n\n for image in my_images:\n image_list.append(image)\n \n sorted_image = sorted(image_list, key=lambda image: image.created_at , reverse=True)\n print(sorted_image)\n\n serializer = serializers.ImageSerializer(sorted_image, many=True, context={\"request\": request})\n\n return Response(serializer.data)\n\n def put(self, request, format=None):\n\n user = request.user \n\n serializer = serializers.InputImageSerializer(data=request.data)\n\n if serializer.is_valid():\n\n serializer.save(creator=user)\n\n return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n class Meta():\n ordering = ['-created_at']\n\n# this function changed ramda experssion ( ex> 70 lines)\n# def get_key(image):\n# return image.created_at\n\nimages_view = Images.as_view()\n\nclass LikeImage(APIView):\n def get(self, request, image_id, format=None):\n\n user = request.user\n\n likes = models.Like.objects.filter(image__id=image_id)\n\n like_creator_ids = likes.values('creator_id')\n\n users = user_models.User.objects.filter(id__in=like_creator_ids)\n print(likes.values('creator_id'))\n print(users)\n\n serializer = user_serializers.ListUserSerializer(users, many=True, context={\"request\": request}) \n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n \n\n def post(self, request, image_id, format=None):\n \n user = request.user\n\n try:\n found_image = models.Image.objects.get(id=image_id)\n except models.Image.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND) \n\n try:\n preexisting_like = models.Like.objects.get(\n creator=user,\n image=found_image,\n )\n\n return Response(status=status.HTTP_304_NOT_MODIFIED)\n\n except models.Like.DoesNotExist:\n\n new_like = models.Like.objects.create(\n creator=user,\n image=found_image,\n )\n\n new_like.save()\n\n notification_views.create_notification(\n user, found_image.creator, 'like', found_image)\n \n return Response(status=status.HTTP_201_CREATED)\n\nlike_image_view = LikeImage.as_view()\n\nclass UnlikeImage(APIView):\n def delete(self, request, image_id, 
format=None):\n        \n        user = request.user\n\n        try:\n            found_image = models.Image.objects.get(id=image_id)\n        except models.Image.DoesNotExist:\n            return Response(status=status.HTTP_404_NOT_FOUND) \n\n        try:\n            preexisting_like = models.Like.objects.get(\n                creator=user,\n                image=found_image,\n            )\n\n            preexisting_like.delete()\n\n            return Response(status=status.HTTP_204_NO_CONTENT)\n\n        except models.Like.DoesNotExist:\n            \n            return Response(status=status.HTTP_304_NOT_MODIFIED)\n\nunlike_image_view = UnlikeImage.as_view()\n\nclass CommentOnImage(APIView):\n\n    def post(self, request, image_id, format=None):\n\n        user = request.user\n\n        try:\n            found_image = models.Image.objects.get(id=image_id)\n        except models.Image.DoesNotExist:\n            return Response(status=status.HTTP_404_NOT_FOUND) \n        \n        serializer = serializers.CommentSerializer(data=request.data)\n\n        if serializer.is_valid():\n\n            serializer.save(creator=user, image=found_image)\n\n            notification_views.create_notification(\n                user, found_image.creator, 'comment', found_image, serializer.data['message']\n            )\n\n            return Response(data=serializer.data, status=status.HTTP_201_CREATED)\n        else:\n            return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\ncomment_on_image_view = CommentOnImage.as_view()\n\nclass Comment(APIView):\n    def delete(self, request, comment_id, format=None):\n\n        user = request.user\n\n        try:\n            found_comment = models.Comment.objects.get(id=comment_id, creator=user\n            )\n            found_comment.delete()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        except models.Comment.DoesNotExist:\n            return Response(status=status.HTTP_404_NOT_FOUND)\n\ncomment_view = Comment.as_view()\n\nclass Search(APIView):\n\n    def get(self, request, format=None):\n\n        hashtags = request.query_params.get('hashtags', None)\n\n        if hashtags is not None:\n\n            hashtags = hashtags.split(\",\")\n\n            images = models.Image.objects.filter(\n                tags__name__in=hashtags).distinct()\n\n            serializer = serializers.ImageSerializer(\n                images, many=True, context={'request': request})\n\n            print(serializer.data)\n\n            return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n        else:\n\n            images = models.Image.objects.all()[:20]\n            serializer = serializers.ImageSerializer(\n                images, many=True, context={'request': request})\n            return Response(data=serializer.data, status=status.HTTP_200_OK)\n\nsearch_view = Search.as_view()\n\nclass ModerateComment(APIView):\n    def delete(self, request, image_id, comment_id, format=None):\n\n        user = request.user\n\n        try:\n            comment_to_delete = models.Comment.objects.get(\n                id=comment_id, image__id=image_id, image__creator=user)\n\n            comment_to_delete.delete()\n\n        except models.Comment.DoesNotExist:\n            return Response(status=status.HTTP_404_NOT_FOUND)\n\n        return Response(status=status.HTTP_204_NO_CONTENT)\n    \n\nmoderate_comment_view = ModerateComment.as_view()\n\nclass ImageDetail(APIView):\n\n    def find_own_image(self, image_id, user):\n        try:\n            image = models.Image.objects.get(id=image_id, creator=user)\n            return image\n        except models.Image.DoesNotExist:\n            return None\n\n    def get(self, request, image_id, format=None):\n\n        user = request.user\n\n        try:\n            image = models.Image.objects.get(id=image_id)\n        except models.Image.DoesNotExist:\n            print(\"image not found\")\n            return Response(status=status.HTTP_404_NOT_FOUND)\n\n        serializer = serializers.ImageSerializer(image)\n\n        return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n    def put(self, request, image_id, format=None):\n\n        user = request.user \n\n        image = self.find_own_image(image_id, 
user)\n\n if image is None:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n serializer = serializers.InputImageSerializer(image, data=request.data, partial=True)\n\n if serializer.is_valid():\n\n serializer.save(creator=user)\n\n return Response(data=serializer.data, status=status.HTTP_204_NO_CONTENT)\n \n else: \n\n return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, image_id, format=None):\n user = request.user \n\n image = self.find_own_image(image_id, user)\n \n if image is None:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n image.delete()\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n \nimage_detail_view = ImageDetail.as_view()", "repo_name": "honu1/honugram", "sub_path": "honugram/images/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 9421, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 50, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 54, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 84, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 96, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 96, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 99, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 99, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 99, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 110, "usage_type": "name"}, {"api_name": "honugram.users.models.User.objects.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "honugram.users.models.User", "line_number": 119, "usage_type": "attribute"}, {"api_name": "honugram.users.models", "line_number": 119, "usage_type": "name"}, {"api_name": "honugram.users.serializers.ListUserSerializer", "line_number": 123, "usage_type": "call"}, {"api_name": "honugram.users.serializers", "line_number": 123, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 125, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 125, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 125, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 135, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 135, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 135, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 143, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_304_NOT_MODIFIED", "line_number": 143, "usage_type": "attribute"}, 
{"api_name": "rest_framework.status", "line_number": 143, "usage_type": "name"}, {"api_name": "honugram.notifications.views.create_notification", "line_number": 154, "usage_type": "call"}, {"api_name": "honugram.notifications.views", "line_number": 154, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 157, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 157, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 157, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 161, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 169, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 169, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 169, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 179, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 179, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 179, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 183, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_304_NOT_MODIFIED", "line_number": 183, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 183, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 187, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 196, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 196, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 196, "usage_type": "name"}, {"api_name": "honugram.notifications.views.create_notification", "line_number": 204, "usage_type": "call"}, {"api_name": "honugram.notifications.views", "line_number": 204, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 208, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 208, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 208, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 210, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 210, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 210, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 214, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 223, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 223, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 223, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 225, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 225, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 225, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 229, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 247, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 247, "usage_type": "attribute"}, {"api_name": 
"rest_framework.status", "line_number": 247, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 254, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 254, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 254, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 258, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 270, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 270, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 270, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 272, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 272, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 272, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 277, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 294, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_404_NOT_FOUND", "line_number": 294, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 294, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 298, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 298, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 298, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 307, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 307, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 307, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 315, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 315, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 315, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 319, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 319, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 319, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 327, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 327, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 327, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 331, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_204_NO_CONTENT", "line_number": 331, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 331, "usage_type": "name"}]} +{"seq_id": "70600676965", "text": "'''\nCreated on 15 de ago de 2016\n\n@author: M146545\n'''\nfrom __future__ import print_function\nfrom polyNomial import Poly\nimport random\n\nimport string\nimport matplotlib.pyplot as plt\n#from mpl_toolkits.mplot3d import Axes3D\n\n\nclass Bug(object):\n '''\n classdocs\n '''\n def __init__(self, seed):\n self.seed = seed\n \n def y_move(self, max_x=100, plot=False):\n #d = random.randint(1,26)\n #exp = random.randint(1,100)\n d = 21\n exp = 71\n vec = []\n x=[]\n y=[]\n for k in 
range(100):\n            values = [random.random() for i in range(d)]\n            #values = [float(k/100) for i in range(d)]\n            #x.append(values[0])\n            #y.append(values[1])\n            \n            k = Poly(d=d, exp=exp)\n            r, f = k.built_func(*values)\n            vec.append([values[0]*max_x, r if r <= max_x else max_x]) \n            #vec.append([values[0], values[1], k.built_func(*values)])\n        print (vec)\n        if plot:\n            for i in vec:\n                x.append(i[0])\n                y.append(i[1])\n                #z.append(i[2])\n            \n            xlist = x\n            ylist = y\n            \n            plt.axis([0, max_x, 0, max_x])\n            plt.plot(xlist, ylist)\n            plt.show()\n\na = Bug(42)\na.y_move(plot=True)\n ", "repo_name": "TRBaldim/Studies", "sub_path": "genetic/bug.py", "file_name": "bug.py", "file_ext": "py", "file_size_in_byte": 1313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.random", "line_number": 31, "usage_type": "call"}, {"api_name": "polyNomial.Poly", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "17311630701", "text": "from mininet.topo import Topo\nimport logging\nimport os\n\nfrom mininet.net import Mininet\nfrom mininet.node import CPULimitedHost\nfrom mininet.link import TCLink\nfrom mininet.cli import CLI\n\nlogging.basicConfig(filename='./fattree.log', level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nclass FatTree( Topo ):\n    \n    CoreSwitchList = []\n    AggSwitchList = []\n    EdgeSwitchList = []\n    HostList = []\n    \n    def __init__( self, k):\n        \" Create Fat Tree topo.\"\n        self.pod = k\n        self.iCoreLayerSwitch = (k/2)**2\n        self.iAggLayerSwitch = k*k/2\n        self.iEdgeLayerSwitch = k*k/2\n        self.density = k/2\n        self.iHost = self.iEdgeLayerSwitch * self.density\n        \n        logger.debug(\"Core Layer Switch: %s\", self.iCoreLayerSwitch)\n        logger.debug(\"Agg Layer Switch: %s\", self.iAggLayerSwitch)\n        \n        self.bw_c2a = 0.2\n        self.bw_a2e = 0.1\n        self.bw_h2a = 0.05\n\n        # Init Topo\n        Topo.__init__(self)\n        \n        self.createTopo()\n        logger.debug(\"Finished topology creation!\")\n\n        self.createLink( bw_c2a=self.bw_c2a, \n                         bw_a2e=self.bw_a2e, \n                         bw_h2a=self.bw_h2a)\n        logger.debug(\"Finished adding links!\")\n\n        # self.set_ovs_protocol_13()\n        # logger.debug(\"OF is set to version 1.3!\") \n        \n    def createTopo(self):\n        self.createCoreLayerSwitch(self.iCoreLayerSwitch)\n        self.createAggLayerSwitch(self.iAggLayerSwitch)\n        self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)\n        self.createHost(self.iHost)\n\n    \"\"\"\n    Create Switch and Host\n    \"\"\"\n\n    def _addSwitch(self, number, level, switch_list):\n        for x in xrange(1, number+1):\n            PREFIX = str(level) + \"00\"\n            if x >= int(10):\n                PREFIX = str(level) + \"0\"\n            switch_list.append(self.addSwitch('s' + PREFIX + str(x), stp=True, failMode='standalone'))\n\n    def createCoreLayerSwitch(self, NUMBER):\n        logger.debug(\"Create Core Layer\")\n        self._addSwitch(NUMBER, 1, self.CoreSwitchList)\n\n    def createAggLayerSwitch(self, NUMBER):\n        logger.debug(\"Create Agg Layer\")\n        self._addSwitch(NUMBER, 2, self.AggSwitchList)\n\n    def createEdgeLayerSwitch(self, NUMBER):\n        logger.debug(\"Create Edge Layer\")\n        self._addSwitch(NUMBER, 3, self.EdgeSwitchList)\n\n    
def createHost(self, NUMBER):\n        logger.debug(\"Create Host\")\n\n        for x in xrange(1, NUMBER+1):\n            PREFIX = \"h00\"\n            if x >= int(100):\n                PREFIX = \"h\"\n            elif x >= int(10):\n                PREFIX = \"h0\"\n\n            host_ip = \"10.1.%s.0\" % x\n            self.HostList.append(self.addHost(PREFIX + str(x), ip=host_ip))\n\n    \"\"\"\n    Add Link\n    \"\"\"\n    def createLink(self, bw_c2a=0.2, bw_a2e=0.1, bw_h2a=0.5):\n        logger.debug(\"Add link Core to Agg.\")\n        end = self.pod/2\n        for x in xrange(0, self.iAggLayerSwitch, end):\n            for i in xrange(0, end):\n                for j in xrange(0, end):\n                    linkopts = dict(bw=bw_c2a) \n                    self.addLink(\n                        self.CoreSwitchList[i*end+j],\n                        self.AggSwitchList[x+i],\n                        **linkopts)\n\n        logger.debug(\"Add link Agg to Edge.\")\n        for x in xrange(0, self.iAggLayerSwitch, end):\n            for i in xrange(0, end):\n                for j in xrange(0, end):\n                    linkopts = dict(bw=bw_a2e) \n                    self.addLink(\n                        self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],\n                        **linkopts)\n\n        logger.debug(\"Add link Edge to Host.\")\n        for x in xrange(0, self.iEdgeLayerSwitch):\n            for i in xrange(0, self.density):\n                linkopts = dict(bw=bw_h2a) \n                self.addLink(\n                    self.EdgeSwitchList[x],\n                    self.HostList[self.density * x + i],\n                    **linkopts)\n\n'''\ndef main():\n    topo = FatTree(4)\n\n    net = Mininet(topo=topo, host=CPULimitedHost, link = TCLink)\n    net.start()\n    CLI(net)\n    net.stop()\n\nif __name__ == \"__main__\":\n    main()\n'''\n\n", "repo_name": "ShivamNegi/MPTCP", "sub_path": "fattree.py", "file_name": "fattree.py", "file_ext": "py", "file_size_in_byte": 4153, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "mininet.topo.Topo", "line_number": 13, "usage_type": "name"}, {"api_name": "mininet.topo.Topo.__init__", "line_number": 37, "usage_type": "call"}, {"api_name": "mininet.topo.Topo", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "614217057", "text": "from flask import Flask, render_template, jsonify, request, redirect, url_for\nfrom dotenv import load_dotenv\nfrom flask_sqlalchemy import SQLAlchemy\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom config import Config\nfrom forms import TaskForm\n\nload_dotenv('./.flaskenv')\n\n# Create the Flask application instance and load the settings from the Config object\napp = Flask(__name__)\napp.config.from_object(Config)\n\n# Create a SQLAlchemy instance to handle the database\ndb = SQLAlchemy(app)\n\n# Define the model class for the \"tasks\" table using DataClass and SQLAlchemy\n@dataclass\nclass Task(db.Model):\n    id: int\n    title: str\n    date: datetime\n    completed: bool\n\n    id = db.Column(db.Integer(), primary_key=True)\n    title = db.Column(db.String(140))\n    date = db.Column(db.DateTime(), default=datetime.now)\n    completed = db.Column(db.Boolean(), default=False)\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def __repr__(self):\n        return f''\n\n# Define the home page route\n@app.route('/')\ndef index():\n    tasks = Task.query.all()\n    # Return a JSON response if the request came from an Ajax request\n    if request.headers.get('X-Requested-With') == 'XMLHttpRequest':\n        return jsonify(tasks)\n    # Return the default HTML page with the existing tasks\n    return render_template('index.html')\n\n# Define the route for creating a new 
task\n@app.route('/create', methods=['POST'])\ndef create_task():\n    # Get the user input sent as JSON data\n    user_input = request.get_json()\n\n    # Validate the form against the schema defined in \"TaskForm\"\n    form = TaskForm(data=user_input)\n    # If the form is valid, create a new task and add it to the database\n    if form.validate():\n        task = Task(title=form.title.data)\n        print(task)\n        db.session.add(task)\n        db.session.commit()\n        # Return the newly created task as a JSON response\n        return jsonify(task)\n\n    # If there is an error in the form, redirect to the home page\n    print('error', user_input)\n    return redirect(url_for('index'))\n\n# Define the route for deleting a task\n@app.route('/delete', methods=['POST'])\ndef delete_task():\n    # Get the ID of the task sent as JSON data\n    task_id = request.get_json().get('id')\n    \n    # Get the task with the given ID from the database and delete it\n    task = Task.query.filter_by(id=task_id).first()\n    db.session.delete(task)\n    db.session.commit()\n\n    # Return a JSON success response\n    return jsonify({'result':'okay'}),200\n\n# Define the route for marking a task as completed\n@app.route('/complete',methods=['POST'])\ndef complete_task():\n    # Get the ID of the task sent as JSON data\n    task_id = request.get_json().get('id')\n    task = Task.query.filter_by(id=task_id).first()\n\n    # Get the task with the given ID from the database and mark it as completed\n    task.completed = True\n    db.session.add(task)\n    db.session.commit()\n\n    return jsonify({'result':'okay'}),200\n\n\n@app.route('/getTasks')\ndef get_tasks():\n    tasks = Task.query.all()\n    return jsonify(tasks)\n    \n    \nif __name__ == '__main__':\n    app.run()\n", "repo_name": "LimeHawk/flask-vue-todo", "sub_path": "app/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3302, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "config.Config", "line_number": 13, "usage_type": "argument"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 42, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "forms.TaskForm", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request", 
"line_number": 72, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 94, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "73640576166", "text": "import logging\nfrom typing import Any, Dict, List, Optional\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.utils.dateparse import parse_date\nfrom django.utils.timezone import now\nfrom django.utils.translation import gettext as _\nfrom jutil.admin import admin_log\nfrom jutil.format import choices_label\nfrom jutil.parse import parse_datetime\nfrom jutil.xml import xml_to_dict\nfrom jsanctions.helpers import get_country_iso2_code\nfrom jsanctions.models import (\n SanctionsListFile,\n SanctionEntity,\n NameAlias,\n Remark,\n Address,\n Identification,\n SanctionListObject,\n SubjectType,\n)\n\nlogger = logging.getLogger(__name__)\n\nUN_LIST_TYPE = \"UN\"\n\nUN_XML_ARRAY_TAGS = [\n \"VALUE\",\n \"INDIVIDUAL\",\n \"INDIVIDUAL_ALIAS\",\n \"INDIVIDUAL_ADDRESS\",\n \"INDIVIDUAL_DATE_OF_BIRTH\",\n \"INDIVIDUAL_PLACE_OF_BIRTH\",\n \"INDIVIDUAL_DOCUMENT\",\n \"ENTITY\",\n \"ENTITY_ALIAS\",\n \"ENTITY_ADDRESS\",\n]\n\nUN_NAME_FIELDS = [\"FIRST_NAME\", \"SECOND_NAME\", \"THIRD_NAME\", \"FOURTH_NAME\", \"FIFTH_NAME\", \"SIXTH_NAME\"]\n\n\ndef load_un_sanction_list_as_dict(filename: str) -> Dict[str, Any]:\n with open(filename, \"rb\") as fp:\n data: Dict[str, Any] = xml_to_dict(fp.read(), array_tags=UN_XML_ARRAY_TAGS)\n return data\n\n\ndef parse_un_data_id(data: Dict[str, Any]) -> int:\n uid = data.get(\"DATAID\")\n if uid is None:\n raise ValidationError(_(\"DATAID missing\"))\n return int(uid)\n\n\ndef create_un_alias(se: SanctionEntity, **kwargs) -> Optional[NameAlias]:\n names = []\n for k in UN_NAME_FIELDS:\n if k in kwargs and kwargs[k]:\n names.append(kwargs[k])\n if not names:\n logger.warning(\"No names: %s\", kwargs)\n return None\n\n alias = NameAlias(sanction=se, logical_id=parse_un_data_id(kwargs))\n alias.title = kwargs.get(\"TITLE\") or \"\"\n alias.last_name = names.pop() or \"\"\n alias.first_name = \" \".join(names).strip()\n alias.full_clean()\n alias.save()\n return alias\n\n\ndef create_un_comments(se: SanctionEntity, **kwargs) -> List[Remark]:\n out: List[Remark] = []\n for n in range(1, 10):\n k = \"COMMENTS{}\".format(n)\n if k in kwargs and kwargs[k]:\n obj_out = Remark(container=se, text=kwargs.get(k) or \"\") # type: ignore\n obj_out.full_clean()\n obj_out.save()\n out.append(obj_out)\n else:\n break\n return out\n\n\ndef create_un_note(obj: SanctionListObject, note: Any):\n if note:\n remark = Remark(container=obj, text=str(note))\n remark.full_clean()\n remark.save()\n\n\ndef create_un_address(se: SanctionEntity, **kwargs) -> Address:\n # {'STATE_PROVINCE', 'NOTE', 'COUNTRY', 'STREET', 'CITY', 'ZIP_CODE'}\n address = Address(sanction=se)\n address.region = kwargs.get(\"STATE_PROVINCE\") or \"\"\n address.city = kwargs.get(\"CITY\") or \"\"\n address.zip_code = kwargs.get(\"ZIP_CODE\") or \"\"\n address.country_description = kwargs.get(\"COUNTRY\") or \"\"\n address.street = kwargs.get(\"STREET\") or \"\"\n for k, v in kwargs.items():\n if hasattr(address, k):\n setattr(address, k, v)\n address.full_clean()\n address.save()\n create_un_note(address, kwargs.get(\"NOTE\"))\n 
return address\n\n\ndef create_un_document(se: SanctionEntity, **kwargs) -> Identification:\n    # {'DATE_OF_ISSUE', 'NUMBER', 'NOTE', 'ISSUING_COUNTRY', 'CITY_OF_ISSUE', 'COUNTRY_OF_ISSUE',\n    #  'TYPE_OF_DOCUMENT', 'TYPE_OF_DOCUMENT2'}\n    id_obj = Identification(sanction=se)\n    id_obj.identification_type_description = kwargs.get(\"TYPE_OF_DOCUMENT\") or kwargs.get(\"TYPE_OF_DOCUMENT2\") or \"\"\n    id_obj.issue_date = parse_date(str(kwargs.get(\"DATE_OF_ISSUE\"))) if kwargs.get(\"DATE_OF_ISSUE\") else None  # type: ignore\n    id_obj.latin_number = kwargs.get(\"NUMBER\") or \"\"\n    id_obj.issued_by = \"{} {} {}\".format(kwargs.get(\"CITY_OF_ISSUE\") or \"\", kwargs.get(\"COUNTRY_OF_ISSUE\") or \"\", kwargs.get(\"ISSUING_COUNTRY\") or \"\").strip()\n    id_obj.country_description = kwargs.get(\"COUNTRY_OF_ISSUE\") or kwargs.get(\"ISSUING_COUNTRY\") or \"\"\n    id_obj.full_clean()\n    id_obj.save()\n    create_un_note(id_obj, kwargs.get(\"NOTE\"))\n    return id_obj\n\n\ndef set_un_members(  # noqa\n    se: SanctionEntity,\n    data: Dict[str, Any],\n    verbose: bool = False,\n    padding: int = 0,\n):\n    # DATAID\n    se.logical_id = parse_un_data_id(data)\n\n    # FIRST_NAME, ...\n    create_un_alias(se, **data)\n\n    # COMMENTSx\n    create_un_comments(se, **data)\n\n    # INDIVIDUAL_ADDRESS / ENTITY_ADDRESS\n    address_list = data.get(\"INDIVIDUAL_ADDRESS\", []) or data.get(\"ENTITY_ADDRESS\", [])\n    addresses: List[Address] = []\n    if address_list:\n        for e_data in address_list:\n            if e_data:\n                addresses.append(create_un_address(se, **e_data))\n\n    # try to fill address information from UN list name\n    if not addresses:\n        un_list_type = data.get(\"UN_LIST_TYPE\")\n        if un_list_type:\n            country_code = get_country_iso2_code(un_list_type)\n            if country_code:\n                create_un_address(se, country_description=un_list_type, country_code=country_code)\n\n    # INDIVIDUAL_DOCUMENT\n    docs = data.get(\"INDIVIDUAL_DOCUMENT\")\n    if docs:\n        for e_data in docs:\n            if e_data:\n                create_un_document(se, **e_data)\n\n    se.full_clean()\n    se.save()\n    if verbose:\n        logger.debug(\"%sSaved %s\", padding * \" \", se)\n\n\ndef import_un_sanctions(source: SanctionsListFile, verbose: bool = False):\n    data = load_un_sanction_list_as_dict(source.full_path)\n    generation_date_str = data.get(\"@dateGenerated\") or data.get(\"@generationDate\")\n    if not generation_date_str:\n        raise Exception(\"Generation date missing\")\n    source.generation_date = parse_datetime(generation_date_str).date()\n\n    enterprise, created = SubjectType.objects.get_or_create(classification_code=SubjectType.ENTERPRISE)\n    assert isinstance(enterprise, SubjectType)\n    if created or not enterprise.code:\n        enterprise.code = choices_label(SubjectType.CLASSIFICATION_CODES, enterprise.classification_code)\n        enterprise.save()\n    person, created = SubjectType.objects.get_or_create(classification_code=SubjectType.PERSON)\n    assert isinstance(person, SubjectType)\n    if created or not person.code:\n        person.code = choices_label(SubjectType.CLASSIFICATION_CODES, person.classification_code)\n        person.save()\n\n    t0 = now()\n    individuals_list = data.get(\"INDIVIDUALS\", {}).get(\"INDIVIDUAL\")\n    for se_data in individuals_list:\n        assert isinstance(se_data, dict)\n        if verbose:\n            
logger.debug(\" sdnEntry uid %s\", se_data.get(\"uid\"))\n with transaction.atomic():\n se = SanctionEntity.objects.create(source=source, data=se_data, subject_type=enterprise)\n set_un_members(se, se_data, verbose=verbose, padding=4)\n\n source.imported = now()\n source.save()\n msg = \"Imported {} sanction entities and {} individuals from {} in {}\".format(\n len(entities_list), len(individuals_list), source.full_path, source.imported - t0\n )\n logger.info(msg)\n admin_log([source], msg)\n", "repo_name": "kajala/django-jsanctions", "sub_path": "jsanctions/un.py", "file_name": "un.py", "file_ext": "py", "file_size_in_byte": 7540, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 24, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 46, "usage_type": "name"}, {"api_name": "jutil.xml.xml_to_dict", "line_number": 46, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 50, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 53, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 53, "usage_type": "call"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 57, "usage_type": "name"}, {"api_name": "jsanctions.models.NameAlias", "line_number": 66, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 57, "usage_type": "name"}, {"api_name": "jsanctions.models.NameAlias", "line_number": 57, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 76, "usage_type": "name"}, {"api_name": "jsanctions.models.Remark", "line_number": 76, "usage_type": "name"}, {"api_name": "jsanctions.models.Remark", "line_number": 80, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 75, "usage_type": "name"}, {"api_name": "jsanctions.models.Remark", "line_number": 75, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionListObject", "line_number": 89, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 89, "usage_type": "name"}, {"api_name": "jsanctions.models.Remark", "line_number": 91, "usage_type": "call"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 96, "usage_type": "name"}, {"api_name": "jsanctions.models.Address", "line_number": 98, "usage_type": "call"}, {"api_name": "jsanctions.models.Address", "line_number": 96, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 113, "usage_type": "name"}, {"api_name": "jsanctions.models.Identification", "line_number": 116, "usage_type": "call"}, {"api_name": "django.utils.dateparse.parse_date", "line_number": 118, "usage_type": "call"}, {"api_name": "jsanctions.models.Identification", "line_number": 113, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 145, "usage_type": "name"}, {"api_name": 
"jsanctions.models.Address", "line_number": 145, "usage_type": "name"}, {"api_name": "jsanctions.helpers.get_country_iso2_code", "line_number": 155, "usage_type": "call"}, {"api_name": "jsanctions.models.SanctionsListFile", "line_number": 172, "usage_type": "name"}, {"api_name": "jutil.parse.parse_datetime", "line_number": 177, "usage_type": "call"}, {"api_name": "jsanctions.models.SubjectType.objects.get_or_create", "line_number": 179, "usage_type": "call"}, {"api_name": "jsanctions.models.SubjectType.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 179, "usage_type": "name"}, {"api_name": "jsanctions.models.SubjectType.ENTERPRISE", "line_number": 179, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 180, "usage_type": "argument"}, {"api_name": "jutil.format.choices_label", "line_number": 182, "usage_type": "call"}, {"api_name": "jsanctions.models.SubjectType.CLASSIFICATION_CODES", "line_number": 182, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 182, "usage_type": "name"}, {"api_name": "jsanctions.models.SubjectType.objects.get_or_create", "line_number": 184, "usage_type": "call"}, {"api_name": "jsanctions.models.SubjectType.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 184, "usage_type": "name"}, {"api_name": "jsanctions.models.SubjectType.PERSON", "line_number": 184, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 185, "usage_type": "argument"}, {"api_name": "jutil.format.choices_label", "line_number": 187, "usage_type": "call"}, {"api_name": "jsanctions.models.SubjectType.CLASSIFICATION_CODES", "line_number": 187, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SubjectType", "line_number": 187, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 190, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 196, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 196, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionEntity.objects.create", "line_number": 197, "usage_type": "call"}, {"api_name": "jsanctions.models.SanctionEntity.objects", "line_number": 197, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 197, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 205, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 205, "usage_type": "name"}, {"api_name": "jsanctions.models.SanctionEntity.objects.create", "line_number": 206, "usage_type": "call"}, {"api_name": "jsanctions.models.SanctionEntity.objects", "line_number": 206, "usage_type": "attribute"}, {"api_name": "jsanctions.models.SanctionEntity", "line_number": 206, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 209, "usage_type": "call"}, {"api_name": "jutil.admin.admin_log", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "17326078779", "text": "\"\"\"Set up Navigation, ACL & Logos\"\"\"\n\nimport mara_pipelines\nimport mara_data_explorer\nimport flask\nimport mara_acl\nimport mara_acl.users\nimport mara_app\nimport mara_app.layout\nimport mara_db\nimport mara_page.acl\nfrom mara_app import monkey_patch\nfrom mara_page import acl\nfrom mara_page import navigation\n\nfrom app.ui import start_page\n\nblueprint = 
flask.Blueprint('ui', __name__, url_prefix='/ui', static_folder='static')\n\n\ndef MARA_FLASK_BLUEPRINTS():\n return [start_page.blueprint, blueprint]\n\n\n# replace logo and favicon\nmonkey_patch.patch(mara_app.config.favicon_url)(lambda: flask.url_for('ui.static', filename='favicon.ico'))\nmonkey_patch.patch(mara_app.config.logo_url)(lambda: flask.url_for('ui.static', filename='logo.png'))\n\n\n# add custom css\n@monkey_patch.wrap(mara_app.layout.css_files)\ndef css_files(original_function, response):\n files = original_function(response)\n files.append(flask.url_for('ui.static', filename='styles.css'))\n return files\n\n\n# define protected ACL resources\n@monkey_patch.patch(mara_acl.config.resources)\ndef acl_resources():\n return [acl.AclResource(name='Documentation',\n children=[mara_pipelines.MARA_ACL_RESOURCES().get('Pipelines'),\n mara_db.MARA_ACL_RESOURCES().get('DB Schema')]),\n acl.AclResource(name='Data',\n children=mara_data_explorer.MARA_ACL_RESOURCES().values()),\n acl.AclResource(name='Admin',\n children=[mara_app.MARA_ACL_RESOURCES().get('Configuration'),\n mara_acl.MARA_ACL_RESOURCES().get('Acl')])]\n\n\n# activate ACL\nmonkey_patch.patch(mara_page.acl.current_user_email)(mara_acl.users.current_user_email)\nmonkey_patch.patch(mara_page.acl.current_user_has_permissions)(mara_acl.permissions.current_user_has_permissions)\nmonkey_patch.patch(mara_page.acl.user_has_permissions)(mara_acl.permissions.user_has_permissions)\n\nmonkey_patch.patch(mara_acl.config.whitelisted_uris)(lambda: ['/mara-app/navigation-bar'])\n\n\n# navigation bar (other navigation entries will be automatically added)\n@monkey_patch.patch(mara_app.config.navigation_root)\ndef navigation_root() -> navigation.NavigationEntry:\n return navigation.NavigationEntry(label='Root', children=[\n mara_pipelines.MARA_NAVIGATION_ENTRIES().get('Pipelines'),\n mara_data_explorer.MARA_NAVIGATION_ENTRIES().get('Explore'),\n mara_db.MARA_NAVIGATION_ENTRIES().get('DB Schema'),\n navigation.NavigationEntry(\n 'Settings', icon='cog', description='ACL & Configuration', rank=100,\n children=[mara_app.MARA_NAVIGATION_ENTRIES().get('Package Configs'),\n mara_acl.MARA_NAVIGATION_ENTRIES().get('Acl')])])\n", "repo_name": "mara/mara-example-project-2", "sub_path": "app/ui/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 174, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 18, "usage_type": "call"}, {"api_name": "app.ui.start_page.blueprint", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.ui.start_page", "line_number": 22, "usage_type": "name"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 26, "usage_type": "name"}, {"api_name": "mara_app.config", "line_number": 26, "usage_type": "attribute"}, {"api_name": "flask.url_for", "line_number": 26, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 27, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 27, "usage_type": "name"}, {"api_name": "mara_app.config", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 34, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch.wrap", "line_number": 31, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", 
"line_number": 31, "usage_type": "name"}, {"api_name": "mara_app.layout", "line_number": 31, "usage_type": "attribute"}, {"api_name": "mara_page.acl.AclResource", "line_number": 41, "usage_type": "call"}, {"api_name": "mara_page.acl", "line_number": 41, "usage_type": "name"}, {"api_name": "mara_pipelines.MARA_ACL_RESOURCES", "line_number": 42, "usage_type": "call"}, {"api_name": "mara_db.MARA_ACL_RESOURCES", "line_number": 43, "usage_type": "call"}, {"api_name": "mara_page.acl.AclResource", "line_number": 44, "usage_type": "call"}, {"api_name": "mara_page.acl", "line_number": 44, "usage_type": "name"}, {"api_name": "mara_data_explorer.MARA_ACL_RESOURCES", "line_number": 45, "usage_type": "call"}, {"api_name": "mara_page.acl.AclResource", "line_number": 46, "usage_type": "call"}, {"api_name": "mara_page.acl", "line_number": 46, "usage_type": "name"}, {"api_name": "mara_app.MARA_ACL_RESOURCES", "line_number": 47, "usage_type": "call"}, {"api_name": "mara_acl.MARA_ACL_RESOURCES", "line_number": 48, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 39, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 39, "usage_type": "name"}, {"api_name": "mara_acl.config", "line_number": 39, "usage_type": "attribute"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 52, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 52, "usage_type": "name"}, {"api_name": "mara_page.acl.acl", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mara_page.acl", "line_number": 52, "usage_type": "name"}, {"api_name": "mara_acl.users", "line_number": 52, "usage_type": "attribute"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 53, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 53, "usage_type": "name"}, {"api_name": "mara_page.acl.acl", "line_number": 53, "usage_type": "attribute"}, {"api_name": "mara_page.acl", "line_number": 53, "usage_type": "name"}, {"api_name": "mara_acl.permissions", "line_number": 53, "usage_type": "attribute"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 54, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 54, "usage_type": "name"}, {"api_name": "mara_page.acl.acl", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mara_page.acl", "line_number": 54, "usage_type": "name"}, {"api_name": "mara_acl.permissions", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mara_app.monkey_patch.patch", "line_number": 56, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 56, "usage_type": "name"}, {"api_name": "mara_acl.config", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mara_page.navigation.NavigationEntry", "line_number": 62, "usage_type": "call"}, {"api_name": "mara_page.navigation", "line_number": 62, "usage_type": "name"}, {"api_name": "mara_pipelines.MARA_NAVIGATION_ENTRIES", "line_number": 63, "usage_type": "call"}, {"api_name": "mara_data_explorer.MARA_NAVIGATION_ENTRIES", "line_number": 64, "usage_type": "call"}, {"api_name": "mara_db.MARA_NAVIGATION_ENTRIES", "line_number": 65, "usage_type": "call"}, {"api_name": "mara_page.navigation.NavigationEntry", "line_number": 66, "usage_type": "call"}, {"api_name": "mara_page.navigation", "line_number": 66, "usage_type": "name"}, {"api_name": "mara_app.MARA_NAVIGATION_ENTRIES", "line_number": 68, "usage_type": "call"}, {"api_name": "mara_acl.MARA_NAVIGATION_ENTRIES", "line_number": 69, "usage_type": "call"}, 
{"api_name": "mara_app.monkey_patch.patch", "line_number": 60, "usage_type": "call"}, {"api_name": "mara_app.monkey_patch", "line_number": 60, "usage_type": "name"}, {"api_name": "mara_app.config", "line_number": 60, "usage_type": "attribute"}, {"api_name": "mara_page.navigation.NavigationEntry", "line_number": 61, "usage_type": "attribute"}, {"api_name": "mara_page.navigation", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "16172044483", "text": "import h5py\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\nimport os\n\nf = h5py.File('train/digitStruct.mat', 'r')\nbboxs = f['digitStruct/bbox']\nnames = f['digitStruct/name']\n\n\ndef get_img_boxes(f, annotations, idx=0):\n ann = {key: [] for key in ['height', 'left', 'top', 'width', 'label']}\n meta = {}\n box = f[bboxs[idx][0]]\n name = f[names[idx][0]]\n for key in box.keys():\n if box[key].shape[0] == 1:\n ann[key].append(float(box[key][0][0]))\n else:\n for i in range(box[key].shape[0]):\n ann[key].append(float(f[box[key][i][0]][()].item()))\n\n file_name = ''.join([chr(v) for v in name])\n img = Image.open('./train/' + file_name)\n meta['filename'] = os.path.splitext(file_name)[0]\n meta['width'] = img.width\n meta['height'] = img.height\n obj_count = 0\n for left, top, width, height, label in zip(ann['left'], ann['top'],\n ann['width'], ann['height'],\n ann['label']):\n meta['x0'] = left\n meta['y0'] = top\n meta['x1'] = left + width\n meta['y1'] = top + height\n meta['label'] = int(label)\n annotations = annotations.append(pd.DataFrame(meta, index=[0]))\n\n return annotations\n\n\nannotations = pd.DataFrame(\n columns=[\"filename\", \"width\", \"height\", \"x0\", \"y0\", \"x1\", \"y1\", \"label\"])\n\nfor i in range(0, 10):\n annotations = get_img_boxes(f, annotations, i)\nannotations.to_csv(\"train/train_ann.csv\", index=False)", "repo_name": "sweiichen/digits-detection", "sub_path": "generate_csv_data.py", "file_name": "generate_csv_data.py", "file_ext": "py", "file_size_in_byte": 1544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "h5py.File", "line_number": 7, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "10112806117", "text": "from torch.utils.data import Dataset\nfrom glob import glob\nimport numpy as np\nimport os\n\n\nclass SpeechDataset(Dataset):\n def __init__(self, feature_dir, features_to_use):\n super().__init__()\n self.metadata = self.generate_metadata(feature_dir, features_to_use)\n self.features_to_use = features_to_use\n\n def generate_metadata(self, feature_dir, features):\n\n first_feature = features[0]\n\n feat_path_list = glob(os.path.join(feature_dir, first_feature, '*.npy'))\n basename_list = [os.path.basename(fp) for fp in feat_path_list]\n\n metadata = []\n\n for basename in basename_list:\n meta = {}\n meta['speaker'] = basename.split('_')[0]\n for feat in features:\n meta[feat] = os.path.join(feature_dir, feat, basename)\n metadata.append(meta)\n\n print(f\"Loaded total {len(metadata)} features - What kind of features: {features}\")\n return metadata\n\n def __len__(self):\n 
return len(self.metadata)\n\n    def __getitem__(self, index):\n        data = {\n            'speaker': self.metadata[index]['speaker']\n        }\n        for feat in self.features_to_use:\n            data[feat] = np.load(self.metadata[index][feat], allow_pickle=True).item()[feat]\n        return data\n", "repo_name": "prairie-schooner/wav2vec-vc", "sub_path": "dataload/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 1297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 7, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "10427675892", "text": "import numpy as np\nfrom sklearn.cluster import MeanShift\nfrom ipywidgets import interact, FloatSlider, fixed\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import figaspect\nfrom matplotlib.patches import Circle, Patch\nimport seaborn as sns\n\nresolution = 200\n\n\ndef plot(bandwidth, df):\n    x = df.values\n    model = MeanShift(bandwidth=bandwidth, n_jobs=-1)\n    model.fit(x)\n    plt.figure(figsize=figaspect(1))\n    ax = plt.axes()\n    ax.set(aspect='equal')\n    df.plot.scatter(df.columns[0], df.columns[1], c='black', s=10, ax=ax)\n    xlim, ylim = ax.get_xlim(), ax.get_ylim()\n    side = max(xlim[1] - xlim[0], ylim[1] - ylim[0])\n    x_center, y_center = sum(xlim) / 2, sum(ylim) / 2\n    xx, yy = np.meshgrid(\n        np.linspace(x_center - side / 2, x_center + side / 2, resolution),\n        np.linspace(y_center - side / 2, y_center + side / 2, resolution))\n    grid = np.c_[xx.ravel(), yy.ravel()]\n    pred = model.predict(grid)\n    palette = sns.color_palette()\n    ax.scatter(\n        xx,\n        yy,\n        c=[palette[i] for i in pred],\n        marker='.',\n        alpha=0.2,\n        edgecolors='none')\n    centers = model.cluster_centers_\n    ax.scatter(\n        centers[:, 0],\n        centers[:, 1],\n        marker='x',\n        s=100,\n        c='blue',\n        label='Cluster centers')\n    for center in centers:\n        ax.add_patch(\n            Circle(\n                xy=center,\n                radius=bandwidth,\n                fill=False,\n                edgecolor='black',\n                linestyle='--'))\n    handles, labels = ax.get_legend_handles_labels()\n    handles.extend([Patch(color=palette[i]) for i in range(len(centers))])\n    labels.extend([f'Cluster {i + 1}' for i in range(len(centers))])\n    ax.legend(handles, labels)\n    ax.set(\n        xlim=(xx.min(), xx.max()),\n        ylim=(yy.min(), yy.max()),\n        xticks=(),\n        yticks=())\n    plt.show()\n\n\ndef show(df):\n    bandwidth = FloatSlider(\n        value=0.5,\n        min=0.3,\n        max=0.9,\n        step=0.1,\n        description='Distance',\n        readout_format='.1f',\n        continuous_update=False)\n    interact(plot, bandwidth=bandwidth, df=fixed(df))\n", "repo_name": "ScenesK/data-science-lecture", "sub_path": "workspace/beginner/my_functions/mean_shift/visualization.py", "file_name": "visualization.py", "file_ext": "py", "file_size_in_byte": 2163, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "sklearn.cluster.MeanShift", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 16, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.figure.figaspect", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 26, "usage_type": "attribute"}, {"api_name": "seaborn.color_palette", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.patches.Circle", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.patches.Patch", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "ipywidgets.FloatSlider", "line_number": 65, "usage_type": "call"}, {"api_name": "ipywidgets.interact", "line_number": 73, "usage_type": "call"}, {"api_name": "ipywidgets.fixed", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "73810198564", "text": "import numpy as np\nfrom gym.envs.robotics import rotations, robot_env, utils\nimport math\nimport mujoco_py\nimport os\nimport xml.etree.ElementTree as et\nimport gym\nfrom gym import error, spaces\nfrom gym.utils import seeding\nimport copy\nimport env.robot.gym_utils as utils # Modified some gym utils to incorporate multiple bodies in mocap\nimport cv2\n\nimport time\nimport matplotlib.pyplot as plt\nimport torch\n\nDEFAULT_SIZE = 500\n\ndef get_full_asset_path(relative_path):\n return os.path.join(os.path.dirname(__file__), 'assets', relative_path)\n\nclass BaseEnv(robot_env.RobotEnv):\n \"\"\"Superclass for all robot environments.\n \"\"\"\n def __init__(\n\n self, model_path, cameras, n_substeps=20, gripper_rotation=[0,1,0,0], \n has_object=False, image_size=84, reset_free=False, distance_threshold=0.01, action_penalty=0,\n observation_type='state+image', reward_type='dense', reward_bonus=True, use_xyz=False, action_scale=0.05, render=False\n ):\n \"\"\"Initializes a new robot environment.\n Args:\n model_path (string): path to the environments XML file\n cameras (int): Camera/s to be used. Description in src/arguments.py\n n_substeps (int): number of substeps the simulation runs on every call to step\n gripper_rotation (array): fixed rotation of the end effector, expressed as a quaternion\n has_object (boolean): whether or not the environment has an object\n image_size (int): size of image observations, if applicable\n reset_free (boolean): whether the arm configuration is reset after each episode\n distance_threshold (float): the threshold after which a goal is considered achieved\n action_penalty (float): scalar multiplier that penalizes high magnitude actions\n observation_type ('image' or 'state+image'): the observation type\n reward_type ('sparse' or 'dense'): the reward type, i.e. 
sparse or dense\n            reward_bonus (boolean): whether bonuses should be given for subgoals (only for dense rewards)\n            use_xyz (boolean): whether movement is in 3d (xyz) or 2d (xy)\n\t\t\taction_scale (float): coefficient that scales position change\n        \"\"\"\n        self.xml_dir = '/'.join(model_path.split('/')[:-1])\n        self.reference_xml = et.parse(model_path)\n        self.root = self.reference_xml.getroot()\n        self.n_substeps = n_substeps\n        self.gripper_rotation = np.array(gripper_rotation, dtype=np.float32)\n        self.has_object = has_object\n        self.distance_threshold = distance_threshold\n        self.action_penalty = action_penalty\n        self.observation_type = observation_type\n        self.reward_type = reward_type\n        self.image_size = image_size\n        self.reset_free = reset_free\n        self.reward_bonus = reward_bonus\n        self.use_xyz = use_xyz\n        self.action_scale = action_scale\n        self.closed_angle = 0\n\n        # Robot workspace configurations\n        self.center_of_table = np.array([1.655, 0.3, 0.53625])\n        self.default_z_offset = 0.04\n        self.max_z = 1.0\n        self.min_z = 0.6\n        self.state_dim = 4 if use_xyz else 3\n\n        self.state_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.state_dim,), dtype=np.float32)\n        self.state_space_shape = self.state_space.shape\n\n        self.render_for_human = render\n        self.cameras = cameras\n\n        super(BaseEnv, self).__init__(\n            model_path=model_path, n_substeps=n_substeps, n_actions=4,\n            initial_qpos={})\n    \n    def goal_distance(self, goal1, goal2, use_xyz):\n        assert goal1.shape == goal2.shape\n        goal1 = np.around(goal1, 4)\n        goal2 = np.around(goal2, 4)\n        return np.linalg.norm(goal1 - goal2, axis=-1)\n\n    # GoalEnv methods\n    # ----------------------------\n    def compute_reward(self, achieved_goal, goal, info):\n        raise NotImplementedError('Reward signal has not been implemented for this task!')\n\n    # Gripper helper\n    # ----------------------------\n    def _gripper_sync(self):\n        self.sim.data.qpos[10] = 0.1\n\n        self.sim.data.qpos[12] = -0.5\n\n        # print(self.sim.data.qpos)\n        # print('shape', self.sim.data.qpos.shape)\n    \n    def _gripper_consistent(self, angle):\n        return 0\n    \n    # RobotEnv methods\n    # ----------------------------\n    def _step_callback(self):\n        self.sim.forward()\n\n    def _limit_gripper(self, gripper_pos, pos_ctrl):\n        return pos_ctrl\n\n\n    def _set_action(self, action):\n        assert action.shape == (4,)\n\n        action = action.copy()  # ensure that we don't change the action outside of this scope\n        pos_ctrl, gripper_ctrl = action[:3], action[3]\n        # print('pos', pos_ctrl)\n        # print('gripper', gripper_ctrl)\n        self._pos_ctrl_magnitude = np.linalg.norm(pos_ctrl)\n\n        # make sure gripper does not leave workspace\n        gripper_pos = self.sim.data.get_site_xpos('grasp')\n        pos_ctrl = self._limit_gripper(gripper_pos, pos_ctrl)\n\n        pos_ctrl *= self.action_scale # limit maximum change in position\n        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])\n\n        assert gripper_ctrl.shape == (2,)\n        action = np.concatenate([pos_ctrl, self.gripper_rotation, gripper_ctrl])\n\n        # Apply action to simulation.\n        utils.ctrl_set_action(self.sim, action)\n        utils.mocap_set_action(self.sim, action)\n\n\n    def end_effector_pos(self):\n        raise NotImplementedError('_get_achieved_goal has not been implemented for this task!')\n\n    def _get_robot_state_obs(self):\n        dt = self.sim.nsubsteps * self.sim.model.opt.timestep\n        eef_velp = self.sim.data.get_site_xvelp('grasp') * dt\n\n        x = time.process_time()\n        #print(x)\n        sin = math.sin(x)\n        self.sim.data.qvel[13:16] = np.array([ 0, -0.2, 0]) # -0.2, 0.14*sin\n        self.sim.data.qvel[-3:] = np.random.randn(3) * 0\n        # 
print('qvel', self.sim.data.qvel)\n # print('qvel_shape', self.sim.data.qvel.shape)\n\n eef_pos = self.sim.data.get_site_xpos('grasp')\n gripper_angle = self.sim.data.get_joint_qpos('right_outer_knuckle_joint')\n\n return np.concatenate([\n eef_pos, np.array([gripper_angle]) #eef_velp later add\n ], axis=0).astype(np.float32)\n\n def _get_image_obs(self):\n return self.render_obs(mode='rgb_array', width=self.image_size, height=self.image_size)\n\n def _get_obs(self):\n end_effector_position = self.end_effector_pos()\n obs = self._get_image_obs()\n state_ = self._get_robot_state_obs()\n\n return {\n 'observation': obs,\n 'achieved_goal': end_effector_position,\n 'desired_goal': self.goal,\n 'state': state_\n }\n\n\n def _render_callback(self):\n self.sim.forward()\n\n def _reset_sim(self):\n # Reset intial gripper position\n if not self.reset_free:\n self.sim.set_state(self.initial_state)\n self._sample_initial_pos()\n\n if self.has_object:\n self._sample_object_pos()\n\n self.sim.forward()\n return True\n\n def _sample_object_pos(self):\n raise NotImplementedError('_sample_object_pos has not been implemented for this task!')\n\n def _sample_goal(self, goal=None):\n assert goal is not None, 'must configure goal in task-specific class'\n self._pos_ctrl_magnitude = 0 # do not penalize at start of episode\n return goal\n\n def _sample_initial_pos(self, gripper_target=None):\n assert gripper_target is not None, 'must configure gripper in task-specific class'\n gripper_target[2] += 0.17 \n self.sim.data.set_mocap_pos('robot0:mocap2', gripper_target)\n self.sim.data.set_mocap_quat('robot0:mocap2', self.gripper_rotation)\n self.sim.data.set_joint_qpos('right_outer_knuckle_joint', self.closed_angle)\n self._gripper_sync()\n for _ in range(10):\n self.sim.step()\n self.initial_gripper_xpos = self.sim.data.get_site_xpos('grasp').copy()\n self.init_finger_xpos = (self.sim.data.get_body_xpos('right_hand') + self.sim.data.get_body_xpos('left_hand'))/2\n\n def _is_success(self, achieved_goal, desired_goal):\n\n object_qpos = self.sim.data.get_joint_qpos('Object:joint')\n object_lift = object_qpos[2]\n self.grasp_lift = object_lift > 0.55\n\n if self.grasp_lift:\n d = self.goal_distance(achieved_goal, desired_goal, self.use_xyz)\n return (d < self.distance_threshold).astype(np.float32)\n\n def _env_setup(self, initial_qpos):\n for name, value in initial_qpos.items():\n self.sim.data.set_joint_qpos(name, value)\n utils.reset_mocap_welds(self.sim)\n self.sim.forward()\n\n # Sample initial position of gripper\n self._sample_initial_pos()\n \n # Extract information for sampling goals\n self.table_xpos = self.sim.data.body_xpos[self.sim.model.body_name2id('table')]\n\n def step(self, action):\n self.goal = self._sample_goal().copy()\n\n return super(BaseEnv, self).step(action)\n\n def render_video(self, mode=None, width=1080, height=1080, camera_id=None):\n data = []\n img = self.sim.render(width, height, camera_name='third_person', depth=False)[::-1, :, :]\n data.append(img)\n return np.asarray(data)\n\n def render(self, mode='human', width=500, height=500, depth=False, camera_id=0):\n return super(BaseEnv, self).render(mode, width, height)\n", "repo_name": "JangSeongwon/Moving-object-grasping-with-single-active-view-camera", "sub_path": "src/env/robot/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 9530, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "gym.envs.robotics.robot_env.RobotEnv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "gym.envs.robotics.robot_env", "line_number": 23, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 50, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "gym.spaces.Box", "line_number": 73, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "numpy.around", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 133, "usage_type": "call"}, {"api_name": "env.robot.gym_utils.ctrl_set_action", "line_number": 136, "usage_type": "call"}, {"api_name": "env.robot.gym_utils", "line_number": 136, "usage_type": "name"}, {"api_name": "env.robot.gym_utils.mocap_set_action", "line_number": 137, "usage_type": "call"}, {"api_name": "env.robot.gym_utils", "line_number": 137, "usage_type": "name"}, {"api_name": "time.process_time", "line_number": 147, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 221, "usage_type": "attribute"}, {"api_name": "env.robot.gym_utils.reset_mocap_welds", "line_number": 226, "usage_type": "call"}, {"api_name": "env.robot.gym_utils", "line_number": 226, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 244, "usage_type": "call"}]} +{"seq_id": "74931896485", "text": "\"\"\"\nmatch one list with another, make current list have same structure\nwith the former.\n\"\"\"\nfrom collections import Iterable\n\ndef _reshape(shaper, recursive_types):\n fn_lst = [_reshape(e, recursive_types) if e.__class__ in recursive_types else next for e in shaper]\n ret_cls = shaper.__class__\n def apply(itor):\n return ret_cls(fn(itor) for fn in fn_lst)\n return apply\n\ndef reshape(shaper, recursive_types=(list, tuple, set)):\n _apply = _reshape(shaper, set(recursive_types))\n def apply(seq):\n return _apply(iter(seq))\n return apply\n\n\nshaper = [(1, 2, 3, 6), {2, 3, 6}, [2, [2]]]\nprint(shaper)\n# [(1, 
2, 3, 6), {2, 3, 6}, [2, [2]]]\n\nflatten = lambda nested: list(filter(lambda _: _, \n (lambda _: ((yield from flatten(e)) if isinstance(e, Iterable) else (yield e) for e in _))(nested)))\nlst = flatten(shaper)\nprint(lst)\n# [1, 2, 3, 6, 2, 3, 6, 2, 2]\n\nreshaper = reshape(shaper)\n\nprint(reshaper(lst))", "repo_name": "SimonCqk/Python_Toys", "sub_path": "utility/list_match.py", "file_name": "list_match.py", "file_ext": "py", "file_size_in_byte": 969, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.Iterable", "line_number": 26, "usage_type": "argument"}]} +{"seq_id": "15845650640", "text": "from typing import Text\nfrom rpi_ws281x import *\nfrom patterns import *\nfrom d2patterns import *\nfrom activeSettings import ActiveSettings\n\nimport zmq\n\n# LED strip configuration:\nLED_COUNT = 100 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\n#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n \n# Main program logic follows:\nif __name__ == '__main__':\n\n context = zmq.Context()\n socket = context.socket(zmq.PULL)\n socket.bind(\"tcp://*:5555\")\n\n \n # Create NeoPixel object with appropriate configuration.\n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\n # Intialize the library (must be called once before other functions).\n strip.begin()\n \n print ('Press Ctrl-C to quit.')\n \n settings = ActiveSettings()\n offset = 0\n patterns =[\n SetColor(),\n ColorWipe(),\n TheaterChase(),\n Rainbow(),\n RainbowCycle(),\n TheaterChaseRainbow(),\n Vline(),\n Hline(),\n ScrollText(),\n ]\n for p in patterns:\n p.setup(strip,settings)\n\n try:\n while True:\n offset = offset + 1 if offset < 100 else 0\n try:\n #print(\"getting msg\")\n message = socket.recv_string(flags=zmq.NOBLOCK)\n print(message)\n settings.parse_update(message)\n except zmq.ZMQError as e:\n #print(\"no msg\",e)\n pass\n if settings.toggleenable == 1:\n if settings.mode == -1:\n settings.mode = settings.prevmode\n else:\n settings.prevmode = settings.mode\n settings.mode = -1\n settings.toggleenable = 0\n\n if -1 <= settings.mode <= len(patterns)-2:\n patterns[settings.mode+1].step()\n \n except KeyboardInterrupt:\n patterns[0].step()\n", "repo_name": "IdrisTheDragon/ledpi", "sub_path": "newworker.py", "file_name": "newworker.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "zmq.Context", "line_number": 23, "usage_type": "call"}, {"api_name": "zmq.PULL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "activeSettings.ActiveSettings", "line_number": 35, "usage_type": "call"}, {"api_name": "zmq.NOBLOCK", "line_number": 56, "usage_type": "attribute"}, {"api_name": "zmq.ZMQError", "line_number": 59, "usage_type": "attribute"}]} +{"seq_id": "41752204048", "text": "from datetime import datetime\nfrom sources.effects.base import MatrixPixelEffect\nfrom sources.helpers.color_rgb import ColorRGB\n\n\nclass 
WatchEffect(MatrixPixelEffect):\n def __init__(self, width, height, frame_delay=0.3):\n MatrixPixelEffect.__init__(self, width, height, 1, frame_delay)\n self.base_color = ColorRGB.random()\n self.new_base_color = ColorRGB.random()\n self.color = self.base_color\n self.color_conversion_progress = 0\n self.last_time = ''\n self.sprites = []\n self.sprites.append([\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1]])\n self.sprites.append([\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [1, 1, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1]])\n self.sprites.append([\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [1, 0, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 0, 1]])\n self.sprites.append([\n [1, 1, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [1, 1, 1],\n [1, 0, 0],\n [1, 0, 0],\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 0],\n [1, 0, 0],\n [1, 0, 0]])\n self.sprites.append([\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [1, 1, 1],\n [1, 0, 1],\n [1, 0, 1],\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1],\n [1, 1, 1]])\n self.sprites.append([\n [0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 0]])\n\n def play_frame(self, drawer):\n t = datetime.now().strftime(\"%H:%M:%S\")\n if self.last_time == t:\n return\n\n self.last_time = t\n\n self.color_conversion_progress = self.color_conversion_progress + 0.01\n\n if self.color_conversion_progress > 1:\n self.base_color = self.new_base_color\n self.new_base_color = ColorRGB.random()\n self.color_conversion_progress = 0\n\n self.color = self.base_color.convert(self.new_base_color, self.color_conversion_progress)\n\n sec = datetime.now().second\n i = 0\n drawer.clear(False)\n print(t)\n\n for s in t:\n if s == ':':\n if sec % 2 == 0:\n self.print_symbol(drawer, 10, i)\n else:\n self.print_symbol(drawer, int(s), i)\n i = i + 1\n\n drawer.show()\n\n def print_symbol(self, drawer, symbol_index, position):\n x = 4 * position\n sprite = self.sprites[symbol_index]\n for sx in range(3):\n for sy in range(7):\n pixel = super(WatchEffect, self).translateToMatrixColumns(x + sx + 2, sy + 1)\n if sprite[sy][sx] == 1:\n drawer.set_color(pixel, self.color)\n", "repo_name": "alxcp/raspberryPi-ws28x-drawer", "sub_path": "sources/effects/watch.py", "file_name": "watch.py", "file_ext": "py", "file_size_in_byte": 3896, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sources.effects.base.MatrixPixelEffect", "line_number": 6, "usage_type": "name"}, {"api_name": "sources.effects.base.MatrixPixelEffect.__init__", "line_number": 8, "usage_type": "call"}, {"api_name": "sources.effects.base.MatrixPixelEffect", "line_number": 8, "usage_type": "name"}, {"api_name": "sources.helpers.color_rgb.ColorRGB.random", "line_number": 9, "usage_type": "call"}, {"api_name": "sources.helpers.color_rgb.ColorRGB", "line_number": 9, "usage_type": "name"}, {"api_name": "sources.helpers.color_rgb.ColorRGB.random", "line_number": 10, "usage_type": "call"}, {"api_name": "sources.helpers.color_rgb.ColorRGB", 
"line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "name"}, {"api_name": "sources.helpers.color_rgb.ColorRGB.random", "line_number": 115, "usage_type": "call"}, {"api_name": "sources.helpers.color_rgb.ColorRGB", "line_number": 115, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "74775243356", "text": "import logging\nimport math\n\nfrom random import randint\nfrom six import string_types\nfrom time import sleep\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nVERSION = '1.1.1'\n\ntlp_map = {\n 'red': 1,\n 'amber': 2,\n 'green': 3,\n 'white': 4\n}\n\n\nclass ThreatQObject(object):\n \"\"\"\n Object to encapsulate all object types in ThreatQ\n \"\"\"\n\n # Not including Files/Attachments because we can't bulk upload\n object_list = [\n 'indicators', 'adversaries', 'events', 'malware',\n 'campaign', 'course_of_action', 'exploit_target',\n 'identities', 'attack_pattern', 'exploit_target',\n 'instrusion_set', 'report', 'ttp', 'vulnerability',\n 'tool', 'stix_pattern'\n ]\n\n def __init__(self, tq, api_name):\n self.tq = tq\n self.api_name = api_name\n\n # Object metadata\n self.oid = None\n self.value = ''\n self.name = ''\n self.title = ''\n self.description = ''\n self.attributes = []\n self.published_at = None\n self.happened_at = None\n self.comments = []\n self.tlp = None\n self.status_id = None\n self.status = None\n self.type_id = None\n self.type = None\n self.score = None\n self.sources = []\n\n # Related objects\n self.relationships = {}\n self.metadata = {}\n\n def _get_base_endpoint_name(self):\n return self.api_name\n\n def _get_api_endpoint(self):\n return '/api/' + self.api_name + '/' + str(self.oid)\n\n def _id(self):\n \"\"\"\n Get the ID of this object within its namespace\n \"\"\"\n\n return self.oid\n\n def _set_id(self, value):\n \"\"\"\n Set the ID value\n \"\"\"\n\n self.oid = value\n return self\n\n def set_value(self, value):\n \"\"\"\n Set the value for the custom object\n \"\"\"\n\n # Fix any user errors ;)\n if self.api_name == 'adversaries':\n self.name = value\n elif self.api_name == 'events':\n self.title = value\n else:\n self.value = value\n\n def set_type(self, type_value):\n \"\"\"\n Set the type for the object\n \"\"\"\n\n if isinstance(type_value, int):\n self.type_id = type_value\n else:\n self.type = type_value\n\n def set_status(self, status):\n \"\"\"\n Set the status for the object\n \"\"\"\n\n if isinstance(status, int):\n self.status_id = status\n else:\n self.status = status\n\n def set_TLP(self, tlp):\n \"\"\"\n Set the tlp for the custom object\n \"\"\"\n\n self.tlp = tlp\n\n def set_name(self, name):\n \"\"\"\n Set the name for the custom object\n \"\"\"\n\n self.name = name\n\n def set_title(self, title):\n \"\"\"\n Set the title for the custom object\n \"\"\"\n\n self.title = title\n\n def add_source(self, *args, **kwargs):\n \"\"\"\n Handler for adding sources via old vs. 
new method\n \"\"\"\n\n if len(args) == 1 and isinstance(args[0], string_types):\n self._add_source_quick(args[0], tlp_id=kwargs.get('tlp_id'), tlp=kwargs.get('tlp'))\n elif len(args) == 1 and isinstance(args[0], ThreatQSource):\n self._add_source_object(args[0])\n elif len(args) == 1 and isinstance(args[0], list):\n for i in args[0]:\n self.add_source(i)\n\n def _add_source_quick(self, name, tlp_id=None, tlp=None):\n \"\"\"\n Add a source to the object\n \"\"\"\n\n if not name:\n return\n\n src = ThreatQSource(name, tlp=tlp_id or tlp)\n self._add_source_object(src)\n\n def _add_source_object(self, source):\n \"\"\"\n Add the source object to the list\n \"\"\"\n\n if not source or not source.name:\n return\n\n self.sources.append(source)\n\n def add_metadata(self, name, value):\n \"\"\"\n Add any extra info to the object for tracking\n \"\"\"\n\n self.metadata[name] = value\n\n def relate_object(self, *args):\n \"\"\"\n Relates an object based on the ID\n \"\"\"\n\n if len(args) == 2 and args[0] and args[1]:\n self._relate_object_deprecated(args[0], args[1])\n elif len(args) == 1 and args[0]:\n self._relate_object(args[0])\n\n def _relate_object_deprecated(self, api_name, obj):\n \"\"\"\n Relates an object to another the \"old\" way\n \"\"\"\n\n if api_name not in self.relationships:\n self.relationships[api_name] = []\n\n # Don't relate if duplicate\n if any(item.oid == obj.oid for item in self.relationships[api_name] if item.oid is not None):\n return\n\n self.relationships[api_name].append(obj)\n\n def _relate_object(self, obj):\n \"\"\"\n Relates an object to another the \"new\" way\n \"\"\"\n\n if obj.api_name not in self.relationships:\n self.relationships[obj.api_name] = []\n\n # Don't relate if duplicate\n if any(item.oid == obj.oid for item in self.relationships[obj.api_name] if item.oid is not None):\n return\n\n self.relationships[obj.api_name].append(obj)\n\n def fill_from_api_response(self, api_response, sources=[], attr_sources=[]):\n \"\"\"\n Fill ourselves in based on an API response\n \"\"\"\n\n # Load basic data\n self.api_name = api_response.get('api_name', self.api_name)\n self.oid = api_response.get('id')\n self.value = api_response.get('value', '')\n self.name = api_response.get('name', '')\n self.title = api_response.get('title', '')\n self.description = api_response.get('description', '')\n self.happened_at = api_response.get('happened_at')\n self.type_id = api_response.get('type_id')\n if 'type' in api_response:\n if isinstance(api_response['type'], dict):\n self.type = api_response.get('type', {}).get('name')\n elif isinstance(api_response['type'], string_types):\n self.type = api_response['type']\n elif isinstance(api_response['type'], int):\n self.type_id = api_response['type']\n self.status_id = api_response.get('status_id')\n if 'status' in api_response:\n if isinstance(api_response['status'], dict):\n self.status = api_response.get('status', {}).get('name')\n elif isinstance(api_response['status'], string_types):\n self.status = api_response['status']\n elif isinstance(api_response['status'], int):\n self.status_id = api_response['status']\n\n # Load score\n if self.api_name == \"indicators\" and \"score\" in api_response:\n if isinstance(api_response['score'], dict):\n self.score = api_response['score'].get('manual_score')\n if self.score is None:\n self.score = api_response['score'].get('generated_score')\n else:\n self.score = api_response['score']\n\n self.score = math.floor(float(self.score))\n\n # Load relationships\n for item in self.object_list:\n if 
item in api_response:\n # Make sure we have a place to store the relationship\n if item not in self.relationships:\n self.relationships[item] = []\n\n # Turn all dictionaries into Threat objects\n for rel in api_response.get(item, []):\n obj = ThreatQObject(self.tq, item)\n obj.fill_from_api_response(rel)\n self.relationships[item].append(obj)\n\n # Load soures\n for item in api_response.get('sources', []):\n self.add_source(item['name'], tlp_id=item.get('tlp_id'), tlp=item.get('tlp'))\n for item in sources:\n if isinstance(item, dict):\n self.add_source(item['name'], tlp_id=item.get('tlp_id'), tlp=item.get('tlp'))\n else:\n self.add_source(item)\n\n # Load attributes\n for item in api_response.get('attributes', []):\n if not item['name'] or not item['value']: # You wouldn't think this would get hit, but it can\n continue\n\n attr_src = item.get('sources', [])\n\n # Append custom attribute sources\n if attr_sources:\n for src in attr_sources:\n if isinstance(item, dict):\n attr_src.append(src)\n else:\n attr_src.append({'name': src})\n\n self.add_attribute(item['name'], item['value'], sources=attr_sources, tlp=item.get('tlp'))\n\n # Load comments\n self.comments = api_response.get('comments', [])\n\n return self\n\n @staticmethod\n def bulk_upload(tq, objects, show_debug=True, ignored_fields=[]):\n \"\"\"\n Bulk upload a list of ThreatObjects\n \"\"\"\n\n if not objects:\n return []\n\n # Load the obejcts\n data = [obj._to_dict(ignore=ignored_fields) for obj in objects if obj]\n output = []\n i = 0\n batch = 500\n\n # Create batches to upload\n while i < len(data):\n delay = randint(1, 3)\n if show_debug:\n logger.debug('Bulk uploading [{}] entries {} - {}'.format(objects[0].api_name, i, i + batch))\n\n try:\n # Upload the objects\n res = tq.post('/api/{}/consume'.format(objects[0].api_name), data=data[i:i + batch])\n res = [] if not res else res.get('data', [])\n output.extend(res)\n\n # Load in the ID from the upload\n for item in res:\n for obj in objects:\n if (\n obj.name and obj.name == item.get('name') or\n obj.value and obj.value == item.get('value') or\n obj.title and obj.title == item.get('title')\n ):\n obj._set_id(item.get('id'))\n break\n except Exception:\n logger.error('Failed to upload entries {} - {}. 
Continuing...'.format(i, i + batch))\n\n sleep(delay)\n i += batch\n\n return output\n\n def _to_dict(self, ignore=[], for_api=True):\n \"\"\"\n Serialize this object to a representation suitable for upload to ThreatQ\n \"\"\"\n\n output = {}\n\n if not self.value and not self.name and not self.title:\n raise ValueError('Threat Object has no value or name!')\n\n # The default fields\n if not for_api:\n output['api_name'] = self.api_name\n if self.value:\n output['value'] = self.value\n if self.name:\n output['name'] = self.name\n if self.title:\n output['title'] = self.title\n if self.description and 'description' not in ignore:\n # Need this case because of the techdebt in the API\n if self.api_name == 'adversaries':\n output['description'] = [{'value': self.description[:65500]}]\n else:\n output['description'] = self.description[:65500] # Max MariaDB TEXT length\n\n if self.oid and 'id' not in ignore:\n output['id'] = self.oid\n if self.comments and 'comments' not in ignore:\n output['comments'] = self.comments\n if self.attributes and 'attributes' not in ignore:\n self.attributes = ThreatQAttribute.merge_attributes(self.attributes)\n output['attributes'] = [\n attr.to_dict() for attr in self.attributes if attr and isinstance(attr, ThreatQAttribute)]\n if self.tlp and self.tlp in tlp_map and 'tlp' not in ignore:\n output['tlp_id'] = tlp_map.get(self.tlp)\n if self.status and 'status' not in ignore:\n output['status'] = {'name': self.status}\n if self.status_id and 'status' not in ignore and 'status_id' not in ignore:\n output['status_id'] = self.status_id\n if self.type and 'type' not in ignore:\n output['type'] = {'name': self.type}\n if self.type_id and 'type' not in ignore and 'type_id' not in ignore:\n output['type_id'] = self.type_id\n if self.sources and 'sources' not in ignore:\n self.sources = ThreatQSource.merge_sources(self.sources) # Merge the sources by hierarchy\n output['sources'] = [\n src.to_dict() for src in self.sources if src and isinstance(src, ThreatQSource)]\n if self.happened_at and 'happened_at' not in ignore:\n output['happened_at'] = self.happened_at\n if self.api_name == \"indicators\" and self.score is not None and 'score' not in ignore and not for_api:\n output['score'] = self.score\n\n # Add relationships\n if 'relationships' not in ignore:\n for k, v in self.relationships.items():\n output[k] = []\n for item in v:\n # Only add if an ID is available\n if isinstance(item, dict) and 'id' in item:\n output[k].append({'id': item['id']})\n elif isinstance(item, ThreatQObject) and item.oid:\n output[k].append({'id': item.oid} if for_api else item._to_dict())\n\n if self.published_at and 'published_at' not in ignore:\n output['published_at'] = self.published_at\n\n return output\n\n def add_comment(self, value):\n \"\"\"\n Add a comment to a custom object\n \"\"\"\n\n if not value:\n raise Exception('Cannot add a comment to a Threat Object without a value!')\n\n self.comments.append({'value': value})\n\n def get_comments(self):\n \"\"\"\n Gets comments for a custom object\n \"\"\"\n\n if not self.oid:\n raise Exception('Cannot get comments for a Threat Object without an ID!')\n\n p = {'with': 'sources'}\n res = self.tq.get(self._get_api_endpoint() + '/comments', params=p)\n self.comments = res.get('data')\n\n return self.comments\n\n def add_attribute(self, *args, **kwargs):\n if len(args) == 1:\n self._add_attribute_object(args[0])\n elif len(args) == 2:\n self._add_attribute_quick(*args, **kwargs)\n\n return self\n\n def _add_attribute_quick(self, key, value, 
sources=None, tlp=None):\n \"\"\"\n Add an attribute to the Threat Object\n \"\"\"\n\n if not key or not value:\n return\n\n if isinstance(value, bool):\n value = 'Yes' if value else 'No'\n\n attr = ThreatQAttribute(key, value, sources=sources, tlp=tlp)\n self._add_attribute_object(attr)\n\n def _add_attribute_object(self, attribute):\n \"\"\"\n Adds a ThreatQ Attribute object as an attribute\n \"\"\"\n\n if isinstance(attribute, list):\n for i in attribute:\n if isinstance(i, ThreatQAttribute):\n self.add_attribute(i)\n elif isinstance(attribute, ThreatQAttribute) and attribute.name and attribute.value:\n self.attributes.append(attribute)\n\n def get_attributes(self):\n \"\"\"\n Get attributes associated with this object\n\n :raises: :py:class:`~threatqsdk.exceptions.NotCreatedError` if\n the object has yet to be created\n \"\"\"\n\n if not self.oid:\n raise Exception('Cannot get attributes of a Threat Object without an ID!')\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return []\n\n self.attributes = results['data']\n return self.attributes\n\n def _get_api_suffix(self, obj_type):\n return obj_type._get_base_endpoint_name()\n\n def get_related_objects(self, obj_type):\n \"\"\"\n Get related objects\n \"\"\"\n\n if not self.oid:\n raise Exception('Cannot get related objects of the Threat Object without an ID!')\n\n suffix = self._get_api_suffix(obj_type)\n if obj_type == self.__class__ and suffix == 'adversaries':\n return []\n endpoint = self._get_api_endpoint() + '/' + suffix\n results = self.tq.get(endpoint)\n if 'data' not in results:\n return []\n\n tr = []\n for obj in results['data']:\n inst = obj_type(self.tq)\n inst.fill_from_api_response(obj)\n tr.append(inst)\n\n return tr\n\n def _object_url(self, oid):\n \"\"\"\n Get a link to the identity suitable for presentation to an\n end user\n\n :param int iid: Identity ID\n \"\"\"\n\n if not self.oid:\n raise Exception('Cannot get Threat Object URL without an ID!')\n\n base = self.tq.threatq_host + '/{}/'.format(self.add_attribute)\n return base + str(oid) + '/details'\n\n def upload(self):\n \"\"\"\n Upload object to ThreatQ\n \"\"\"\n\n ThreatQObject.bulk_upload(self.tq, [self])\n\n def find(self, withp=''):\n \"\"\"\n Finds an object by its value\n \"\"\"\n\n params = {}\n if self.value:\n params['value'] = self.value\n elif self.name:\n params['name'] = self.name\n elif self.title:\n params['title'] = self.title\n if ',' in params['title']:\n params['title'] = params['title'].split(',')[0] + '%'\n if withp:\n params['with'] = withp\n\n res = self.tq.get('/api/{}'.format(self.api_name), params=params)\n if res and res.get('data') and res['data']:\n self.fill_from_api_response(res['data'][0])\n\n return self\n\n def add_tag(self, tag_name):\n self.add_tags([tag_name])\n\n def upload_tags(self, tags):\n\n if not self.oid:\n raise Exception('Cannot add tag to a Threat Object without an ID!')\n\n data = []\n if isinstance(tags, list):\n for tag in tags:\n if isinstance(tag, string_types):\n data.append({'name': tag})\n elif isinstance(tag, dict) and 'name' in tag:\n data.append(tag)\n\n if data:\n self.tq.post('/api/{}/{}/tags'.format(self.api_name, self.oid), data=data)\n\n @staticmethod\n def parse_tlp(tlp):\n \"\"\"\n Parse a generic TLP string/int into a valid ThreatQ one\n \"\"\"\n\n if tlp and isinstance(tlp, string_types) and tlp in tlp_map.keys():\n return tlp_map[tlp]\n elif tlp and isinstance(tlp, int) and tlp in tlp_map.values():\n 
return tlp\n\n\nclass ThreatQSource(object):\n\n def __init__(self, name, tlp=None):\n \"\"\"\n An encapsulation of a ThreatQ source\n \"\"\"\n\n self.name = name\n self.tlp = ThreatQObject.parse_tlp(tlp)\n\n @staticmethod\n def make_source_list(sources):\n \"\"\"\n Parses sources from an \"any\" variable\n \"\"\"\n\n new_sources = []\n if isinstance(sources, string_types):\n for src in sources.split(','): # Support comma separated sources\n new_sources.append(ThreatQSource(src))\n elif isinstance(sources, list):\n for src in sources:\n new_sources.extend(ThreatQSource.make_source_list(src))\n elif isinstance(sources, dict) and 'name' in sources:\n tlp = sources.get('tlp', sources.get('tlp_id'))\n new_sources.append(ThreatQSource(sources['name'], tlp=tlp))\n elif isinstance(sources, ThreatQSource) and sources.name:\n new_sources.append(sources)\n\n return new_sources\n\n @staticmethod\n def merge_sources(source_list):\n \"\"\"\n Merge sources with the same name together.\n This is so we can apply TLPs by hierarchy.\n \"\"\"\n\n new_sources = []\n for i in source_list:\n\n # Find a match\n found = None\n for j in new_sources:\n if i.name == j.name:\n found = j\n break\n\n # If there is no match, add the source\n if not found:\n new_sources.append(i)\n continue\n\n # If there is a match, compare based on TLP hierarchy\n # If the TLP is more \"secret\", remove old source and apply new one\n if (i.tlp and not j.tlp) or (i.tlp and j.tlp and i.tlp < j.tlp):\n new_sources.remove(j)\n new_sources.append(i)\n\n # Set the source list to the merged source list\n return new_sources\n\n def to_dict(self):\n output = {'name': self.name}\n if self.tlp:\n output['tlp_id'] = self.tlp\n\n return output\n\n\nclass ThreatQAttribute(object):\n\n def __init__(self, name, value, sources=None, tlp=None):\n \"\"\"\n An encapsulation of a ThreatQ attribute\n\n Parameters:\n - name (str): The name of the attribute\n - value (str): The value of the attribute\n - sources (any): Sources for the attribute\n - tlp (str,int): Default TLP for the attribute\n \"\"\"\n\n if sources is None:\n sources = []\n self.name = name\n self.value = value\n self.sources = ThreatQSource.make_source_list(sources)\n self.tlp = ThreatQObject.parse_tlp(tlp)\n\n @staticmethod\n def merge_attributes(attribute_list):\n \"\"\"\n Merge sources with the same name together.\n This is so we can apply TLPs by hierarchy.\n \"\"\"\n\n new_attrs = []\n for i in attribute_list:\n # Find a match\n found = None\n for j in range(len(new_attrs)):\n if i.name == new_attrs[j].name and i.value == new_attrs[j].value:\n found = j\n break\n\n # If there is no match, add the attribute\n if not found:\n new_attrs.append(i)\n continue\n\n # If there is a match, merge the sources\n new_attrs[j].sources.extend(i.sources)\n\n # Set the source list to the merged source list\n return new_attrs\n\n def add_source(self, source, tlp=None):\n \"\"\"\n Adds a source to the list\n \"\"\"\n\n if not source:\n return\n\n if isinstance(source, ThreatQSource):\n self.sources.append(source)\n elif isinstance(source, dict) and 'name' in source:\n if tlp:\n source['tlp'] = tlp\n self.sources.extend(ThreatQSource.make_source_list(source))\n elif isinstance(source, string_types):\n for i in source.split(','):\n self.sources.append(ThreatQSource(i, tlp=tlp))\n elif isinstance(source, list):\n for i in source:\n self.add_source(i, tlp=tlp)\n\n def to_dict(self):\n output = {'name': self.name, 'value': self.value}\n if isinstance(output['value'], bool):\n output['value'] = 'Yes' if 
output['value'] else 'No'\n\n if self.sources:\n self.sources = ThreatQSource.merge_sources(self.sources) # Merge the sources by hierarchy\n output['sources'] = [src.to_dict() for src in self.sources if src]\n if self.tlp:\n output['tlp_id'] = self.tlp\n\n return output\n", "repo_name": "phantomcyber/phantom-apps", "sub_path": "Apps/phthreatquotient/threatqsdk/bulk_object.py", "file_name": "bulk_object.py", "file_ext": "py", "file_size_in_byte": 23514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 81, "dataset": "github-code", "pt": "50", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 9, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 141, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 232, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 240, "usage_type": "argument"}, {"api_name": "math.floor", "line_number": 254, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 317, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 340, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 573, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 587, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 610, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 724, "usage_type": "argument"}]} +{"seq_id": "15614996770", "text": "\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport requests\n\ndef getnamepage(name,b):\n b.get('http://image.baidu.com/')\n search_box=b.find_element_by_id('kw')\n search_box.send_keys(name)\n search_box.send_keys(Keys.ENTER)\n time.sleep(3)\n\ndef download(num,b):\n #选取大尺寸\n ele=b.find_element_by_id('sizeFilter')\n ActionChains(b).move_to_element(ele).perform()\n time.sleep(0.2)\n ele4=b.find_element_by_xpath('//*[@id=\"imgid\"]/div[1]/ul/li[5]')\n ActionChains(b).move_to_element(ele4).perform()\n time.sleep(0.2)\n ele4.click()\n time.sleep(0.2)\n\n\n #打开第5张图片,在此界面中点击左右切换图片\n ele1=b.find_element_by_xpath('//*[@id=\"imgid\"]/div/ul/li[5]')\n ele1.click()\n b.switch_to.window(b.window_handles[1])#很重要的一步,切换窗口,否则页面找不到元素,python shell里面是b.switch_to_window\n x=1\n for i in range(1,num+1):\n #ele3=b.find_element_by_xpath('/html/body/div[1]/div[2]/div/span[2]/span')\n #ele3.click()\n #time.sleep(3)#为保险起见,设置一个睡眠和爬取的时间差\n ele2=b.find_element_by_xpath('//*[@id=\"currentImg\"]')\n img=ele2.get_attribute('src')#获取当前图片的url链接\n #下载img\n r=requests.get(img)\n if r.status_code==200:\n path='./img/%d.jpg'%x\n print('正在爬取 '+img)\n with open(path,'wb') as f:\n f.write(r.content)\n time.sleep(0.8)\n f.close()\n print('爬取成功')\n x+=1\n ele3=b.find_element_by_xpath('/html/body/div[1]/div[2]/div/span[2]')\n ele3.click()\n #time.sleep(3)\n #跳到下一张\n else:\n ele3=b.find_element_by_xpath('/html/body/div[1]/div[2]/div/span[2]/span')\n ele3.click()\n time.sleep(1)\n continue\n\n\n\n\ndef craw_baidupic(keyword,num=5):\n b = webdriver.Chrome()\n name = keyword#定义要搜索的内容\n getnamepage(name,b)\n download(num,b)\n b.close()\n # 关闭之前的页面\n pages = b.window_handles\n for handle in pages:\n b.switch_to.window(handle)\n b.close()\n", "repo_name": "OmegaDING/AutoGenPopularVideo", "sub_path": "crawl_baidupic.py", "file_name": "crawl_baidupic.py", "file_ext": "py", "file_size_in_byte": 2328, "program_lang": 
"python", "lang": "en", "doc_type": "code", "stars": 37, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.common.keys.Keys.ENTER", "line_number": 12, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 12, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.action_chains.ActionChains", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 45, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 63, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "42425836719", "text": "#!/usr/bin/python3\n\nADDRESS = \"\";\nPORT = 1337;\n\nimport http.server as hs;\nfrom modules.Tunnel import tunnel;\n#from modules.Keylogger import logger\nimport json;\n\nclass CustomRequestHandler(hs.BaseHTTPRequestHandler):\n\t\n\tdata={\"user\":\"\"};\n\t\n\tdef do_GET(self):\n\t\tprint(\"\\n\\n\");\n\t\tself.send_response(200);\n\t\tself.send_header(\"Content-Type\",\"text/JSON\");\n\t\tself.send_header(\"Content-Encoding\",\"ASCII\");\n\t\tself.end_headers();\n\n\t\t\n\t\t\n\t\tif self.data[\"user\"] != self.client_address:\n\t\t\tself.data[\"user\"] = self.client_address;\n\t\t\n\t\tdata = self.rfile.read(int(self.headers[\"content-length\"]));\n\t\tdata = str(data).strip(\"b'\").strip(\"'\"); #Ask Jacques why we strip\n\t\tdata = json.loads(data);\n\t\t\n\t\tres = self.execute(data);\n\t\t\n\t\t#self.wfile.write(b\"GET-ed Successfully!\");\n\t\tself.wfile.write(res);\n\t\treturn;\n\n\tdef execute(self,params):\n\t\tprint(\"GOT \",params);\n\t\t\n\t\t#default values\n\t\taction=None;\n\t\tvalue=None;\n\t\textras=None;\n\t\t\n\t\ttry:\n\t\t\taction = params[\"action\"];\n\t\t\tvalue = params[\"value\"];\n\t\t\textras = params[\"extras\"]\n\t\texcept KeyError:\n\t\t\tpass;\n\t\t\n\t\tret = None; #To be returned at end\n\t\t\n\t\tif action == \"tunnel\":\n\t\t\tif \"tunnel_obj\" not in self.data.keys():\n\t\t\t\tself.data[\"tunnel_obj\"] = tunnel.Tunnel();\n\t\t\ttunnel_obj = self.data[\"tunnel_obj\"]; #retrieve tunnel object\n\t\t\turl = value;\n\t\t\tif params[\"sub_action\"] == \"get\":\n\t\t\t\tret = tunnel_obj.get(url);\n\t\t\telif params[\"sub_action\"] == \"post\":\n\t\t\t\tret = tunnel_obj.post(url);\n\t\t\telif params[\"sub_action\"] == \"session\":\n\t\t\t\tret = tunnel_obj.session(url);\n\t\t\telif params[\"sub_action\"] == \"login session\":\n\t\t\t\tret = ret = tunnel_obj.login_session(url,extras);\n\t\t\tself.data[\"tunnel_obj\"] = tunnel_obj; #store tunnel object\n\t\telif action == \"keylogger\":\n\t\t\tif \"logger_obj\" not in self.data.keys():\n\t\t\t\tself.data[\"logger_obj\"] = logger.LoggerThread();\n\t\t\tlogger_obj = self.data[\"logger_obj\"]; #retrieve tunnel object\n\t\t\tsub_act = params[\"sub_action\"];\n\t\t\tif sub_act == \"start\":\n\t\t\t\tret = logger_obj.start();\n\t\t\telif sub_act == \"stop\":\n\t\t\t\tret = logger_obj.stop();\n\t\t\telif sub_act == 
\"get\":\n\t\t\t\tret = logger_obj.get_file();\n\t\t\tself.data[\"logger_obj\"] = logger_obj;\n\t\treturn(ret);\n\nif __name__ == \"__main__\":\n\tserver_addr = (ADDRESS,PORT);\n\trequest_handler = CustomRequestHandler;\n\thttp_daemon = hs.HTTPServer(server_addr,request_handler);\n\n\ttry:\n\t\tprint(\"Starting server on port \"+str(PORT));\n\t\thttp_daemon.serve_forever();\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\n\\nKilling Server...\");\n\t\texit();\n", "repo_name": "ashu20071/backdoor-shell", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2429, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "http.server.BaseHTTPRequestHandler", "line_number": 11, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 11, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 29, "usage_type": "call"}, {"api_name": "modules.Tunnel.tunnel.Tunnel", "line_number": 56, "usage_type": "call"}, {"api_name": "modules.Tunnel.tunnel", "line_number": 56, "usage_type": "name"}, {"api_name": "http.server.HTTPServer", "line_number": 85, "usage_type": "call"}, {"api_name": "http.server", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "17262336535", "text": "import json\r\nimport os\r\nfrom zeep.exceptions import Fault\r\nimport traceback\r\nimport sys\r\nsys.path.append(\"../\")\r\nfrom ciscoaxl import axl\r\nfrom common.baseFunctions import *\r\nfrom common.importLogic import updateConfigs\r\n\r\n\r\n## Reading import Logic Sheet\r\nimportLogicFile = f\"../common/importLogic.json\"\r\ndynamicLogicJson = json.load(open(importLogicFile))\r\n\r\n\r\n### Read Source and Destination Input JSON's\r\nsourceJsonFile=f\"../inputs/sourceCluster.json\"\r\ndestinationJsonFile=f\"../inputs/destinationCluster.json\"\r\n#sourceClusterInputJson = \r\n#destinationClusterInputJson = \r\n\r\n## Create Source AXL Object\r\n# ucm_axl_object_source = axl(\r\n# username=,\r\n# password=,\r\n# cucm=,\r\n# cucm_version=,\r\n# )\r\n\r\n## Create Destination AXL Object\r\n# ucm_axl_object_dest = axl(\r\n# username=,\r\n# password=,\r\n# cucm=,\r\n# cucm_version=,\r\n# )\r\n\r\ndataFilterFile=f\"getDataFilter.json\" ## output of generate config patterns\r\nconfigExportPath=f\"./ConfigExports\"\r\n\r\n# ## Sites to import from config Folder\r\n# Sites = destinationClusterInputJson[\"configsFolders\"]\r\n\r\n\r\ndef generate_config_patterns():\r\n\r\n # ## Define an empty dictionary object to store all sites data\r\n # allSitesData = {}\r\n # ## Generate the datafilter JSON Content\r\n # for site in sourceClusterInputJson['siteCode']:\r\n # siteSpecificdataFilterDict = {\r\n # \"CSSList\": [\r\n # f\"{site}_CSS\"\r\n # ]\r\n # }\r\n\r\n # allSitesData[site] = siteSpecificdataFilterDict\r\n\r\n # # Serializing json\r\n # dataFilterDict_JSONobject = json.dumps(allSitesData, indent=4)\r\n # jsonFile = open(dataFilterFile, \"w\")\r\n # jsonFile.write(dataFilterDict_JSONobject)\r\n\r\n # ## Close JSON File\r\n # jsonFile.close()\r\n\r\n return\r\n\r\n\r\ndef SiteDataExport(directory, siteDataFilterContent):\r\n\r\n CSSList = (\r\n siteDataFilterContent[\"CSSList\"]\r\n if \"CSSList\" in siteDataFilterContent.keys()\r\n else []\r\n )\r\n\r\n #Partition List\r\n PartitionList = []\r\n\r\n\r\n # CSS\r\n allCSS = []\r\n\r\n for css in CSSList:\r\n cssFound = ucm_source.get_calling_search_space(name=css)\r\n ## Extract Parition from CSS\r\n\r\n # Write Results\r\n write_results(directory, 
allCSS, \"css\")\r\n\r\n # Partitions\r\n allPartitions = []\r\n\r\n for partition in set(PartitionList):\r\n partitionFound = ucm_source.get_partition(name=partition)\r\n \r\n\r\n # Write Results\r\n write_results(directory, allPartitions, \"partition\")\r\n\r\n return True\r\n\r\n\r\ndef export_CSS_Partition():\r\n\r\n # Read the dataFilter JSON: This JSON is created in step 1\r\n dataFilterContent = json.load(open(dataFilterFile))\r\n for siteCode, siteData in dataFilterContent.items():\r\n configDirectory = f\"{configExportPath}/{siteCode}\"\r\n if not os.path.exists(configDirectory):\r\n os.makedirs(configDirectory)\r\n logPrint(f\"Files will be saved in '{configDirectory}' directory\")\r\n logPrint(f\"Fetching data for Site: {siteCode}\")\r\n\r\n try:\r\n SiteDataExport(configDirectory, siteData)\r\n except Exception as siteExe:\r\n logPrint(f\"Error Occured while exporting configs: {siteExe}\")\r\n traceback.print_exc()\r\n exit()\r\n\r\n else:\r\n logPrint(f\"Export Completed for Site: {siteCode}. Proceeding..\")\r\n\r\n\r\n\r\ndef import_CSS_Partition():\r\n ## iterate over each site folder and push the configurations\r\n for site in sourceClusterInputJson['configsFolders']:\r\n configDirectory = f\"{configExportPath}/{site}\"\r\n logPrint(f\"Reading Configs from Directory: {configDirectory}. Proceeding...\")\r\n if os.path.exists(configDirectory):\r\n logPrint(\r\n f\"Starting Import for Site: {site} on CUCM: {destinationClusterInputJson['cucm']}\"\r\n )\r\n try:\r\n userAccept = input(\"Do you want to proceed (Y/n)?\")\r\n except KeyError:\r\n print(\"Invalid input. Existing..\")\r\n exit()\r\n else:\r\n if userAccept == \"Y\":\r\n try:\r\n pass\r\n #updateConfigs(configDirectory, ucm_destination)\r\n except Exception as importExe:\r\n logPrint(f\"Error Occured while Importing: {importExe}\")\r\n traceback.print_exc()\r\n exit()\r\n else:\r\n logPrint(f\"Data Import completed for site: {site}. Proceeding...\")\r\n else:\r\n logPrint(f\"invalid response '{userAccept}' received. Exiting...\")\r\n exit()\r\n\r\n\r\n# Step 1\r\ngenerate_config_patterns()\r\n\r\n# Step 2\r\nexport_CSS_Partition()\r\n\r\n# Step 3\r\nimport_CSS_Partition()", "repo_name": "CXC-PS-Collab/UCM-Data-Migration", "sub_path": "devNet-Workbooks/lab1/lab1-handOn.py", "file_name": "lab1-handOn.py", "file_ext": "py", "file_size_in_byte": 4786, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 14, "usage_type": "call"}, {"api_name": "json.load", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path", "line_number": 112, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 113, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "traceback.print_exc", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "34469182478", "text": "import socket\nimport subprocess\nimport platform\nimport time\nimport sys\n\nimport google.protobuf\n\nfrom . 
import messages_pb2\n\nclass Connection():\n \"\"\"Automatically starts the binary and creates a socket connection to it.\n\n When started with the default arguments, will start the binary on an open\n port and connect to it.\n\n If start_binary is set to False, the binary will\n not be automatically started, and connection will instead be made to the\n given address and port.\n\n Once connection has been made, the req member will be a protobuf Request\n class as defined in messages.proto. This member can be edited to set the\n message fields for the next request.\n\n The send_request() method will send the current request to the binary.\n \"\"\"\n\n def __init__(self, address=\"localhost\", port=None, start_binary=True, binary_path=\"main\"):\n self.req = messages_pb2.Request()\n\n # Get a free port number\n if port is None:\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind((\"\", 0))\n _, port = tcp.getsockname()\n tcp.close()\n\n # Start the binary\n if start_binary:\n try:\n if platform.system() == \"Windows\":\n subprocess.Popen([binary_path, \"-p\", str(port)],\n stdout=subprocess.DEVNULL)\n else:\n subprocess.Popen([binary_path, \"-p\", str(port)],\n stdout=subprocess.DEVNULL)\n except OSError:\n print(\"Starting the binary failed\")\n sys.exit()\n\n # Attempt connecting until it succeeds\n for _ in range(10):\n try:\n self.s = socket.create_connection((address, port))\n break\n except ConnectionRefusedError:\n time.sleep(0.1)\n continue\n\n # Set TCP_NODELAY to prevent delays when sending short messages\n self.s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)\n\n def send_request(self):\n \"\"\"Send the Request message stored in this.req and resets\n it to default values.\n Returns the Response object received from the binary,\n or False if decoding the incoming message failed.\n \"\"\"\n # Serialize message\n serialized = self.req.SerializeToString()\n\n # Reset the message to default values\n self.req = messages_pb2.Request()\n\n # Send message length\n msg_len = len(serialized)\n sent = self.s.send(msg_len.to_bytes(4, \"big\"))\n\n # Send message content\n total_sent = 0\n while total_sent < msg_len:\n sent = self.s.send(serialized[total_sent:])\n total_sent += sent\n\n # Receive message length\n data = b\"\"\n while len(data) < 4:\n received = self.s.recv(4 - len(data))\n if len(received) == 0:\n raise ConnectionResetError(\"Connection was closed\")\n data += received\n msg_len = int.from_bytes(data, \"big\")\n\n # Receive a Response message\n data = b\"\"\n while len(data) < msg_len:\n received = self.s.recv(msg_len - len(data))\n if len(received) == 0:\n raise ConnectionResetError(\"Connection was closed\")\n data += received\n\n # Try to parse the response and return it\n try:\n resp_msg = messages_pb2.Response()\n resp_msg.ParseFromString(data)\n return resp_msg\n\n # Return False if decoding fails\n except google.protobuf.message.DecodeError as e:\n print(\"DecodeError in reponse: {}\".format(e))\n return False\n", "repo_name": "joonaspu/video-game-behavioural-cloning", "sub_path": "video_game_env/connection.py", "file_name": "connection.py", "file_ext": "py", "file_size_in_byte": 3739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "50", "api": [{"api_name": "socket.socket", "line_number": 33, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 33, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 33, "usage_type": "attribute"}, {"api_name": 
"platform.system", "line_number": 41, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 42, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 43, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 45, "usage_type": "call"}, {"api_name": "subprocess.DEVNULL", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 49, "usage_type": "call"}, {"api_name": "socket.create_connection", "line_number": 54, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 57, "usage_type": "call"}, {"api_name": "socket.SOL_TCP", "line_number": 61, "usage_type": "attribute"}, {"api_name": "socket.TCP_NODELAY", "line_number": 61, "usage_type": "attribute"}, {"api_name": "google.protobuf.protobuf", "line_number": 109, "usage_type": "attribute"}, {"api_name": "google.protobuf", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "13134332388", "text": "import os\nfrom xml.dom.minidom import parse as xml_parser\n\nimport bs4\n\nfrom common.config import config\nfrom common.misc import fields as f\n\n\ndef return_xml_new_lines(sentence):\n cleaned = ''\n\n for word in sentence.split(\" \"):\n if word != '':\n cleaned += word.strip() + ' '\n\n return cleaned\n\n\ndef recursive_node_value(root):\n if len(root.childNodes) == 0:\n if root.nodeValue and root.nodeType == root.TEXT_NODE and root.nodeValue.strip() != '':\n return return_xml_new_lines(root.nodeValue)\n else:\n # ignore empty non text nodes\n return \"\"\n\n contents = ''\n\n for child in root.childNodes:\n if child.nodeType == child.ELEMENT_NODE and child.tagName == config.TEI_ITALICS_NODE_TAG:\n contents += '{value}'.format(value=recursive_node_value(child))\n else:\n contents += recursive_node_value(child)\n\n return contents\n\n\ndef parse(filename):\n \"\"\"\n\n \"\"\"\n tei_xml = xml_parser(filename)\n\n citations = list()\n\n for i, elem in enumerate(tei_xml.getElementsByTagName(config.TEI_FOOTNOTE_TAG)):\n contents = ''\n for child_node in elem.childNodes:\n # ignore empty nodes\n if child_node.nodeType == child_node.TEXT_NODE and child_node.nodeValue.strip() == '':\n continue\n\n assert child_node.nodeType == child_node.TEXT_NODE or child_node.nodeType == child_node.ELEMENT_NODE\n contents += recursive_node_value(child_node)\n\n citations.append(contents)\n\n return citations\n\n\ndef update(file, references):\n \"\"\"\n Create a new updated xml, containing information for all the bibliographic references.\n As per TEI standards, all bibliographic references are added under `teiHeader`\n\n https://www.tei-c.org/release/doc/tei-p5-doc/en/html/CO.html#COBICOL\n \"\"\"\n with open(file, \"r\") as fd:\n soup = bs4.BeautifulSoup(fd, \"xml\")\n # x3ml will fail if the following is present\n soup.find('TEI').attrs.clear()\n tei_header = soup.find(\"teiHeader\")\n\n listBibl = tei_header.find(\"listBibl\")\n if not listBibl:\n listBibl = soup.new_tag(\"listBibl\")\n\n for ref in references:\n\n biblStruct = soup.new_tag(\"biblStruct\")\n if ref[f.URI]:\n biblStruct['uri'] = ref[f.URI]\n \n analytic = soup.new_tag(\"analytic\")\n\n for author in ref[f.AUTHORS]:\n author_tag = soup.new_tag(\"author\")\n author_tag['level'] = 'a' # `a` for analytic\n author_tag.string = author\n analytic.append(author_tag)\n\n title_tag = soup.new_tag(\"title\")\n title_tag['level'] = 'a'\n title_tag.string = ref[f.TITLE]\n analytic.append(title_tag)\n\n if ref[f.LANG]:\n lang_tag = soup.new_tag('textLang')\n lang_tag.string = ref[f.LANG]\n 
analytic.append(lang_tag)\n\n biblStruct.append(analytic)\n\n monogr = soup.new_tag('monogr')\n\n if ref[f.PUBLISHER]:\n mono_title_tag = soup.new_tag(\"title\")\n mono_title_tag['level'] = 'm'\n mono_title_tag.string = ref[f.PUBLISHER]\n monogr.append(mono_title_tag)\n\n imprint_tag = soup.new_tag(\"imprint\")\n\n if ref[f.YEAR]:\n date_tag = soup.new_tag(\"date\")\n date_tag.string = ref[f.YEAR]\n imprint_tag.append(date_tag)\n\n monogr.append(imprint_tag)\n\n if ref[f.PAGES]:\n biblScope_tag = soup.new_tag(\"biblScope\")\n biblScope_tag.string = ref[f.PAGES]\n monogr.append(biblScope_tag)\n\n for editor in (ref[f.RESPONSIBILITY] or []):\n editor_tag = soup.new_tag(\"editor\")\n editor_tag.string = editor\n monogr.append(editor_tag)\n\n if ref[f.ISBN]:\n isbn_tag = soup.new_tag('idno')\n isbn_tag['type'] = 'ISBN'\n isbn_tag.string = ref[f.ISBN]\n monogr.append(isbn_tag)\n\n if ref[f.OCLC]:\n oclc_tag = soup.new_tag('idno')\n oclc_tag['type'] = 'OCLC'\n oclc_tag.string = ref[f.OCLC]\n monogr.append(oclc_tag)\n\n biblStruct.append(monogr)\n listBibl.append(biblStruct)\n tei_header.append(listBibl)\n\n filename = os.path.basename(file)\n inter_dirs = file.split(os.sep)\n\n outpath = os.path.join(config.WORKSPACE, config.OUTPUT_FOLDER, inter_dirs[-3], inter_dirs[-2])\n os.makedirs(outpath, exist_ok=True)\n\n outfile = os.path.join(outpath, filename)\n with open(file=outfile, mode=\"w\", encoding=\"utf-8\") as fd:\n fd.write(soup.prettify())\n\n return outfile\n", "repo_name": "biblhertz/referency", "sub_path": "common/tei/tei.py", "file_name": "tei.py", "file_ext": "py", "file_size_in_byte": 4878, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "common.config.config.TEI_ITALICS_NODE_TAG", "line_number": 31, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 31, "usage_type": "name"}, {"api_name": "xml.dom.minidom.parse", "line_number": 43, "usage_type": "call"}, {"api_name": "common.config.config.TEI_FOOTNOTE_TAG", "line_number": 47, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 47, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 70, "usage_type": "call"}, {"api_name": "common.misc.fields.URI", "line_number": 82, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 82, "usage_type": "name"}, {"api_name": "common.misc.fields.URI", "line_number": 83, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 83, "usage_type": "name"}, {"api_name": "common.misc.fields.AUTHORS", "line_number": 87, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 87, "usage_type": "name"}, {"api_name": "common.misc.fields.TITLE", "line_number": 95, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 95, "usage_type": "name"}, {"api_name": "common.misc.fields.LANG", "line_number": 98, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 98, "usage_type": "name"}, {"api_name": "common.misc.fields.LANG", "line_number": 100, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 100, "usage_type": "name"}, {"api_name": "common.misc.fields.PUBLISHER", "line_number": 107, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 107, "usage_type": "name"}, {"api_name": "common.misc.fields.PUBLISHER", "line_number": 110, "usage_type": "attribute"}, {"api_name": "common.misc.fields", 
"line_number": 110, "usage_type": "name"}, {"api_name": "common.misc.fields.YEAR", "line_number": 115, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 115, "usage_type": "name"}, {"api_name": "common.misc.fields.YEAR", "line_number": 117, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 117, "usage_type": "name"}, {"api_name": "common.misc.fields.PAGES", "line_number": 122, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 122, "usage_type": "name"}, {"api_name": "common.misc.fields.PAGES", "line_number": 124, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 124, "usage_type": "name"}, {"api_name": "common.misc.fields.RESPONSIBILITY", "line_number": 127, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 127, "usage_type": "name"}, {"api_name": "common.misc.fields.ISBN", "line_number": 132, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 132, "usage_type": "name"}, {"api_name": "common.misc.fields.ISBN", "line_number": 135, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 135, "usage_type": "name"}, {"api_name": "common.misc.fields.OCLC", "line_number": 138, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 138, "usage_type": "name"}, {"api_name": "common.misc.fields.OCLC", "line_number": 141, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 141, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "common.config.config.WORKSPACE", "line_number": 151, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 151, "usage_type": "name"}, {"api_name": "common.config.config.OUTPUT_FOLDER", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}]} +{"seq_id": "18128793522", "text": "# In the name of Allah\n\nfrom Crypto.PublicKey import RSA\nfrom Crypto.PublicKey.RSA import RsaKey\nfrom Crypto.Cipher import PKCS1_v1_5\nfrom Crypto.Random import new as Random\nfrom base64 import b64encode\nfrom base64 import b64decode, urlsafe_b64decode\nfrom threading import Lock\nimport pickle\nimport json\nimport typing as T\n\n\nclass Cipher:\n def __init__(self, data_dict: T.Optional[dict] = None):\n self.__lock = Lock()\n self.__generated = False\n if data_dict is not None:\n self.__tag = data_dict['tag']\n if 'private_key' in data_dict:\n # print(f'creating cipher@{self.__tag} (private)')\n self.__key = RSA.import_key(data_dict['private_key'], data_dict['public_key'])#, data_dict['private_key'])\n else:\n # print(f'creating cipher@{self.__tag} (public)')\n self.__key = RSA.import_key(data_dict['public_key'])#, data_dict['private_key'])\n self.__generated = True\n else:\n self.__tag = None\n self.__key = None\n\n def generate_key(self, key_length: int, tag: str = ''):\n with self.__lock:\n if self.__generated:\n raise Exception('Already generated')\n assert key_length in [1024, 2048, 
4096]\n self.__tag = tag\n rng = Random().read\n self.__key = RSA.generate(key_length, rng)\n self.__generated = True\n return\n \n @property\n def has_private(self) -> bool:\n return self.key.has_private()\n\n @property\n def generated(self) -> bool:\n return self.__generated\n\n @property\n def tag(self) -> str:\n return self.__tag\n\n @property\n def key(self) -> RsaKey:\n if self.__key is None:\n raise Exception('Please first call the generate_key function')\n return self.__key\n\n def encrypt(self, data: str, perform_b64: bool = False):\n if perform_b64:\n plaintext = b64encode(data.encode())\n else:\n plaintext = data.encode()\n rsa_encryption_cipher = PKCS1_v1_5.new(self.key)\n ciphertext = rsa_encryption_cipher.encrypt(plaintext)\n return b64encode(ciphertext).decode()\n\n def decrypt(self, data: str, perform_b64: bool = False):\n if not self.key.has_private():\n raise Exception('This cipher is only used for encryption')\n rsa_decryption_cipher = PKCS1_v1_5.new(self.key)\n # ciphertext = b64decode(data.encode())\n ciphertext = urlsafe_b64decode(data.encode())\n plaintext = rsa_decryption_cipher.decrypt(ciphertext, 16)\n if perform_b64:\n return b64decode(plaintext).decode()\n else:\n return plaintext.decode()\n\n @property\n def public_key(self) -> bytes:\n return self.key.public_key().export_key()\n\n def write_public_key(self, path: str):\n with open(path, 'wb') as file:\n file.write(self.public_key)\n\n @property\n def private_key(self) -> bytes:\n if not self.key.has_private():\n raise Exception('This cipher is only used for encryption')\n return self.key.export_key()\n\n @property\n def __dict__(self) -> dict:\n k = self.key\n if self.has_private:\n return {\n 'public_key': self.public_key,\n 'private_key': self.private_key,\n 'tag': self.tag\n }\n else:\n return {\n 'public_key': self.public_key,\n 'tag': self.tag\n }\n\n def write(self, path: str, format: str = 'json'):\n \"\"\"type can be either 'json' or 'bin' (pickled)\"\"\"\n if not self.__generated:\n raise Exception('Can not write a non-generated key')\n if format == 'json':\n with open(path, 'w') as file:\n json.dump(vars(self), file, indent=' ', ensure_ascii=False)\n elif format == 'bin':\n with open(path, 'wb') as file:\n pickle.dump(vars(self), file)\n else:\n raise Exception('Right now only bin and json are accepted')\n", "repo_name": "ADanayi/iotis_app", "sub_path": "python/_classes/__Cipher.py", "file_name": "__Cipher.py", "file_ext": "py", "file_size_in_byte": 4152, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Optional", "line_number": 16, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 17, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA.import_key", "line_number": 23, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 23, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.import_key", "line_number": 26, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 26, "usage_type": "name"}, {"api_name": "Crypto.Random.new", "line_number": 38, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA.generate", "line_number": 39, "usage_type": "call"}, {"api_name": "Crypto.PublicKey.RSA", "line_number": 39, "usage_type": "name"}, {"api_name": "Crypto.PublicKey.RSA.RsaKey", "line_number": 56, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 63, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5.new", 
"line_number": 66, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5", "line_number": 66, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 68, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5.new", "line_number": 73, "usage_type": "call"}, {"api_name": "Crypto.Cipher.PKCS1_v1_5", "line_number": 73, "usage_type": "name"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 75, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 117, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "28000690111", "text": "import math\nimport random\nfrom datetime import date\n\ndef basic_python():\n\n pi = math.pi\n print(pi)\n\n random_choice = random.choice(['apple', 'pear', 'banana'])\n print(random_choice)\n\n now = date.today()\n birthday = date(1999, 8, 20)\n age = now - birthday\n print(age.days)\n\n\ndef arithmetic_calculation():\n\n a = 21\n b = 10\n c = 0\n\n c = a + b\n print(\"c 的值为:\", c)\n\n c = a - b\n print(\"c 的值为:\", c)\n\n c = a * b\n print(\"c 的值为:\", c)\n\n c = a / b\n print(\"c 的值为:\", c)\n\n c = a % b # 取余数\n print(\"c 的值为:\", c)\n\n # 修改变量 a 、b 、c\n a = 2\n b = 3\n c = a ** b\n print(\"c 的值为:\", c)\n\n a = 10\n b = 5\n c = a // b # 取整\n print(\"c 的值为:\", c)\n\ndef comparison_calculation():\n a = 21\n b = 10\n c = 0\n\n if a == b:\n print(\"a 等于 b\")\n else:\n print(\"a 不等于 b\")\n\n if a != b:\n print(\"a 不等于 b\")\n else:\n print(\"a 等于 b\")\n\n if a < b:\n print(\"a 小于 b\")\n else:\n print(\"a 大于等于 b\")\n\n if a > b:\n print(\"a 大于 b\")\n else:\n print(\"a 小于等于 b\")\n\n # 修改变量 a 和 b 的值\n a = 5\n b = 20\n if a <= b:\n print(\"a 小于等于 b\")\n else:\n print(\"a 大于 b\")\n\n if b >= a:\n print(\"b 大于等于 a\")\n else:\n print(\"b 小于 a\")\n\ndef logic_calculation(a, b):\n\n\n if a and b:\n print(\"变量 a 和 b 都为 true\")\n else:\n print(\"变量 a 和 b 有一个不为 true\")\n\n if a or b:\n print(\"变量 a 和 b 都为 true,或其中一个变量为 true\")\n else:\n print(\"变量 a 和 b 都不为 true\")\n\n if not (a and b):\n print(\"变量 a 和 b 都为 false,或其中一个变量为 false\")\n else:\n print(\"变量 a 和 b 都为 true\")\n\ndef assignment_calculation(a, b, c):\n\n\n c = a + b\n print(\"c 的值为:\", c)\n\n c += a\n print(\"c 的值为:\", c)\n\n c *= a\n print(\"c 的值为:\", c)\n\n c /= a\n print(\"c 的值为:\", c)\n\n c = 2\n c %= a\n print(\"c 的值为:\", c)\n\n c **= a\n print(\"c 的值为:\", c)\n\n c //= a\n print(\"c 的值为:\", c)\n\n\ndef if_statement():\n\n num = 9\n if num >= 0 and num <= 10: # 判断值是否在0~10之间\n print('hello')\n\n num = 10\n if num < 0 or num > 10: # 判断值是否在小于0或大于10\n print('hello')\n else:\n print('undefine')\n\n num = 8\n # 判断值是否在0~5或者10~15之间\n if (num >= 0 and num <= 5) or (num >= 10 and num <= 15):\n print('hello')\n else:\n print('undefine')\n\ndef do_statement():\n\n count = 0\n while count < 5:\n print(count, \" is less than 5\")\n count = count + 1\n else:\n print(count, \" is not less than 5\")\n\n fruits = ['banana', 'apple', 'mango']\n for index in range(len(fruits)):\n print('当前水果 :', fruits[index])\n print(\"Good bye!\")\n\ndef other_statement():\n\n for letter in 'Python':\n if letter == 'h':\n break\n print('当前字母 :', letter)\n\n for letter in 'Python':\n if letter == 'h':\n continue\n print('当前字母 :', letter)\n\n # 输出 Python 的每个字母\n for letter in 'Python':\n if letter == 'h':\n pass\n print('这是 pass 块')\n print('当前字母 :', letter)\n\n print(\"Good bye!\")\n\nif __name__ == '__main__':\n\n comparison_calculation()\n arithmetic_calculation()\n 
comparison_calculation()\n logic_calculation(True, False)\n assignment_calculation(21, 10, 0)\n if_statement()\n do_statement()\n other_statement()", "repo_name": "AbnerCui/Python_Lectures", "sub_path": "topic1_introduction_of_Python/introductionOfPython.py", "file_name": "introductionOfPython.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.pi", "line_number": 7, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "36347711584", "text": "# cording: utf-8\n\nimport os\nfrom flask import Flask, request, redirect, render_template, flash\nfrom werkzeug.utils import secure_filename\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\nfrom tensorflow import keras\nimport cv2\nfrom cv2 import THRESH_BINARY\nimport math\n\n\nclasses = [\"0\",\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\nimage_size = 28\n\nUPLOAD_FOLDER = \"uploads\"\n\n# 一次的に切り出した画像を保存するフォルダ\nNUM_FOLDER = 'trim-num-file' # プログラムの最後でフォルダ内のファイルをすべて消去します\n\n\n\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\napp = Flask(__name__)\napp.debug = False\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n\n\n\nmodel = load_model('./model.h5', compile=False)#学習済みモデルをロード\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n flash('ファイルがありません')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('ファイルがありません')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n filepath = os.path.join(UPLOAD_FOLDER, filename) \n \n \n # ここから画像を切り出して予測する関数\n\n # 画像サイズが大きすぎる場合は小さくする\n def size_smaller(img):\n if (img.shape[0] > 4000 or img.shape[1] > 3000):\n img = cv2.resize(img, (img.shape[1] // 3, img.shape[0] // 3))\n elif (img.shape[0] > 2000 or img.shape[1] > 1000):\n img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2)) \n img_size = img.shape[0]\n \n return img, img_size\n\n\n # 背景が白ければ黒に反転する\n def white_to_black(img): \n yl = np.mean(img[0:img.shape[0], 0:img.shape[1] // 20])\n yr = np.mean(img[0:img.shape[0], img.shape[1] - img.shape[1] // 20:img.shape[1]])\n xt = np.mean(img[0:img.shape[0] // 20, 0:img.shape[1]])\n xb = np.mean(img[img.shape[0] - img.shape[0] // 20:img.shape[0], 0:img.shape[1]])\n if (yl > 140 and yl <= 255 or yr > 140 and yr <= 255 \n or xt > 140 and xt <= 255 or xb > 140 and xb <= 255):\n img = cv2.bitwise_not(img)\n \n return img\n\n\n # 背景がノイズの入っていない白か黒か判断する\n def noise_detect(img):\n m = []\n yl = np.mean(img[0:img.shape[0], 0:img.shape[1] // 20])\n yr = np.mean(img[0:img.shape[0], img.shape[1] - img.shape[1] // 20:img.shape[1]])\n xt = np.mean(img[0:img.shape[0] // 20, 0:img.shape[1]])\n xb = np.mean(img[img.shape[0] - img.shape[0] // 20:img.shape[0], 0:img.shape[1]])\n yl_min = np.min(img[0:img.shape[0], 0:img.shape[1] // 20])\n yr_min = np.min(img[0:img.shape[0], img.shape[1] - img.shape[1] // 
20:img.shape[1]])\n xt_min = np.min(img[0:img.shape[0] // 20, 0:img.shape[1]])\n xb_min = np.min(img[img.shape[0] - img.shape[0] // 20:img.shape[0], 0:img.shape[1]])\n yl_max = np.max(img[0:img.shape[0], 0:img.shape[1] // 20])\n yr_max = np.max(img[0:img.shape[0], img.shape[1] - img.shape[1] // 20:img.shape[1]])\n xt_max = np.max(img[0:img.shape[0] // 20, 0:img.shape[1]])\n xb_max = np.max(img[img.shape[0] - img.shape[0] // 20:img.shape[0], 0:img.shape[1]])\n m_min = [yl_min, yr_min, xt_min, xb_min]\n m_max = [yl_max, yr_max, xt_max, xb_max]\n if (yl == 255 and yr == 255 and xt == 255 and xb == 255):\n screen = \"w\"\n d_lebel = 255\n d_min = np.min(m_min)\n d_max = np.max(img)\n elif (yl == 0 and yr == 0 and xt == 0 and xb == 0):\n screen = \"b\"\n d_lebel = 0\n d_min = np.min(m_min)\n d_max = np.max(img)\n else:\n screen = \"c\"\n d_lebel = (yl + yr + xt + xb) // 4\n d_min = np.min(m_min)\n d_max = np.max(img)\n \n return img, screen, d_lebel, d_min, d_max, xb_min\n\n\n # 画像にフレーム状のマスクをかけて主に四隅の影によるノイズを除去する\n def mask(img, screen): \n if screen == 'w': \n return img\n elif screen =='b':\n # 画像の横サイズを変数に格納\n x_shape = img.shape[1] \n # 画像の縦サイズを変数に格納\n y_shape = img.shape[0] \n # 画像横サイズの15%を変数に格納しマスクの位置調節で使用\n bold_x = img.shape[1] // 15\n # 画像縦サイズの15%を変数に格納しマスクの位置調節で使用\n bold_y = img.shape[0] // 15\n # 画像横サイズの1/3を変数に格納しマスクの幅調整で使用\n divided_3 = img.shape[1] // 3\n # 画像縦サイズの1/2を変数に格納\n h = img.shape[0] // 2\n # 画像横サイズの1/2を変数に格納\n w = img.shape[1] // 2\n # 画像の横幅の半分を円の半径とする\n r = img.shape[1] // 2 \n # 時間短縮の為のステップ数を設定\n step = 15 \n # 右下のマスク \n for i in range(0, 90, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[y + h + bold_y:, x + w - bold_x:] = 0 \n # 左下のマスク\n for i in range(90, 180, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[y + h + bold_y:, :x + w + bold_x] = 0\n # 左上のマスク\n for i in range(180, 270, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[:y + w + bold_x, :x + w + bold_x] = 0 \n # 右上のマスク\n for i in range(270, 360, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[:y + w + bold_x, x + w - bold_x:] = 0 \n # フレームの継ぎ目のマスク\n for i in range(0, bold_x, step):\n img[divided_3:img.shape[0] - divided_3, 0:i] = 0\n img[divided_3:img.shape[0], img.shape[1] - bold_x:] = 0\n \n return img\n\n\n elif screen == 'c':\n # 画像の横サイズを変数に格納\n x_shape = img.shape[1] \n # 画像の縦サイズを変数に格納\n y_shape = img.shape[0] \n # 画像横サイズの15%を変数に格納しマスクの位置調節で使用\n bold_x = img.shape[1] // 15\n # 画像縦サイズの15%を変数に格納しマスクの位置調節で使用\n bold_y = img.shape[0] // 15\n # 画像横サイズの1/3を変数に格納しマスクの幅調整で使用\n divided_3 = img.shape[1] // 3\n # 画像縦サイズの1/2を変数に格納\n h = img.shape[0] // 2\n # 画像横サイズの1/2を変数に格納\n w = img.shape[1] // 2\n # 画像の横幅の半分を円の半径とする\n r = img.shape[1] // 2 \n # 背景が白かったら黒にする\n img = white_to_black(img)\n # 時間短縮の為のステップ数を設定\n step = 15 \n # 右下のマスク \n for i in range(0, 90, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[y + h + bold_y:, x + w - bold_x:] = 0 \n # 左下のマスク\n for i in range(90, 180, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[y + h + bold_y:, :x + w + bold_x] = 0\n # 左上のマスク\n for i in range(180, 270, step):\n x = int(math.cos(math.radians(i)) * r)\n y = int(math.sin(math.radians(i)) * r)\n img[:y + w + bold_x, :x + w + bold_x] = 0 \n # 右上のマスク\n for i in range(270, 360, step):\n x = int(math.cos(math.radians(i)) * r)\n y = 
int(math.sin(math.radians(i)) * r)\n img[:y + w + bold_x, x + w - bold_x:] = 0 \n # フレームの継ぎ目のマスク\n for i in range(0, bold_x, step):\n img[divided_3:img.shape[0] - divided_3, 0:i] = 0\n img[divided_3:img.shape[0], img.shape[1] - bold_x:] = 0\n \n return img\n\n\n # キャンバスの暗さに合わせて値を引き陰によるノイズを目立たなくする \n def smoothing_darkness(img, d_lebel):\n d = int(abs(d_lebel * 0.76))# 影カット閾値\n \n if img.shape[0] != 28:\n # 白の背景がもし暗かったら明るく修正する、キャンバスの大きさに合わせて判断する範囲を調整する \n if(np.mean(img[0:img.shape[0], 0:img.shape[0] // 12]) > 200 and \n np.mean(img[0:img.shape[0], 0:img.shape[0] // 12]) < 240):\n img = img - d \n elif(np.mean(img[0:img.shape[0], img.shape[1] - img.shape[0] // 12:img.shape[1]]) > 200 and \n np.mean(img[0:img.shape[0], img.shape[1] - img.shape[0] // 12:img.shape[1]]) < 240):\n img = img - d\n elif(np.mean(img[0:img.shape[0]//12,0:img.shape[1]])>200 and \n np.mean(img[0:img.shape[0] // 12, 0:img.shape[1]]) < 240):\n img = img - d\n elif(np.mean(img[img.shape[0] - img.shape[0] // 12:img.shape[0], 0:img.shape[1]]) > 200 and \n np.mean(img[img.shape[0] - img.shape[0] // 12:img.shape[0], 0:img.shape[1]]) < 240):\n img = img - d\n elif(np.mean(img[0:img.shape[0], 0:img.shape[0] // 12]) > 180 and \n np.mean(img[0:img.shape[0], 0:img.shape[0] // 12]) <= 200):\n img = img - d\n elif(np.mean(img[0:img.shape[0], img.shape[1] - img.shape[0] // 12:img.shape[1]]) > 180 and \n np.mean(img[0:img.shape[0], img.shape[1] - img.shape[0] // 12:img.shape[1]]) <= 200):\n img = img - d\n elif(np.mean(img[0:img.shape[0] // 12, 0:img.shape[1]]) > 180 and \n np.mean(img[0:img.shape[0] // 12, 0:img.shape[1]]) <= 200):\n img = img - d\n elif(np.mean(img[img.shape[0] - img.shape[0] // 12:img.shape[0], 0:img.shape[1]]) > 180 and \n np.mean(img[img.shape[0] - img.shape[0] // 12:img.shape[0], 0:img.shape[1]]) <= 200):\n img = img - d\n\n return img \n\n\n # 膨張 \n def dilate_img(img, screen):\n # 背景が白で膨張をすると文字が見えなくなる\n # screenはノイズのない背景が白のデータの場合\"w\",黒の場合\"b\"を返す\n # 画面が暗い場合は\"c\"を返す \n # 8近傍フィルタ\n filt = np.array([[1,1,1],\n [1,0,1],\n [1,1,1]], np.uint8)\n if screen == \"w\": # 背景が白ならば反転して2回膨張処理を行う\n img = cv2.bitwise_not(img) \n img = cv2.dilate(img, filt)\n img = cv2.dilate(img, filt)\n screen = 'b'\n elif screen == 'b':\n img = cv2.dilate(img, filt)\n img = cv2.dilate(img, filt)\n else: # 反転無しで2回膨張処理を行う\n #img = cv2.bitwise_not(img)\n img = cv2.dilate(img, filt)\n img = cv2.dilate(img, filt) \n screen = 'b'\n return img, screen\n\n\n def erode_img(img, screen):\n # ノイズのない背景が白のデータの場合\"w\",黒の場合\"b\"を返す関数で判定 \n # 8近傍フィルタ\n filt = np.array([[1,1,1],\n [1,0,1],\n [1,1,1]], np.uint8)\n if screen == \"w\": # 背景が白ならば反転して2回収縮処理を行う\n img = cv2.bitwise_not(img) \n img = cv2.erode(img, filt) \n screen = 'b'\n elif screen == 'b':\n img = cv2.erode(img, filt)\n else: # 反転無しで2回収縮処理を行う\n img = cv2.erode(img, filt)\n screen = 'b'\n\n return img, screen\n\n\n # 輪郭を抽出する\n def f_contours(img): \n \n # 背景が白い場合画像の枠が輪郭としてカウントされるので処理する\n img = white_to_black(img)\n \n # グレースケール\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # 2値化\n _, th_img = cv2.threshold(gray, 130, 255, THRESH_BINARY)\n \n # 輪郭を抽出する\n contours, _ = cv2.findContours(th_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n \n # 輪郭のx,y,w,hを格納するリストを作成\n \n n = [] # 輪���の位置\n p = [] #\n t = 0 #\n thresh = 0.021 #0.028~0.021ノイズカット閾値\n\n # 輪郭を格納する\n for i in contours: \n x, y, w, h = cv2.boundingRect(i)\n \n # 縦横比が10倍以上は削除する(ノイズカット)\n if (w // h > 0.1 and h // w < 10 or w // h < 10 and h // w > 0.1): \n # 画像サイズの2.1%以下の輪郭は削除する(ノイズカット)\n if (w > img.shape[1] * thresh and h > 
img.shape[0] * thresh): \n n.append([x, y, w, h])\n \n # 抽出した輪郭を左上から順番に並べる\n num_location = sorted(n, key=lambda x:(x[1] // (img.shape[1] // 3), x[0]))\n \n # 外接矩形を描画した画像と座標を返す \n return img, num_location\n\n\n # 切り出した画像を一旦ファイルに保存して認識した結果をリストに保存する\n def create_num_file(img, num_location, screen):\n \n # 切り出し他画像の一時保存ディレクトリ名\n dir_path = './' + NUM_FOLDER + '/' \n \n # 切り出す数字の余白設定\n if img.shape[0] <= 28: # 画像サイズが(28,28)以下は余白無し\n m1, m2, m3, m4 = 0, 0, 0, 0\n elif img.shape[0] <= 50: # 画像サイズが(50,50)以下は余白1\n m1, m2, m3, m4 = 1, 1, 1, 1\n else: # 画像サイズが大きい場合の余白は10\n m1, m2, m3, m4 = 10, 10, 10, 10\n \n # 画像をファイルに保存する\n if img.shape[0] <= 28 or img.shape[1] <= 28: # (28,28)の場合はそのままファイルに保存する \n cv2.imwrite(os.path.join(NUM_FOLDER, \"num0\" + \".png\"), img)\n else: \n # 画像サイズが大きければ数字の数だけ画像を切り出してファイルを保存する \n for i in range(0, len(num_location)):\n x, y, w, h = num_location[i] \n \n # 画像を正方形に切り取る\n n1, n2 = abs((h - w) // 2), abs((h - w) // 2)\n\n # エラーにならない為の処理 \n if y - m1 < 0: # 切り取る幅が画像の上をオーバー\n m1 = 0\n if y + h + m2 > img.shape[0]: # 切り取る幅が画像の下をオーバー \n m2 = 0\n if x - n1 - m3 < 0: # 切り取る幅が画像の左端をオーバー\n m3 = 0 \n n1 = 0\n if x + w + n2 + m4 > img.shape[1]: # 切り取る幅が画像の右端をオーバー \n m4 = 0\n n2 = 0\n \n # 画像を余白を付けて切り出す\n trim_img = img[y - m1:y + h + m2, x - n1 - m3:x + w + n2 + m4]\n\n # 切り出された画像の大きさに応じて線を太くする\n if h > img.shape[0] * 0.9 and w > img.shape[1] * 0.9:\n # 大きい画像は2度関数を使用\n trim_img, screen = dilate_img(trim_img, screen) \n trim_img, screen = dilate_img(trim_img, screen)\n # 切り出した画像をそれぞれファイルに保存する\n cv2.imwrite(os.path.join(NUM_FOLDER, 'num' + str(i) + '.png'), trim_img)\n elif h > img.shape[0] * 0.5 and w > img.shape[1] * 0.5:\n # 元画像の半分程度の画像は1度関数を使用\n trim_img, screen = dilate_img(trim_img, screen)\n # 切り出した画像をそれぞれファイルに保存する\n cv2.imwrite(os.path.join(NUM_FOLDER, 'num' + str(i) + '.png'), trim_img)\n else: \n # 切り出した画像をそれぞれファイルに保存する\n cv2.imwrite(os.path.join(NUM_FOLDER, 'num' + str(i) + '.png'), trim_img)\n return dir_path \n\n\n def from_paint(img, screen):\n # グレースケール化\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # matplotlibで表示できるように加工するペイントで作ったPNGはこれが必要\n gray1 = cv2.cvtColor(gray, cv2.COLOR_BGR2RGB)\n # 2値化\n print(img.shape)\n _, th_img = cv2.threshold(gray1, 130, 255, THRESH_BINARY)\n # 2度膨張処理を行う\n if (img.shape[0] <= 50 or img.shape[1] <= 50):\n d_img = th_img\n else:\n d_img, screen = dilate_img(th_img, screen) \n # 2度膨張処理を行う\n if (img.shape[0] <= 500 or img.shape[1] <= 500):\n d1_img = d_img\n else:\n d1_img, screen = dilate_img(d_img, screen)\n # 2度膨張処理を行う\n if (img.shape[0] <= 100 or img.shape[1] <= 100):\n d2_img = d1_img\n else:\n d2_img, screen = dilate_img(d1_img, screen)\n # 輪郭を抽出する\n c_img, num_location = f_contours(d2_img) \n # 画像を切り出してファイルに保存する\n dir_path = create_num_file(c_img, num_location, screen) \n #print('****paint****')\n\n return dir_path\n\n\n def from_camera(img, screen, d_min, d_max, xb_min): \n # 四隅のノイズを除去する\n m_img = mask(img, screen)\n # グレースケール化\n gray = cv2.cvtColor(m_img, cv2.COLOR_BGR2GRAY)\n # matplotlibで表示できるように加工するペイントで作った画像はこれが必要\n p_img = cv2.cvtColor(gray, cv2.COLOR_BGR2RGB)\n # 2値化\n _, th_img = cv2.threshold(p_img, 110, 255, THRESH_BINARY)#130 薄い画像は20\n # 2度膨張処理を行う\n if (img.shape[0] <= 160 or img.shape[1] <= 160):\n d_img = th_img\n else:\n d_img, screen = dilate_img(th_img, screen)\n # 四隅のノイズを除去する\n if (img.shape[0] > 1000 and img.shape[1] >1000):\n m1_img = mask(d_img, screen)\n else:\n m1_img = d_img\n # 輪郭を抽出する\n c_img, num_location = f_contours(m1_img)\n # 画像を切り出してファイルに保存する\n dir_path = create_num_file(c_img, num_location, 
screen)\n #print('****camera****')\n\n return dir_path\n\n\n def from_scaner(img, screen):\n # 四隅のノイズを除去する\n m_img = mask(img, screen)\n # グレースケール化\n gray = cv2.cvtColor(m_img, cv2.COLOR_BGR2GRAY)\n # matplotlibで表示できるように加工するペイントで作ったPNGはこれが必要\n p_img = cv2.cvtColor(gray, cv2.COLOR_BGR2RGB)\n # 2値化\n _, th_img = cv2.threshold(p_img, 130, 255, THRESH_BINARY)\n # 2度膨張処理を行う\n if (img.shape[0] <= 50 or img.shape[1] <= 50):\n d_img = th_img\n else:\n d_img, screen = dilate_img(th_img, screen)\n # 収縮処理を行う\n if (img.shape[0] <= 100 or img.shape[1] <= 100):\n e_img = d_img\n else:\n e_img, screen = erode_img(d_img, screen)\n # 四隅のノイズを除去する\n m1_img = mask(e_img, screen)\n # 2度膨張処理を行う\n if (img.shape[0] <= 100 or img.shape[1] <= 100):\n d1_img = m1_img\n else:\n d1_img, screen = dilate_img(m1_img, screen)\n # 輪郭を抽出する\n c_img, num_location = f_contours(d1_img) \n # 画像を切り出してファイルに保存する\n dir_path = create_num_file(c_img, num_location, screen) \n #print('****scaner****')\n\n return dir_path\n\n\n # ここから画像を受け取り切り出して予測する\n\n # 画像を読み込む\n img = cv2.imread('./' + filepath)\n\n # 画像をNumPy配列に変換\n img = np.array(img)\n\n # 処理速度向上のために画像サイズを小さくする\n _ ,img_size = size_smaller(img)\n\n # 主にカメラやスキャナー用\n img, screen, d_lebel, d_min, d_max, xb_min = noise_detect(img)\n\n # 暗い部分のある画像をスムーズにする\n lite_img = smoothing_darkness(img, d_lebel)\n\n # ペイント画像、カメラ画像、スキャナで処理を分ける \n if img_size != 28 and d_lebel == 0 or d_lebel == 255:\n # ペイントからの画像を処理\n dir_path = from_paint(lite_img, screen)\n elif (img_size != 28 and d_lebel > 160 and d_lebel <190 or d_lebel >3 and d_lebel < 30):\n # カメラから画像を処理\n dir_path = from_camera(lite_img, screen, d_min, d_max, xb_min)\n elif (img_size != 28 and d_lebel >= 190 and d_lebel <255):\n # スキャナーからの画像を処理 \n dir_path = from_scaner(lite_img, screen) \n elif img_size == 28: #(28,28)サイズの画像は複数文字認識無し\n # 輪郭を抽出する\n _, num_location = f_contours(lite_img) \n # 画像を切り出してファイルに保存する\n dir_path = create_num_file(lite_img, num_location, screen)\n\n\n p = [] # 認識結果を格納するリスト\n q = 0 # 切り出して書き込んだファイル数をカウントする\n\n # 指定のディレクトリ内のファイルを読み込み数字を認識する\n for i in os.listdir(dir_path)[1:]:\n # 切り取った画像を読み込みむ\n img = cv2.imread(dir_path + i)\n # グレースケール化\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #(28,28)にリサイズ \n img = cv2.resize(img,(image_size,image_size))\n img = image.img_to_array(img)\n data = np.array([img]) \n \n # 変換したデータをモデルに渡して予測する\n result = model.predict(data)[0] \n \n # リストに認識結果を格納する\n if np.max(result) < 0.5: # 認識結果が0.5より小さい場合は(*)を表示する\n p.append('(*)')\n elif np.max(result) >= 0.5: # 認識結果が0.5以上の場合は認識結果を格納する\n predicted = result.argmax()\n p.append(classes[predicted]) \n q = q + 1 \n pred_answer = \"これは \" + ''.join(p) + \" です\"\n print(str(pred_answer))\n p=[] # リストをリセットする \n\n # dir_pathフォルダ内の切り出した数字ファイルを削除します\n for i in range(q): #書き込んだファイル分だけ消去する\n # dir_path内の一番初めのファイルを削除 \n os.remove(dir_path + os.listdir(dir_path)[1]) # gitkeepファイルを入れたのでインデックスは1\n # アップロードフォルダ内のファイルを削除する\n os.remove('./' + UPLOAD_FOLDER + '/' + os.listdir(UPLOAD_FOLDER)[1])\n return render_template(\"index.html\",answer=pred_answer)\n\n return render_template(\"index.html\",answer=\"\")\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 8080))\n app.run(host ='0.0.0.0',port = port)\n\n\n\n\n\n\n \n \n \n", "repo_name": "tsudatch/flask-mnist-a4", "sub_path": "mnist.py", "file_name": "mnist.py", "file_ext": "py", "file_size_in_byte": 26705, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "50", "api": [{"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, 
{"api_name": "tensorflow.keras.models.load_model", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 111, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 141, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 141, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 142, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 142, "usage_type": "call"}, {"api_name": "math.cos", 
"line_number": 146, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 146, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 147, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 147, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 151, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 151, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 152, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 152, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 156, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 156, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 157, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 157, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 190, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 190, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 191, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 191, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 195, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 195, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 196, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 196, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 200, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 200, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 201, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 201, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 205, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 205, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 206, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 206, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 258, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 260, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 261, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 262, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 266, "usage_type": 
"call"}, {"api_name": "cv2.dilate", "line_number": 269, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 280, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_not", "line_number": 282, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 283, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 286, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 288, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 301, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 301, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 304, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 304, "usage_type": "argument"}, {"api_name": "cv2.findContours", "line_number": 307, "usage_type": "call"}, {"api_name": "cv2.RETR_EXTERNAL", "line_number": 307, "usage_type": "attribute"}, {"api_name": "cv2.CHAIN_APPROX_SIMPLE", "line_number": 307, "usage_type": "attribute"}, {"api_name": "cv2.boundingRect", "line_number": 318, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 349, "usage_type": "call"}, {"api_name": "os.path", "line_number": 349, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 379, "usage_type": "call"}, {"api_name": "os.path", "line_number": 379, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 384, "usage_type": "call"}, {"api_name": "os.path", "line_number": 384, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 393, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 393, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 395, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 395, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 398, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 398, "usage_type": "argument"}, {"api_name": "cv2.cvtColor", "line_number": 427, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 427, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 429, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 429, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 431, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 431, "usage_type": "argument"}, {"api_name": "cv2.cvtColor", "line_number": 455, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 455, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 457, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 457, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 459, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 459, "usage_type": "argument"}, {"api_name": "cv2.imread", "line_number": 489, "usage_type": "call"}, {"api_name": 
"numpy.array", "line_number": 492, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 524, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 526, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 528, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 528, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 530, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image.img_to_array", "line_number": 531, "usage_type": "call"}, {"api_name": "tensorflow.keras.preprocessing.image", "line_number": 531, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 540, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 551, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 551, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 553, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 553, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 554, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 556, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 560, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 560, "usage_type": "attribute"}]} +{"seq_id": "14546340380", "text": "r'''This module provides `pyparsing` grammar for Tripoli-4 output listings.\n\n.. role :: parsing_var(literal)\n.. |keff| replace:: k\\ :sub:`eff`\n.. |kij| replace:: k\\ :sub:`ij`\n.. _pyparsing: https://pythonhosted.org/pyparsing/\n\nDocumentation on the ``pyparsing`` package can be found at `pyparsing`_.\n\nTransformation from ``pyparsing.ParseResults`` to more standard python objects,\nincluding :obj:`numpy` arrays, is done with :mod:`~.transform`, calling\n:mod:`~valjean.eponine.tripoli4.common`.\n\nGeneralitites\n-------------\n\n* This parser only parses the result part of the listing (selection done in\n :mod:`~valjean.eponine.tripoli4.scan`).\n* It takes into account all responses in ``qualtrip`` database up to Tripoli-4,\n version 10.2.\n* If a response is not taken into account parsing will fail:\n\n * with a big, ugly message ending by location of the end of successful\n parsing in the result string (possible to print it) → normally where starts\n your new response\n * it seems to end normally, but did not in reality. 
One of the best checks in\n that case is to test if the ``endflag`` in\n :mod:`~valjean.eponine.tripoli4.scan` was read in the parser, usually not.\n Then the new response probably has to be added.\n\n* A general parser is proposed for use in the file, but other parsers can be\n built from the partial parsers written here\n* Numbers are automatically converted to :obj:`numpy` numbers (possibility to\n choose the dtype used for numbers)\n* Keywords and most of the variables used to build parsers are private\n\n\nOrganisation\n------------\n\nThis module is divided into 3 parts:\n\nkeywords:\n list of all keywords used to parse the listings; this part is\n important as these keywords trigger the parsing\nparsers:\n parsers for each part of the listing (introduction, mesh,\n spectra, general responses, |keff|, etc.)\ngeneral parser:\n parser to parse the full listing, taking into account all\n current responses (present in V&V)\n\nKeywords are in most of the cases used as flags and suppressed when data are\nstored.\n\nA first structure is designed when building the parser results as lists and\ndictionaries in the ``pyparsing.ParseResults``. Then `parse actions`\nare used to convert them to standard python or :obj:`numpy` objects. These\n`parse actions`, called with\n``pyparsing.ParserElement.setParseAction``, can be found in :mod:`~.transform`.\n\nMain parsers blocks\n```````````````````\nThe main parsers blocks are defined at the end of the module, named\n:parsing_var:`mygram` and :parsing_var:`response`. The default parser is\n:parsing_var:`mygram`.\n\nTypically, each result block in the listing should start with the `intro`\nblock, parsed by :parsing_var:`intro`, and end with at least one `runtime`\nblock, parsed by :parsing_var:`runtime`. This part follows the\n:mod:`~valjean.eponine.tripoli4.scan`: :obj:`str` starting with\n``'RESULTS ARE GIVEN'`` and ending with ``'simulation time'``,\n``'exploitation time'`` or ``'elapsed time'``.\n\nBetween these blocks can be found the data blocks. The major ones are:\n\n* one or more responses, driven by the keyword ``'RESPONSE FUNCTION'``,\n* the editions of IFP adjoint criticality,\n* the \"default\" |keff| block, in most of the cases at the end of the listing,\n* the *contributing particles block*, mainly in first pass listings,\n* the perturbation block,\n* an optional additional `runtime` block.\n\nMain data blocks are described below (results taken into account, main\nfeatures).\n\nResponse block, parser :parsing_var:`response`\n``````````````````````````````````````````````\nThe core of the listings is the list of responses, including all the required\nscores. 
This big block is constructed as a :obj:`list` of :obj:`dict`, each\none representing a response (key ``'list_responses'`` in the final result).\n\nResponses are constructed as:\n\n* response introduction containing its definition, parsed by\n :parsing_var:`respintro`:\n\n * a description of the response parsed by :parsing_var:`respdesc` including:\n\n * ``'RESPONSE FUNCTION'`` keyword as mandatory (as far as we know)\n * ``'RESPONSE NAME'``, ``'SCORE NAME'`` and ``'ENERGY DECOUPAGE NAME'``\n that are present in most of the cases\n\n * more characteristics of the response, parsed by :parsing_var:`respcarac`,\n like:\n\n * considered particle (``'PARTICULE'`` in the listing)\n * nucleus on which the reaction happens (if ``'RESPONSE FUNCTION'`` is a\n ``'REACTION'``)\n * temperature\n * composition of the volumes considered\n * concentration\n * reaction considered (usually given as codes)\n * others like DPA type, required arguments, mode, filters, etc.\n\n* responses themselves, using parser :parsing_var:`responseblock`, are various:\n\n * responses including *score* description, all included in the\n :parsing_var:`scoreblock` parser. More than one can be present; they are\n grouped in the :parsing_var:`listscoreblock` parser.\n :parsing_var:`scoreblock` parser contains:\n\n * score description (parser :parsing_var:`scoredesc`) contains the score\n mode (``'TRACK'``, ``'SURF'`` or ``'COLL'``) and the score zone\n (currently taken into account: mesh, results cumulated on all geometry or\n on all sources, Volume, Volume Sum, Frontier, Frontier Sum, Point, Cells\n and Maille)\n\n * results block, where at least one of these results can be found, parsed\n by the following parsers:\n\n * :parsing_var:`spectrumblock`: spectrum\n * :parsing_var:`meshblock`: mesh\n * :parsing_var:`vovspectrumblock`: spectrum with variance of variance\n * :parsing_var:`entropy`: entropy results (Boltzmann and Shannon\n entropies)\n * :parsing_var:`medfile`: location of optional MED file\n * :parsing_var:`genericscoreblock`: default result integrated over energy\n * :parsing_var:`uncertblock`: uncertainty results\n * :parsing_var:`uncertintegblock`: uncertainties on integrated results\n over energy\n * :parsing_var:`gbblock`: Green bands results\n\n * |keff| presented as a generic response, possibly transformed into\n :obj:`numpy.matrix` (parser :parsing_var:`keffblock`)\n * |kij| results: matrix, eigenvalues, eigenvectors (parser\n :parsing_var:`kijres`)\n * |kij| sources (parser :parsing_var:`kijsources`)\n * Adjoint related results (parser :parsing_var:`adjointres`): scores ordered\n by precursors and families, by perturbation index, by cycle length or\n sensitivities (this last case is represented as a 3-dimensional\n :obj:`numpy.ndarray`: incident energy, energy (\"leaving neutron\"),\n direction cosine (µ)). For the moment this is only for the IFP method;\n in the near future also for the Wielandt method\n * default result integrated over energy where no scoring mode or zone is\n specified (parser :parsing_var:`defintegratedres`)\n * perturbation results (parser :parsing_var:`perturbation`)\n\n\nOther parsers\n`````````````\nVarious other blocks can appear in the Tripoli-4 listing, located at the same\nlevel as the response block. 
These parsers and the associated dictionary key\n(same level as ``'list_responses'``) are:\n\n* :parsing_var:`ifpadjointcriticality`: edition of IFP adjoint criticality, key\n ``'ifp_adjoint_crit_edition'``;\n* :parsing_var:`autokeffblock`: \"default\" |keff| block, containing for example\n the best estimation of |keff| using variable number of discarded batches, key\n ``'keff_auto'``;\n* :parsing_var:`contribpartblock`: *contributing particles block*, key\n ``'contributing_particles'``\n* :parsing_var:`perturbation`: perturbation results, containing a description\n of the calculation of the perturbation followed by the perturbation result\n presented like a usual response (spectrum, mesh, etc. depending on required\n score), key ``'perturbation'``\n* :parsing_var:`runtime`: simulation, exploitation or elapsed time.\n\n\n.. todo::\n\n Adjoint results: for the moment only IFP is really parsed. The grammar has\n already been more or less adapted to welcome the Wielandt method, which\n will have the same kind of outputs (renaming as adjoint_res for example).\n No key is set for the moment to specify the method; it can be obtained from\n the response function itself. Adjoint criticality editions are only done\n for IFP; this may change when the same becomes available for Wielandt.\n Some renaming may also be needed.\n\n'''\n\nfrom pyparsing import (Word, Keyword, White, alphas, alphanums,\n Suppress, Optional, LineEnd, CaselessKeyword,\n Group, OneOrMore, ZeroOrMore, Forward, originalTextFor,\n tokenMap, delimitedList, printables, replaceWith)\nfrom pyparsing import pyparsing_common as pyparscom\nfrom . import transform as trans\nfrom .transform import compose2\nfrom .dump import dump_in_logger\nfrom ... import LOGGER\n\n_fnums = pyparscom.fnumber.setParseAction(tokenMap(trans.common.FTYPE))\n_inums = pyparscom.number.setParseAction(tokenMap(trans.common.ITYPE))\n\n###################################\n# KEYWORDS #\n###################################\n\n# General keywords\n_integratedres_kw = Keyword(\"INTEGRATED RESULTS\")\n_numbatchsused_kw = (Keyword(\"number of\")\n + (Keyword(\"batches\") | Keyword(\"batch\"))\n + Optional(Keyword(\"used\")))\n_numbatchs1stdiscarded_kw = Keyword(\"number of first discarded batches\")\n_notconverged_kw = (Keyword(\"NOT YET CONVERGED\")\n | CaselessKeyword(\"Not converged\"))\n_unknown_kw = Keyword(\"unknown\")\n_unavailable_kw = Keyword(\"unavailable\")\n_units_kw = Keyword(\"Units:\")\n_warning_kw = CaselessKeyword(\"Warning\")\n_endtable = LineEnd() + LineEnd()\n\n# Introduction keywords\n_sourceintensity_kw = Keyword(\"RESULTS ARE GIVEN FOR SOURCE INTENSITY\")\n_meanweightleakage_kw = Keyword(\"Mean weight leakage\")\n_meanweightleakagein_kw = Keyword(\"Mean weight leakage inside\")\n_edbatchnum_kw = Keyword(\"Edition after batch number\")\n_meanweightrestartpart_kw = Keyword(\"Mean weight of restarted particles :\")\n\n# End of edition keywords\n_simulationtime_kw = Keyword(\"simulation time (s)\")\n_exploitationtime_kw = Keyword(\"exploitation time (s)\")\n_elapsedtime_kw = Keyword(\"elapsed time (s)\")\n_rdmgenerator_kw = Keyword(\"Type and parameters of random generator \"\n \"at the end of simulation\")\n# _rdmgenerator_kw = Keyword(\"Type and parameters of random generator\" +\n# (\"at the end of simulation\"|\"after batch\"))\n_normalcompletion_kw = Keyword(\"NORMAL COMPLETION\")\n\n# Response description keywords\n_respfunction_kw = Keyword(\"RESPONSE FUNCTION\")\n_respname_kw = Keyword(\"RESPONSE NAME\")\n_scorename_kw = Keyword(\"SCORE 
NAME\")\n_energysplitname_kw = Keyword(\"ENERGY DECOUPAGE NAME\")\n_nusplitname_kw = Keyword(\"DECOUPAGE NAME\")\n_respfiltered_kw = Keyword(\"RESPONSE FILTERED BY\")\n_respfiltcompos_kw = Keyword(\"COMPOSITIONS\")\n_particule_kw = Keyword(\"PARTICULE\")\n_incparticle_kw = Keyword(\"INCIDENT PARTICULE\")\n_noise_equation_kw = Keyword(\"NOISE EQUATION\")\n_reactiononnucl_kw = Keyword(\"reaction on nucleus\")\n_temperature_kw = Keyword(\"temperature\")\n_composition_kw = Keyword(\"composition\")\n_concentration_kw = Keyword(\"concentration\")\n_reaction_kw = Keyword(\"reaction consists in\")\n_required_kw = Keyword(\"REQUIRED\")\n_dpatype_kw = Keyword(\"DPA TYPE:\")\n_mode_kw = Keyword(\"MODE :\")\n_inducedbyint_kw = Keyword(\"INDUCED BY INTERACTION :\")\n_fxptcontrib_kw = Keyword(\"FXPT CONTRIBUTION\")\n_spectrumresp_kw = Keyword(\"SPECTRUM\")\n_filters_kw = Keyword(\"Score filtered by volume\")\n\n# Scoring description\n_scoremode_kw = Keyword(\"scoring mode\")\n_scorezone_kw = Keyword(\"scoring zone\")\n_scoremesh_kw = (Keyword(\"Results on a mesh\")\n .setParseAction(replaceWith('Mesh')))\n_scoremeshinfobins_kw = Keyword(\"(x,y,z)\")\n_scoreallgeom_kw = (Keyword(\"Results cumulated on all geometry\")\n .setParseAction(replaceWith('All geometry')))\n_scoreallsources_kw = (Keyword(\"Results cumulated on all sources\")\n .setParseAction(replaceWith('All sources')))\n_scorevol_kw = Keyword(\"Volume\")\n_scorevolvol_kw = Keyword(\"num of volume\")\n_scorevolumeunit_kw = Keyword(\"Volume in\")\n_scorevolumeint_kw = (Keyword(\"The result is integrated over the volume\")\n | Keyword(\"The result is integrated in volume\")\n ).setParseAction(replaceWith('volume integrated'))\n_scorevolumenotint_kw = (\n Keyword(\"The volume has been provided by the user \"\n \"(the user requested a score per unit volume)\")\n | Keyword(\"The volume has been calculated by Tripoli-4 \"\n \"or provided by the user\")).setParseAction(\n replaceWith('per unit volume'))\n_scorevolsum_kw = Keyword(\"Volume Sum\")\n_scorevolsumvol_kw = Keyword(\"num of volumes\")\n_scorevolumesum_kw = Keyword(\"Total volume in cm3\")\n_scorevolumecell_kw = Keyword(\"Cell volume in cm3\")\n_scoresurf_kw = Keyword(\"Frontier\")\n_scoresurfvol_kw = Keyword(\"volumes\")\n_scoresurface_kw = Keyword(\"Surface in cm2\")\n_scoresurfaceint_kw = Keyword(\"The surface area has been provided by the user \"\n \"(the user requested a score per unit area)\")\n_scoresurfacenotint_kw = Keyword(\"The result is integrated over the surface\")\n_scoresurfsum_kw = Keyword(\"Frontier Sum\")\n_scoresurfsumfront_kw = Keyword(\"num of frontiers\")\n_scoresurfacesum_kw = Keyword(\"Total surface in cm2\")\n_scorepoint_kw = Keyword(\"Point\")\n_scorecell_kw = Keyword(\"Cells\")\n_scorecelldet_kw = Keyword(\"(numvol,depth,imaille,jmaille,kmaille...)\")\n_scoremaille_kw = Keyword(\"Maille\")\n_scoremaillevol_kw = Keyword(\"num of volume\")\n_scoremailledepth_kw = Keyword(\"depth of lattice\")\n_scoremaillecell_kw = Keyword(\"num of cell\")\n\n# Correspondence table (volumes and their names)\n_corresptable_kw = Keyword(\"Correspondence table between volumes \"\n \"ids and names :\")\n_vol_kw = CaselessKeyword(\"Volume\")\n\n# KEFF keywords\n_notcvg_exp_kw = Keyword(\"(invalid keff domain)\")\n_fullcomb_kw = Keyword(\"full combined estimator\")\n_bestresdiscbatchs_kw = Keyword(\"best results are obtained with discarding\")\n_correlations_kw = Group(Keyword(\"estimators\")\n + Keyword(\"correlations\")\n + Keyword(\"combined values\")\n + Keyword(\"combined 
sigma%\"))\n_estimator_kw = Keyword(\"ESTIMATOR\")\n_equivkeff_kw = Keyword(\"Equivalent Keff:\")\n_warn_combkeff_kw = (\n Keyword(\"One of the Keffectives is null and should not be\")\n + Keyword(\"Combined Keffectives will not be edited\"))\n_warn_fixsourcekeff_kw = (\n Keyword(\"In FIXED_SOURCES_CRITICITY mode, the keff result\")\n + Keyword(\"is actually an overall multiplication factor \"\n \"(cf User's Guide)\"))\n\n# Time steps\n_timestepnum_kw = Keyword(\"TIME STEP NUMBER\")\n_timestepmin_kw = Keyword(\"time min. =\")\n_timestepmax_kw = Keyword(\"time max. =\")\n\n# Angular zones\n_muangzone_kw = Keyword(\"MU ANGULAR ZONE :\")\n_mumin_kw = Keyword(\"mu min. =\")\n_mumax_kw = Keyword(\"mu max. =\")\n_phiangzone_kw = Keyword(\"PHI ANGULAR ZONE :\")\n_phimin_kw = Keyword(\"phi min. =\")\n_phimax_kw = Keyword(\"phi max. =\")\n\n# Spectrum keywords\n_spectrum_kw = Keyword(\"SPECTRUM RESULTS\")\n_spgroupwunit_kw = Keyword(\"group (MeV)\")\n_spgroup_kw = Keyword(\"group\")\n_spscovlethargy_kw = Keyword(\"score/lethargy\")\n_spvov_kw = Keyword(\"vov\")\n_nuspectrum_kw = Keyword(\"NU RESULTS\")\n_nusprange_kw = Keyword(\"range\")\n_zaspectrum_kw = Keyword(\"ZA RESULTS\")\n_zaspid_kw = Keyword(\"(Z,A)\")\n_spscore_kw = Keyword(\"score\")\n_spsigma_kw = Keyword(\"sigma_%\")\n\n# Mesh keywords\n_energyrange_kw = Keyword(\"Energy range\")\n\n# MED files\n_creationmedfile_kw = Keyword(\"Creating MED output file\")\n_creationfile_kw = Keyword(\"# Creating output file\")\n_medmeshid_kw = Keyword(\"MED mesh id\")\n\n# Entropy\n_boltzmannentropy_kw = Keyword(\"Boltzmann Entropy of sources =\")\n_shannonentropy_kw = Keyword(\"Shannon Entropy of sources =\")\n\n# Scores ordered by nuclei and precursor families\n_nucleiorder_kw = Keyword(\"Scores for nuclei contributions are ordered \"\n \"according to the user list:\")\n_familiesorder_kw = Keyword(\"Scores are ordered from family i = 1 to i = MAX:\")\n_nucleifamilyorder_kw = Keyword(\"Scores are ordered \"\n \"by nuclei and by families:\")\n_nucleus_kw = Keyword(\"Nucleus :\")\n# Perturbation order\n_perturborder_kw = Keyword(\"Scores are ordered by perturbation index:\")\n# Sensitivities\n_sensitivitytypeorder_kw = Keyword(\"Scores are ordered by type\")\n_sensitivityindexorder_kw = Keyword(\"and index:\")\n_sensitivity_kw = Keyword(\"SENSITIVITY :\")\n_sensitivityreactionrate_kw = Keyword(\"REACTION_RATE_RATIO :\")\n_sensitivity_energyint_kw = Keyword(\"Energy integrated S\")\n_sensitivity_incenergy_kw = Keyword(\"Incident energy interval in MeV:\")\n_sensitivity_dircos_kw = Keyword(\"Direction cosine interval:\")\n\n# Variance of variance\n_vovstar_kw = Keyword(\"variance of variance* :\")\n_sensibtomaxval_kw = Keyword(\"sensibility to maximum value:\")\n_vov_kw = Keyword(\"variance of variance :\")\n\n# Greenbands\n_gbspectrumstep_kw = Keyword(\"* SOURCE SPECTRUM STEP NUMBER :\")\n_gbenergymin_kw = Keyword(\"source energy min. =\")\n_gbenergymax_kw = Keyword(\"source energy max. 
=\")\n_gbsourcenum_kw = Keyword(\"SOURCE NUMBER :\")\n_gbsourcetab_kw = Keyword(\"SOURCE TABULATION :\")\n\n# KIJ matrix\n_kijlefteigenval_kw = Keyword(\"left_eigenvalues called\")\n_kijmkeff_kw = Keyword(\"kij-keff =\")\n_kijdomratio_kw = Keyword(\"dominant ratio =\")\n_kijeigenval_kw = Keyword(\"eigenvalues (re, im)\")\n_kijeigenvec_kw = Keyword(\"eigenvectors\")\n_kijeigenvecnotprint_kw = Keyword(\"KIJ eigenvectors not printed, \"\n \"increase maximum dump size if needed\")\n_kijmatrix_kw = Keyword(\"KIJ_MATRIX :\")\n_kijmatrixnotprint_kw = Keyword(\"KIJ matrix not printed, \"\n \"increase maximum dump size if needed\")\n\n# KIJ sources\n_kijsources_kw = Keyword(\"SOURCES VECTOR :\")\n_kijsourcesorder_kw = Keyword(\"Sources are ordered following\")\n\n# KIJ keff\n_kijfissilevol_kw = Keyword(\"number of fissile volumes :\")\n_kijlistfissilevol_kw = Keyword(\"list of fissile volume numbers : \")\n_kijbatchs_kw = Keyword(\"number of last batches kept :\")\n_kijkeffevid_kw = (Keyword(\"EIGENVECTOR :\") + Keyword(\"index\")\n + Keyword(\"source rate\"))\n_kijkeffmat_kw = Keyword(\"K-IJ MATRIX :\")\n_kijkeffstddevmat_kw = Keyword(\"STANDARD DEVIATION MATRIX :\")\n_kijkeffsensibilitymat_kw = Keyword(\"SENSIBILITY MATRIX :\")\n\n# Adjoint results (IFP for the moment)\n# if IFP is changed to Wielandt in _cvgstat_kw when using Wielandt method this\n# is the way to get the method name\n# convergence statistics\n_cvgstat_kw = Keyword(\"Scores for IFP convergence statistics are ordered \"\n \"from cycle length L = 1 to L = MAX:\")\n# IFP adjoint criticality edition\n_ifpadjcriticality_kw = Keyword(\"IFP_ADJOINT_CRITICALITY EDITION\")\n_ifpadjcyclelength_kw = Keyword(\"IFP CYCLE LENGTH =\")\n_ifpadjnormalizedres_kw = Keyword(\"RESULTS ARE NORMALIZED\")\n_ifpadjvol_kw = Keyword(\"Vol\")\n_ifpadjminmax_kw = Keyword(\"(min | max)\")\n_ifpadjscore_kw = Keyword(\"score [a.u.]\")\n\n# Results on spherical harmonics\n_shr_nb_space_bins_kw = Keyword('Number of space bin :')\n_shr_nb_u_space_bins_kw = Keyword('Number of space bin in u dimension:')\n_shr_nb_v_space_bins_kw = Keyword('Number of space bin in v dimension:')\n_shr_nb_w_space_bins_kw = Keyword('Number of space bin in w dimension:')\n_shr_nb_energy_bins_kw = Keyword('Number of energetic bins :')\n_shr_nb_inc_energy_bins_kw = Keyword('Number of incident energy bins :')\n_shr_lmax_kw = Keyword('Maximum L number of moments :')\n_shr_score_name_kw = Keyword('Score :')\n_shr_space_kw = Keyword('Space bin :')\n_shr_energy_kw = Keyword('Energy range (in MeV):')\n_shr_incident_energy_kw = Keyword('Outgoing data for incident energy '\n 'bin boundaries :')\n_shr_l_kw = Keyword('l')\n_shr_m_kw = Keyword('m')\n_shr_score_kw = Keyword('Score')\n_shr_sigma_kw = Keyword('Sigma (%)')\n\n# Perturbations\n_perturbation_kw = Keyword(\"================== Perturbation result edition \"\n \"======================\")\n_perturank_kw = Keyword(\"Perturbation rank =\")\n_pertumethod_kw = Keyword(\"Method :\")\n_pertuorder_kw = Keyword(\"Order:\")\n_pertutype_kw = Keyword(\"Perturbation de type\")\n_pertucompo_kw = Keyword(\"Composition :\")\n\n# Uncertainties results (linked to perturbations ?)\n_uncertres_kw = Keyword(\"UNCERTAINTY RESULTS\")\n_uncertgp_kw = Keyword(\"group (Mev)\")\n_uncertsig2_kw = Keyword(\"sigma2(means)\")\n_uncertmean_kw = Keyword(\"mean(sigma_n2)\")\n_uncertsig_kw = Keyword(\"sigma(sigma_n2)\")\n_uncertfisher_kw = Keyword(\"fisher test\")\n_uncertintegres_kw = Keyword(\"UNCERTAINTY ON ENERGY INTEGRATED RESULTS\")\n\n# Creation jdds\n_nbcontribpart_kw 
= Keyword(\"NUMBER OF CONTRIBUTING PARTICLES\")\n_endcontribpart_kw = Keyword(\"--- end of CONTRIBUTING PARTICLES ---\")\n\n# Symbol lines\n_star_line = Suppress(Word('*'))\n_equal_line = Suppress(Word('='))\n_minus_line = Suppress(White() + Word('-'))\n\n\n################################\n# PARSERS #\n################################\n\n# Introduction parser\n_sourceintensity = (Suppress(_sourceintensity_kw + ':')\n + (_fnums | _unavailable_kw)\n ('source_intensity'))\n# unknown -> string not in a list, while a list of floats by default\n_meanweightleakvals = Group(_fnums('score')\n + Suppress(\"sigma =\") + _fnums('sigma')\n + Suppress('sigma% =') + _fnums('sigma%'))\n_meanweightleak = (Suppress(_meanweightleakage_kw)\n + (Suppress('=') + _meanweightleakvals('mean_weight_leak')\n | Suppress(':') + _unknown_kw('mean_weight_leak')))\n_meanweightleakin = (\n Suppress(_meanweightleakagein_kw)\n + (Suppress('=') + _meanweightleakvals('mean_weight_leak_inside')\n | Suppress(':') + _unknown_kw('mean_weight_leak_inside')))\n_edbatchnum = Suppress(_edbatchnum_kw + ':') + _inums('edition_batch_number')\n\n_meanweightrestartpart = (Suppress(_meanweightrestartpart_kw)\n + _fnums('mean_weight_restart_particle'))\n_introelts = (_meanweightleak | _meanweightleakin | _edbatchnum\n | _meanweightrestartpart)\nintro = Group(_sourceintensity + _star_line + OneOrMore(_introelts))('intro')\n\n# Conclusion parser\n_packet_length_warning = Suppress(\n \"* packet length is\" + _inums\n + \"(check documentation for conventions about discard and batches)\")\n_simutime = Suppress(_simulationtime_kw + ':') + _fnums('simulation_time')\n_exploitime = (Suppress(_exploitationtime_kw + ':')\n + _fnums('exploitation_time'))\n_elapsedtime = Suppress(_elapsedtime_kw + ':') + _fnums('elapsed_time')\nruntime = (Optional(_packet_length_warning)\n + Group(_simutime | _elapsedtime | _exploitime)('conclu'))\n\n# Response parser\n# Description of the response\n_respfunc = (Suppress(_respfunction_kw + ':')\n + OneOrMore(Word(printables), stopOn=LineEnd())\n .setParseAction(' '.join)('response_function'))\n# warning: order matters here, LineEnd has to be before Optional(Word)\n_respname = (Suppress(_respname_kw + ':')\n + (Suppress(LineEnd())\n | Optional(Word(printables)('response_name'))))\n_scorename = (Suppress(_scorename_kw + \":\")\n + Optional(Word(printables)('score_name')))\n_energysplit = (Suppress(_energysplitname_kw + ':')\n + Word(printables)('energy_split_name'))\n_nusplit = (Suppress(_nusplitname_kw + ':')\n + Word(printables)('nu_split_name'))\nrespdesc = (_respfunc + Optional(_respname) + Optional(_scorename)\n + Optional(_energysplit | _nusplit))\n\n_respfilter = (Suppress(_respfiltered_kw) + _inums('nb_filtered_compos')\n + Suppress(_respfiltcompos_kw + ':')\n + Group(OneOrMore(Word(printables), stopOn=LineEnd()))\n .setParseAction(trans.convert_list_to_tuple)\n ('filtered_compositions'))\n\n_particle = (Suppress(_particule_kw + ':')\n + OneOrMore(Word(alphas+','), stopOn=LineEnd())\n .setParseAction(' '.join)('particle'))\n_incparticle = (Suppress(_incparticle_kw + ':')\n + Word(alphas)('incident_particle'))\n_noise_equation = (Suppress(_noise_equation_kw + ':')\n + OneOrMore(Word(alphas), stopOn=LineEnd())\n .setParseAction(' '.join)('noise_equation'))\n# response characteristics written in lower case\n_reactiononnucl = (Suppress(_reactiononnucl_kw + ':')\n + Word(printables)('reaction_on_nucleus'))\n_temperature = Suppress(_temperature_kw + ':') + _fnums('temperature')\n_composition = (Suppress(_composition_kw + 
':')\n + Word(printables)('composition'))\n_concentration = Suppress(_concentration_kw + ':') + _fnums('concentration')\n_reaction = (Suppress(_reaction_kw)\n + ((Suppress(Word(alphas) + ':')\n + Group(delimitedList(_inums, delim='+')))\n | OneOrMore(Word(alphas), stopOn=LineEnd())\n .setParseAction(' '.join))\n .setParseAction(trans.convert_list_to_tuple)('reaction'))\n\n\n# Goal: when more than one reaction are required, keep characteristics grouped\n# by particle, reaction, etc.\ndef _next_compos(toks):\n if toks.getName() == 'reaction_on_nucleus':\n detail = _temperature | _composition | _concentration | _reaction\n elif toks.getName() == 'temperature':\n detail = _reactiononnucl | _composition | _concentration | _reaction\n elif toks.getName() == 'composition':\n detail = _reactiononnucl | _temperature | _concentration | _reaction\n elif toks.getName() == 'concentration':\n detail = _reactiononnucl | _temperature | _composition | _reaction\n elif toks.getName() == 'reaction':\n detail = _reactiononnucl | _temperature | _composition | _concentration\n else:\n LOGGER.warning(\"Not a foreseen result name, please check, keeping all\")\n detail = (_reactiononnucl | _temperature | _composition\n | _concentration | _reaction)\n _otherdetails << OneOrMore(detail) # pylint: disable=W0106\n\n\n_compodetails = Forward()\n_otherdetails = Forward()\n_compoptions = (_reactiononnucl\n | _temperature\n | _composition\n | _concentration\n | _reaction).setParseAction(_next_compos)\n_compodetails << Group(_compoptions + _otherdetails) # pylint: disable=W0106\n_nuclflags = (OneOrMore(_compodetails).setParseAction(trans.lod_to_dot)\n ('compos_details'))\n\n\n# other response characteristics\n_dpatype = (Suppress(_dpatype_kw)\n + OneOrMore(Word(alphas+'-,'), stopOn=LineEnd())\n .setParseAction(' '.join)('dpa_type'))\n_required = (Suppress(_required_kw)\n + OneOrMore(Word(alphanums+'():'), stopOn=LineEnd())\n .setParseAction(' '.join)('required'))\n_mode = Suppress(_mode_kw) + Word(alphas)('mode')\n_inducedbyint = (Suppress(_inducedbyint_kw)\n + Group(OneOrMore(_inums))\n .setParseAction(trans.convert_list_to_tuple)\n ('induced_by_interation'))\n_notinducedbyint = (Suppress(\"NOT\" + _inducedbyint_kw)\n + Group(OneOrMore(_inums))\n .setParseAction(trans.convert_list_to_tuple)\n ('NOT_induced_by_interation'))\n_fxptcontrib = (OneOrMore(Word(alphas+'()'),\n stopOn=_fxptcontrib_kw).setParseAction(' '.join)\n ('fxpt_contribution')\n + Suppress(_fxptcontrib_kw))\n_spectrumresp = Suppress(_spectrumresp_kw + ':') + Word(alphas)('spectrum')\n_filters = (Suppress(_filters_kw + ':')\n + Group(OneOrMore(_inums))\n .setParseAction(trans.convert_list_to_tuple)\n ('filtered_volumes')\n + Optional(Suppress('With') + Word(alphas+'_')('filter_type')))\n\n\nrespcarac = (_respfilter\n | _particle\n | _incparticle\n | _noise_equation\n | _nuclflags\n | _filters\n | _required\n | _dpatype\n | _mode\n | _inducedbyint\n | _notinducedbyint\n | _fxptcontrib\n | _spectrumresp)\n\nrespintro = respdesc + ZeroOrMore(respcarac)\n\n# Responses themselves\n# Score description (not needed for KEFF)\nscoremode = Suppress(_scoremode_kw + ':') + Word(alphas+'_')('scoring_mode')\n# scoring zones\n_score_mesh_unit = (Suppress(\"(in\")\n + Word(alphanums+'.^-+')('unit')\n + Suppress(')'))\n_score_meshinfo_cell_unit = (Suppress(_scoremeshinfobins_kw + '[')\n + _fnums + Word(alphanums+' .^-+')\n + Suppress(']'))('cell_unit')\n_score_meshinfo_volume = (Suppress(_vol_kw + '[')\n + _fnums + Word(alphanums+'.^-+')\n + 
Suppress(']'))('vol_unit')\n_score_meshcols = Suppress(Word(alphas)\n + Optional(_scoremeshinfobins_kw)\n + Word(alphas)\n + Word(alphas+'() '))\n_score_meshinfounit_cols = (Suppress(Word(alphas))\n + _score_meshinfo_cell_unit\n + _score_meshinfo_volume\n + Suppress(Word(alphas) + Word(alphas+'() ')))\n_score_mesh = (Optional(Suppress(_scorevolumeunit_kw)\n + Word(alphanums+' .^-+')('scoring_zone_volsurf_unit')\n + Suppress(':')\n + _fnums('scoring_zone_volsurf'))\n + Optional(Suppress(_scorevolumecell_kw + ':')\n + _fnums('scoring_zone_cellvol'))\n + _scoremesh_kw('scoring_zone_type') + Suppress(':')\n + (_score_meshinfounit_cols | _score_meshcols)\n + Optional(_score_mesh_unit))\n_score_allgeom = _scoreallgeom_kw('scoring_zone_type')\n_score_allsources = _scoreallsources_kw('scoring_zone_type')\n_score_vol = (_scorevol_kw('scoring_zone_type')\n + Suppress(_scorevolvol_kw + ':')\n + (_inums | Word(printables))('scoring_zone_id')\n + Suppress(_scorevolumeunit_kw)\n + Word(alphanums+' .^-+')('scoring_zone_volsurf_unit')\n + Suppress(':')\n + _fnums('scoring_zone_volsurf')\n + Optional(_scorevolumeint_kw | _scorevolumenotint_kw)\n ('scoring_zone_details'))\n_score_vol_sum = ((_scorevolsum_kw('scoring_zone_type')\n + Suppress(_scorevolsumvol_kw + ':')\n + Group(delimitedList(_inums, delim='+'))('scoring_zone_id')\n + Suppress(_scorevolumesum_kw + ':')\n + _fnums('scoring_zone_volsurf')))\n_score_surf = (_scoresurf_kw('scoring_zone_type')\n + Suppress(_scoresurfvol_kw + ':')\n + Group(delimitedList(_inums\n | Word(printables, excludeChars=',')))\n ('scoring_zone_id')\n + Optional(Suppress(_scoresurface_kw + ':')\n + _fnums('scoring_zone_volsurf'))\n + Optional(Suppress(_scoresurfaceint_kw\n | _scoresurfacenotint_kw)))\n_score_surf_sum = (\n _scoresurfsum_kw('scoring_zone_type')\n + Suppress(_scoresurfsumfront_kw + ':')\n + delimitedList(Group(Suppress('(')\n + delimitedList(Word(printables, excludeChars=',()')\n | _inums)\n + Suppress(')')), delim='+')('scoring_zone_id')\n + Suppress(_scoresurfacesum_kw + ':')\n + _fnums('scoring_zone_volsurf'))\n_score_point = (_scorepoint_kw('scoring_zone_type')\n + Suppress(':')\n + Group(_fnums + Suppress(',')\n + _fnums + Suppress(',')\n + _fnums)('scoring_zone_id'))\n_cellelt = (Group(Suppress('(') + _inums\n + Suppress(',') + _inums\n + OneOrMore(\n Group(Suppress(',') + _inums\n + Suppress(',') + _inums\n + Suppress(',') + _inums))\n + Suppress(')')))\n_score_cell = (_scorecell_kw('scoring_zone_type')\n + Suppress(_scorecelldet_kw)\n + Group(_cellelt\n + ZeroOrMore(Suppress('+') + _cellelt))\n ('scoring_zone_id'))\n_maillelt = (Suppress(_scoremaillevol_kw + ':') + _inums\n + Suppress(_scoremailledepth_kw + ':') + _inums\n + Suppress(_scoremaillecell_kw + ':')\n + OneOrMore(\n Group(Suppress('(') + _inums\n + Suppress(',') + _inums\n + Suppress(',') + _inums + Suppress(')'))))\n_score_maille = (_scoremaille_kw('scoring_zone_type')\n + Group(_maillelt)('scoring_zone_id'))\nscorezone = (Suppress(_scorezone_kw+':')\n + (_score_mesh\n | _score_allgeom\n | _score_allsources\n | _score_vol\n | _score_surf\n | _score_surf_sum\n | _score_vol_sum\n | _score_point\n | _score_cell\n | _score_maille\n | LineEnd()))\n# scoring description = scoring mode + scoring zone\nscoredesc = scoremode + scorezone\n\n# Correspondence table (volumes ids and names)\ncorresptable = (Suppress(_corresptable_kw)\n + OneOrMore(Group(\n Suppress(_vol_kw + ':')\n + _inums('volume_id')\n + Suppress(Keyword('is :'))\n + Word(printables)('volume_name')), stopOn=_endtable)\n 
('correspondence_table'))\n\n\n# RESPONSE RESULTS\n\n\ndef _set_no_unit_case(toks):\n '''Deal with the \"not unit\" case'''\n if len(toks) == 1:\n return {'uscore': '', 'usigma': toks[0]}\n LOGGER.warning(\"more than one unit, please check: %s\", toks)\n return None\n\n\ndef _rm_blanks(toks):\n '''Remove leading and trailing spaces (not the ones inside the string)'''\n return toks[0].strip()\n\n\n# Default integrated result\n_numdiscbatch = (Suppress(_numbatchs1stdiscarded_kw + ':')\n + _inums('discarded_batches'))\n_numusedbatch = Suppress(_numbatchsused_kw + ':') + _inums('used_batches')\n_integratedres_name = _integratedres_kw | Word(alphas) + _integratedres_kw\n_integratedres = _fnums('score') + _fnums('sigma')\n_unitsres = (Suppress(_units_kw)\n + (Word('%').setParseAction(_set_no_unit_case)\n | (Word(alphanums+'.^-+ ').setParseAction(_rm_blanks)('uscore')\n + Word('%')('usigma'))))\n# rejection in vov and sensibility cases\n_rejection = (Suppress('[')\n + OneOrMore(Word(alphanums+'<>.+-')).setParseAction(' '.join)\n ('rejection')\n + Suppress(']'))\n_vovnostar = Suppress(_vov_kw) + Group(_fnums('score'))('vov_res')\n_vovstar = Group(\n Suppress(_vovstar_kw) + _fnums('score')\n + Suppress('[') + _fnums('sigma') + Suppress(']')\n + Optional(_rejection))('vovstar_res')\n_sensibtomaxval = Group(\n Suppress(_sensibtomaxval_kw) + _fnums('max_val')\n + Optional(_rejection))('sensibility_res')\n_vov = (_vovnostar | _vovstar + _sensibtomaxval)\n# best result\nbestres = (Group(Suppress(_bestresdiscbatchs_kw) + _inums('discarded_batches')\n + Suppress(\"batches\")\n + _minus_line\n + _numusedbatch + _fnums('score') + _fnums('sigma'))\n ('best_result_res'))\n\n\nintegratedres = (Group(Optional(Suppress(_integratedres_name))\n + Optional(_numdiscbatch)\n + ((_numusedbatch + _integratedres)\n | _notconverged_kw('not_converged')))\n ('integrated_res')\n + Optional(_vov))\n\ngenericscoreblock = (Group(Optional(Suppress(_integratedres_name))\n + ((_numusedbatch\n + _integratedres\n + Optional(_unitsres))\n | _notconverged_kw('not_converged')))\n ('generic_res').setParseAction(trans.group_to_dict))\n\n\n# Time steps\n_timestep = Group(Suppress(_timestepnum_kw + ':') + _inums\n + _minus_line\n + Suppress(_timestepmin_kw) + _fnums\n + Suppress(_timestepmax_kw) + _fnums)('time_step')\n\n\n# Angular zones\n_muangzone = Group(Suppress(_muangzone_kw) + _inums\n + _minus_line\n + Suppress(_mumin_kw) + _fnums\n + Suppress(_mumax_kw) + _fnums)('mu_angle_zone')\n_phiangzone = Group(Suppress(_phiangzone_kw) + _inums\n + _minus_line\n + Suppress(_phimin_kw) + _fnums\n + Suppress(_phimax_kw) + _fnums)('phi_angle_zone')\n\n\n# Spectrum\n_spectrumunits = Group(Suppress(_units_kw)\n + Word(alphanums+'.^-+%') * 4)('units')\n_spectrumbin = _fnums + Suppress('-') + _fnums\n_spectrumcols = Suppress((_spgroupwunit_kw | _spgroup_kw)\n + _spscore_kw\n + _spsigma_kw\n + _spscovlethargy_kw)\n_spectrumvals = (Group(_spectrumbin + _fnums + _fnums + _fnums)\n .setFailAction(trans.fail_spectrum))\n_spectrum = (Suppress(_spectrum_kw)\n + _numdiscbatch\n + _spectrumcols\n + Optional(_spectrumunits)\n + OneOrMore(_spectrumvals, stopOn=_endtable)('spectrum_vals'))\nspectrumblock = (Group(OneOrMore\n (Group(OneOrMore(_timestep | _muangzone | _phiangzone)\n + _spectrum\n + Optional(integratedres)))\n | Group(_spectrum))('spectrum_res'))\n\n\n# Spectrum with vov\n# no unit for vov (not even a space) -> use spectrum ones with 4 fields\n_vovspectrumcols = Suppress((_spgroupwunit_kw | _spgroup_kw)\n + _spscore_kw + _spsigma_kw\n + 
_spscovlethargy_kw + _spvov_kw)\n_vovspectrumbin = _fnums + Suppress('-') + _fnums\n_vovspectrumvals = Group(_vovspectrumbin + _fnums + _fnums + _fnums + _fnums)\n_vovspectrum = (Suppress(_spectrum_kw)\n + _numdiscbatch\n + _vovspectrumcols\n + Optional(_spectrumunits)\n + OneOrMore(_vovspectrumvals, stopOn=_endtable)\n ('spectrum_vals'))\nvovspectrumblock = Group(Group(_vovspectrum))('vov_spectrum_res')\n\n\n# Nu spectrum\n_nuspectrumunits = Group(Suppress(_units_kw)\n + Word(alphanums+'.^-+%') * 3)('units')\n_nuspectrumcols = Suppress(_nusprange_kw + _spscore_kw + _spsigma_kw)\n_nuspectrumvals = (Group(_spectrumbin + _fnums + _fnums)\n .setFailAction(trans.fail_spectrum))\n_nuspectrum = (Suppress(_nuspectrum_kw)\n + _numdiscbatch\n + _nuspectrumcols\n + Optional(_nuspectrumunits)\n + OneOrMore(_nuspectrumvals, stopOn=_endtable)('spectrum_vals'))\nnuspectrumblock = Group(Group(_nuspectrum + integratedres))('nu_spectrum_res')\n\n\n# ZA spectrum\n_zaspectrumunits = Group(Suppress(_units_kw)\n + Word(alphanums+'.^-+%') * 2)('units')\n_zaspectrumcols = Suppress(_zaspid_kw + _spscore_kw + _spsigma_kw)\n_zaspectrumbin = (Suppress('(') + _inums + Suppress(',')\n + _inums + Suppress(')'))\n_zaspectrumvals = (Group(_zaspectrumbin + _fnums + _fnums)\n .setFailAction(trans.fail_spectrum))\n_zaspectrum = (Suppress(_zaspectrum_kw)\n + _numdiscbatch\n + _zaspectrumcols\n + Optional(_zaspectrumunits)\n + OneOrMore(_zaspectrumvals, stopOn=_endtable)('spectrum_vals'))\nzaspectrumblock = Group(Group(_zaspectrum + integratedres))('za_spectrum_res')\n\n\ndef _printtoks(toks):\n print(toks)\n\n\n# Entropy\n_boltzmannentropy = (Suppress(_boltzmannentropy_kw)\n + _fnums('boltzmann_entropy_res'))\n_shannonentropy = Suppress(_shannonentropy_kw) + _fnums('shannon_entropy_res')\nentropy = _boltzmannentropy + _shannonentropy\n\n\n# Mesh\n_mesh_energyrange = (Group(Suppress(_energyrange_kw + \"(in\") + Word(alphas)\n + Suppress('):') + _fnums + Suppress('-') + _fnums)\n ('mesh_energyrange'))\n_mesh_energyintegrated = ((Suppress(_integratedres_name) + Suppress(':'))\n ('mesh_energyintegrated'))\n_mesh_energyline = _mesh_energyrange | _mesh_energyintegrated\n_meshres = Group(\n _mesh_energyline\n + Group(originalTextFor(\n OneOrMore(Word(printables), stopOn=_endtable)))('mesh_vals')\n + Optional(Group(entropy)('entropy')))\nmeshblock = Group(OneOrMore(Group(_timestep\n + Optional(_score_mesh_unit)\n + Group(OneOrMore(_meshres))('meshes')\n + Optional(integratedres)))\n | Group(Group(OneOrMore(_meshres))('meshes')))('mesh_res')\n\n\n# KIJ matrix\n# parsed before keff as the KIJ ESTIMATOR is used to evaluate KEFF...\n# defining kijdim as a global variable does not work because it is created\n# before parsing; solution: use Forward to redefine it!\n\n\ndef _set_kijdim(toks):\n _kijdim = len(toks)\n _kijeigenvec << Group(_fnums * _kijdim) # pylint: disable=W0106\n _kijmatrix << Group(_fnums * _kijdim) # pylint: disable=W0106\n\n\n_kijsum = Group(Optional(Suppress(_kijlefteigenval_kw))\n + Suppress(_kijmkeff_kw) + _fnums\n + Suppress(_kijdomratio_kw) + _fnums)('kijmkeff_res')\n_kijeigenval = Group(_fnums + _fnums)\n_kijeigenvec = Forward()\n_kijmatrix = Forward()\n_kijeigenvaltab = (Suppress(_kijeigenval_kw)\n + (OneOrMore(_kijeigenval).setParseAction(_set_kijdim))\n ('kij_eigenval'))\n_kijeigenvectab = ((Suppress(_kijeigenvec_kw) + (OneOrMore(_kijeigenvec)))\n | _kijeigenvecnotprint_kw)('kij_eigenvec')\n_kijmatrixtab = (Suppress(_kijmatrix_kw)\n + ((OneOrMore(_kijmatrix)) | _kijmatrixnotprint_kw)\n ('kij_matrix'))\nkijres = 
(Group(Suppress(_integratedres_name)\n + _numusedbatch\n + _kijsum\n + _kijeigenvaltab\n + _kijeigenvectab\n + _kijmatrixtab).setParseAction(trans.convert_kij_result)\n ('kij_res'))\n\n\n# KIJ SOURCES\n_kijsourcesorder = (Suppress(_kijsourcesorder_kw)\n + Word(alphas)('kij_sources_order') + Suppress(':'))\n_kijsourcesval = Group(OneOrMore(_fnums))('kij_sources_vals')\nkijsources = (Group(Suppress(_integratedres_name)\n + _numusedbatch\n + Suppress(_kijsources_kw)\n + Optional(_kijsourcesorder)\n + _kijsourcesval)\n .setParseAction(trans.convert_kij_sources)('kij_sources'))\n\n\n# KIJ estimator for keff\n\ndef _define_kij_dim(toks):\n LOGGER.debug(\"KIJ dimension: %d\", len(toks))\n _kijdim = len(toks)\n _identifier = (Group(Suppress('(')\n + _inums + Suppress(',')\n + _inums + Suppress(',')\n + _inums + Suppress(')'))\n | _inums)\n _idline << Group(_identifier * _kijdim) # pylint: disable=W0106\n _matline << Group(_identifier # pylint: disable=W0106\n + Suppress('|')\n + (_fnums + Suppress('|')) * _kijdim)\n\n\ndef _set_kij_vols(toks):\n _nbvols = int(toks[0])\n _kijlistvol << Group(_inums * _nbvols) # pylint: disable=W0106\n\n\n_kijkeffbeg = Word(alphas)('estimator') + Suppress(_estimator_kw) + _minus_line\n_kijlistvol = Forward()\n_kijfissilevol = ((Suppress(_kijfissilevol_kw) + _inums('nb_fissile_vols'))\n .setParseAction(_set_kij_vols)\n + Suppress(_kijlistfissilevol_kw)\n + _kijlistvol('list_fissile_vols'))\n_kijkeffintro = (Optional(_kijfissilevol)\n + Suppress(_kijbatchs_kw) + _inums('batchs_kept')\n + Suppress(_kijmkeff_kw) + _fnums('kij_mkeff'))\n\n_idline = Forward()\n_matline = Forward()\n_kijkeffev = OneOrMore(Group(_inums + _fnums)).setParseAction(_define_kij_dim)\n_kijkeffevtab = Group(Suppress(_kijkeffevid_kw) + _kijkeffev)('kij_leigenvec')\n_defmatrix = _idline + _minus_line + OneOrMore(_matline + _minus_line)\n_kijkeffmatrix = (Group(Suppress(_kijkeffmat_kw) + _defmatrix)\n ('kij_matrix'))\n_kijkeffstdmatrix = (Group(Suppress(_kijkeffstddevmat_kw) + _defmatrix)\n ('kij_stddev_matrix'))\n_kijkeffsensibmatrix = (Group(Suppress(_kijkeffsensibilitymat_kw) + _defmatrix)\n ('kij_sensibility_matrix'))\n\n_kijkeffblock = (Group(_kijkeffbeg\n + _kijkeffintro\n + _kijkeffevtab\n + _kijkeffmatrix\n + _kijkeffstdmatrix\n + _kijkeffsensibmatrix)\n .setParseAction(trans.convert_kij_keff))\n\n\n# Keff as generic response\n_keffres = Group(Word(alphas) + _fnums + _fnums)\n_keffresblock = Group(OneOrMore(_keffres))('res_per_estimator')\n_correlationdesc = Suppress(_correlations_kw)\n_correlationestim = Group(Word(alphas) + Suppress('<->') + Word(alphas))\n_correlationvals = OneOrMore(_fnums\n | _notconverged_kw)\n_correlation = Group(_correlationestim + _correlationvals)\n_correlationblock = (Group(_correlationdesc + OneOrMore(_correlation))\n ('correlation_mat'))\n_fullcombestimation = (Group(Suppress(_fullcomb_kw)\n + ((_fnums + _fnums)\n | (_notconverged_kw\n + Suppress(Optional(_notcvg_exp_kw)))))\n ('full_comb_estimation'))\n_autokeffres = ((_keffresblock + _correlationblock + _fullcombestimation)\n | _notconverged_kw('not_converged'))\n_warnkeff = (Suppress(_warning_kw)\n + _warn_combkeff_kw.setParseAction(' '.join)('warning'))\nkeffblock = Group(Suppress(_integratedres_name)\n + _numusedbatch\n + (_autokeffres | _warnkeff)\n ).setParseAction(trans.convert_keff)('keff_res')\n\n\n# Keff as historical response\n_bestresestim = (OneOrMore(Word(alphas), stopOn=_estimator_kw)\n .setParseAction(' '.join)('keff_estimator')\n + Suppress(_estimator_kw))\n_bestresdiscbatch = 
(Suppress(_bestresdiscbatchs_kw)\n + _inums('best_disc_batchs')\n + Suppress(\"batches\"))\n_bestkeff = (Suppress(Keyword(\"keff\") + '=') + _fnums('keff')\n + Suppress(Keyword(\"sigma\") + '=') + _fnums('sigma')\n + Suppress(Keyword(\"sigma%\") + '=') + _fnums('sigma%'))\n_equivkeff = Suppress(_equivkeff_kw) + _fnums('equivalent_keff')\n_bestkeffpestim = (Group(_notconverged_kw('not_converged')\n | Group(Optional(_bestresdiscbatch)\n + _numusedbatch\n + _bestkeff\n + Optional(_equivkeff))('keff_auto'))\n ('results'))\n_bestreskeff = Group(_bestresestim + _minus_line + _bestkeffpestim)\n_warnfixedsources = Group(Suppress(_warning_kw) + _minus_line\n + _warn_fixsourcekeff_kw('warning'))\n_bestresblock = OneOrMore(_bestreskeff, stopOn=\"KIJ\")\nautokeffblock = Group(Group(Optional(_warnfixedsources)\n + _bestresblock\n + Optional(_kijkeffblock))\n .setParseAction(trans.convert_keff_auto))('keff_auto')\n\n\n# MED files\nmedfile = (Suppress((_creationmedfile_kw | _creationfile_kw) + ':')\n + Word(alphanums+'/_.')('med_file')\n + Suppress(_medmeshid_kw + Word(alphanums+'_.')))\n\n\n# Greenbands exploitation\n_gbspectrumstep = Suppress(_gbspectrumstep_kw) + _inums\n_gbenergymin = Suppress(_gbenergymin_kw) + _fnums\n_gbenergymax = Suppress(_gbenergymax_kw) + _fnums\n_gbstepdesc = Group(_gbspectrumstep + _minus_line\n + _gbenergymin + _gbenergymax)('gb_step_desc')\n_gbtabulation = (Suppress(_gbsourcetab_kw)\n + Group(Suppress('u =') + _inums\n + Suppress(', v =') + _inums\n + Suppress(', w =') + _inums))\n_gbsource = Group(Suppress(_gbsourcenum_kw)\n + _inums\n + (_minus_line | (_gbtabulation + _minus_line)))('gb_source')\n_gbrespersource = Group(_gbsource + spectrumblock)\n_gbstep = Group(_gbstepdesc + Group(OneOrMore(_gbrespersource))('gb_step_res'))\ngbblock = Group(OneOrMore(_gbstep))('green_bands_res')\n\n\n# Scores ordered by nuclei and precursor families, IFP outputs\n_generic_score = Suppress(':') + _fnums + _fnums\n# Nuclei order alone\n_scorepernucleus = Group(Word(alphanums) + _generic_score)\n_nucleiorder = (Suppress(_nucleiorder_kw)\n + ZeroOrMore(_scorepernucleus)('score_per_nucleus'))\n# Families order alone\n_scoreperfamily = Group(Suppress(\"i =\") + _inums + _generic_score)\n_familyorder = (Suppress(_familiesorder_kw)\n + ZeroOrMore(_scoreperfamily)('score_per_family'))\n# Nuclei and families order\n_nucleusid = Suppress(_nucleus_kw) + Word(alphanums) + Suppress('.')\n_nucleusfam = Group(_nucleusid + _familyorder)\n_nuclfamorder = (Suppress(_nucleifamilyorder_kw)\n + ZeroOrMore(_nucleusfam)('score_per_nucleus_family'))\n# Perturbation index order\n_scoreperpertuind = Group(Suppress(\"i =\") + _inums + _generic_score)\n_perturborder = (Suppress(_perturborder_kw)\n + ZeroOrMore(_scoreperpertuind)('score_per_perturbation'))\n# Convergence statistics\n_cvgline = Group(Suppress(\"L =\") + _inums + _generic_score)\n_cvgstat = (Suppress(_cvgstat_kw)\n + ZeroOrMore(_cvgline)('score_per_length'))\n# results from adjoint calculation\nadjointres = (Group(Group(Suppress(_integratedres_name)\n + _numusedbatch\n + Group(_nuclfamorder\n | _nucleiorder\n | _familyorder\n | _perturborder\n | _cvgstat)('adj_res')\n + Optional(_unitsres)\n ).setParseAction(trans.convert_generic_adjoint))\n )('adjoint')\n# Convergence statistics\n_kingcritline = Group(Suppress(\"CRITICALITY SOURCE\") + _fnums + _fnums)\n_kingtimestepline = Group(Suppress(\"END OF TIME STEP \") + Suppress(_inums)\n + _fnums + _fnums)\n_kingstat = Group(_kingcritline\n + ZeroOrMore(_kingtimestepline))('kin_generic_res')\nkingres = 
(Group(Suppress(_integratedres_name)\n + _numusedbatch\n + _kingstat\n + Optional(_unitsres)('units')\n ).setParseAction(trans.convert_generic_kinetic)\n )('kinetic_generic')\n# sensitivities\n_sensitivityorder = (Suppress(_sensitivitytypeorder_kw)\n + OneOrMore(Word(alphas + '_,()'),\n stopOn=_sensitivityindexorder_kw)\n .setParseAction(' '.join)\n + Suppress(_sensitivityindexorder_kw))\n_sensitivityreactionrate_ratio = Group(Suppress(_sensitivityreactionrate_kw)\n + _fnums('score') + _fnums('sigma'))\n_sensitivity_type = (OneOrMore(Word(alphas.upper()), stopOn=_sensitivity_kw)\n .setParseAction(' '.join)\n + Suppress(_sensitivity_kw))('typeI')\n_sensitivity_index = Group(\n Suppress(\"i =\") + _inums('sensitivity_index') + Suppress(';')\n + Suppress(\"NUCLEUS :\") + Word(alphanums + '_')('sensitivity_nucleus')\n + Suppress(',') + Suppress(\"TYPE :\")\n + Word(alphanums.upper() + '_() ')('sensitivity_reaction'))\n_sensitivity_dircos = Group(Suppress(_sensitivity_dircos_kw)\n + _fnums*2)('direction_cosine')\n_sensitivity_energyinc = Group(Suppress(_sensitivity_incenergy_kw)\n + _fnums*2)('energy_incident')\n_sensitivity_cols = (Keyword(\"E min\") + Keyword(\"E max\")\n + Keyword(\"S(E)\") + Keyword(\"sigma\"))\n_sensitivity_vals = Group(_fnums*4)\n_sensitivity_energyint = Group(Suppress(_sensitivity_energyint_kw)\n + _integratedres)\n_sensitivity_res = Group(_sensitivity_index('charac')\n + Group(OneOrMore(Group(\n ZeroOrMore(_sensitivity_dircos\n | _sensitivity_energyinc)\n + Suppress(_sensitivity_cols)\n + OneOrMore(_sensitivity_vals)('values'))))\n ('vals')\n + _sensitivity_energyint('energy_integrated'))\n_sensitivity = (Suppress(_sensitivityorder)\n + ZeroOrMore(Group(_sensitivityreactionrate_ratio\n ('reaction_rate')))\n + (OneOrMore(Group(_sensitivity_type('sensitivity_type')\n + OneOrMore(_sensitivity_res)('res')))))\nsensitivityres = Group(Group(Optional(Suppress(_integratedres_name))\n + _numusedbatch\n + Group(_sensitivity)('sensit_res')\n + Optional(_unitsres)('units'))\n .setParseAction(trans.convert_sensitivities)\n )('sensitivity')\n\n\ndef _rename_norm_kw():\n '''Transform RESULTS ARE NORMALIZED keyword in int (lighter)'''\n return 1\n\n\ndef _define_ifp_adj_table_dim(toks):\n '''Define the format of the IFP adjoint criticality table result:\n coordinates (Vol or space coordinates) are followed by energy.\n '''\n _tabdim = len(toks)\n if \"Vol\" not in toks[0]:\n _ifpadjbinval << _fnums * 2 * _tabdim # pylint: disable=W0104\n else:\n # in Vol case: int to identify volume, then E (min | max)\n _ifpadjbinval << _inums + _fnums * 2 # pylint: disable=W0104\n\n\n# IFP adjoint criticality edition\n_adjcrit_ed_intro = _star_line + Suppress(_ifpadjcriticality_kw) + _star_line\n_ifpadjcrit_intro = Group(Word(alphas+'_')('ifp_response')\n + _scorename\n + Suppress(_ifpadjcyclelength_kw)\n + _inums('ifp_cycle_length')\n + (Optional(_ifpadjnormalizedres_kw\n .setParseAction(_rename_norm_kw))\n ('normalized'))\n + _star_line)\n_ifpadjbinval = Forward()\n_ifpadjcoordinate = Word(alphas) + Suppress(_ifpadjminmax_kw)\n_ifpadjcoordinates = ((Optional(_ifpadjvol_kw) + OneOrMore(_ifpadjcoordinate))\n .setParseAction(_define_ifp_adj_table_dim))\n_ifpadjcolumns = _ifpadjcoordinates + _ifpadjscore_kw + _spsigma_kw\n_ifpadjline = Group(_ifpadjbinval + _fnums + _fnums)\n_ifpadjvalues = OneOrMore(_ifpadjline)\n_adjcritblock = Group(_ifpadjcrit_intro('ifp_adjoint_criticality_intro')\n + _ifpadjcolumns('columns')\n + _ifpadjvalues('values')\n + _star_line)\nifpadjointcriticality = 
(Group((_adjcrit_ed_intro + OneOrMore(_adjcritblock))\n .setParseAction(trans.convert_ifp_adj_crit_ed))\n ('ifp_adjoint_crit_edition'))\n\n# Spherical harmonics results\n_shr_bins = Group(Suppress(_shr_nb_space_bins_kw) + _inums('nb_sbins')\n + Suppress(_shr_nb_u_space_bins_kw) + _inums('nb_ubins')\n + Suppress(_shr_nb_v_space_bins_kw) + _inums('nb_vbins')\n + Suppress(_shr_nb_w_space_bins_kw) + _inums('nb_wbins')\n + Suppress(_shr_nb_energy_bins_kw) + _inums('nb_ebins')\n + Suppress(_shr_nb_inc_energy_bins_kw) + _inums('nb_iebins')\n + Suppress(_shr_lmax_kw) + _inums('lmax'))('nb_bins')\n_shrscore = (Suppress(_shr_score_name_kw)\n + Group(OneOrMore(Word(printables), stopOn=LineEnd())\n .setParseAction(' '.join))('score_name'))\n_shrspace = Suppress(_shr_space_kw) + Group(_inums + _inums + _inums)('space')\n_shrenergy = Suppress(_shr_energy_kw) + Group(_fnums + _fnums)('energy')\n_shrincenergy = (Suppress(_shr_incident_energy_kw)\n + Group(_fnums + _fnums)('incident_energy'))\n_shrcols = Suppress(_shr_l_kw + _shr_m_kw + _shr_score_kw + _shr_sigma_kw)\n_shrvals = Group(_inums + _inums + _fnums + _fnums)\n_shrtable = _shrenergy + _shrcols + OneOrMore(_shrvals)('values')\n_shr_ietable = Optional(_shrincenergy) + OneOrMore(Group(_shrtable))('vpie')\n_shr_spacetable = _shrspace + OneOrMore(Group(_shr_ietable))('vpspace')\n_shr_scoretable = _shrscore + OneOrMore(Group(_shr_spacetable))('score')\nshrblock = Group(_shr_bins\n + Group(OneOrMore(Group(_shr_scoretable)))('res')\n + Suppress(_star_line)\n ).setParseAction(trans.convert_shr)('spherical_harmonics')\n\n# Perturbations\n_perturank = Suppress(_perturank_kw) + _inums('perturbation_rank')\n_pertumethod = (Suppress(_pertumethod_kw)\n + OneOrMore(Word(alphas), stopOn=LineEnd())\n .setParseAction(' '.join)('perturbation_method'))\n_pertuorder = Suppress(_pertuorder_kw) + _inums('perturbation_order')\n_pertutype = Suppress(_pertutype_kw) + Word(alphas)('perturbation_type')\n_pertucompo = (Suppress(_pertucompo_kw)\n + Word(alphanums+'_')('perturbation_composition'))\npertu_desc = (Group(Suppress(_perturbation_kw)\n + _perturank\n + _pertumethod\n + Optional(_pertuorder)\n + _pertutype\n + _pertucompo)('perturbation_desc'))\n\n# Uncertainties (linked to perturbations ?)\n_uncertcols = Suppress(_uncertgp_kw + _uncertsig2_kw\n + _uncertmean_kw + _uncertsig_kw + _uncertfisher_kw)\n_uncertbin = _fnums + Suppress('-') + _fnums\n_uncertvals = Group(_uncertbin + _fnums * 4)\n_uncertspectrum = (Suppress(_uncertres_kw)\n + _numdiscbatch\n + _uncertcols\n + _minus_line\n + OneOrMore(_uncertvals, stopOn=_endtable)\n ('spectrum_vals'))\nuncertblock = Group(OneOrMore\n (Group(OneOrMore(_timestep | _muangzone | _phiangzone)\n + _uncertspectrum))\n | Group(_uncertspectrum))('uncert_spectrum_res')\n\n_uncertintegres = (_fnums('sigma2(means)')\n + _fnums('mean(sigma_n2)')\n + _fnums('sigma(sigma_n2)')\n + _fnums('fisher test'))\n_uncertintegfullres = _numusedbatch + _uncertintegres\nuncertintegblock = Group(Suppress(_uncertintegres_kw)\n + _numdiscbatch\n + _uncertintegfullres)('uncert_integrated_res')\n\n\n# Creation jdds\n_nbpartline = Group(Suppress(\"FILE\") + _inums\n + Suppress(':') + _inums\n + Suppress(\"particles\"))\ncontribpartblock = (Group(Suppress(_nbcontribpart_kw)\n + _minus_line\n + OneOrMore(_nbpartline)\n + Suppress(_endcontribpart_kw))\n ('contributing_particles'))\n\n\n# Score block\nscoreblock = (Group(scoredesc + (OneOrMore(vovspectrumblock\n | spectrumblock\n | nuspectrumblock\n | zaspectrumblock\n | meshblock\n | medfile\n | 
integratedres\n | bestres\n | uncertblock\n | uncertintegblock\n | gbblock\n | corresptable)))\n .setParseAction(trans.convert_score))\n\nlistscoreblock = (Group(OneOrMore(scoreblock)\n .setParseAction(trans.index_elements('score_index')))\n ('score_res'))\n\n# Response block\nresponseblock = Group(keffblock\n | kijres\n | kijsources\n | adjointres\n | kingres\n | sensitivityres\n | genericscoreblock\n | shrblock\n | listscoreblock)('results')\n\nresponse = (Group(_star_line\n + respintro\n + _star_line\n + responseblock)\n .setParseAction(trans.finalize_response_dict))\n\nlistresponses = Group(OneOrMore(response).setParseAction(\n compose2(trans.extract_all_metadata,\n trans.index_elements('response_index'))))('list_responses')\n\nperturbation = (OneOrMore(Group(pertu_desc + listresponses)\n .setParseAction(trans.propagate_all_metadata))\n .setParseAction(trans.index_elements('perturbation_index'))\n ('perturbation'))\n\n\n################################\n# DEBUG PARSER #\n################################\n\n# debug grammar, to be used with parse_debug (only for parsing development)\nt4debug_gram = (OneOrMore((intro\n + OneOrMore(listresponses | ifpadjointcriticality\n | autokeffblock | perturbation\n | Suppress(contribpartblock)\n | OneOrMore(runtime)))\n .setParseAction(trans.to_final_dict))\n .setParseAction(dump_in_logger)\n | intro + OneOrMore(runtime)).setFailAction(trans.fail_parsing)\n\n\n################################\n# GENERAL PARSER #\n################################\n\nt4gram = (OneOrMore((intro\n + ZeroOrMore(listresponses | ifpadjointcriticality\n | autokeffblock | perturbation\n | Suppress(contribpartblock))\n + runtime)\n .setParseAction(trans.to_final_dict))\n .setParseAction(dump_in_logger)\n ).setFailAction(trans.fail_parsing)\n", "repo_name": "valjean-framework/valjean", "sub_path": "valjean/eponine/tripoli4/grammar.py", "file_name": "grammar.py", "file_ext": "py", "file_size_in_byte": 62240, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyparsing.pyparsing_common.fnumber.setParseAction", "line_number": 201, "usage_type": "call"}, {"api_name": "pyparsing.pyparsing_common.fnumber", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pyparsing.pyparsing_common", "line_number": 201, "usage_type": "name"}, {"api_name": "pyparsing.tokenMap", "line_number": 201, "usage_type": "call"}, {"api_name": "pyparsing.pyparsing_common.number.setParseAction", "line_number": 202, "usage_type": "call"}, {"api_name": "pyparsing.pyparsing_common.number", "line_number": 202, "usage_type": "attribute"}, {"api_name": "pyparsing.pyparsing_common", "line_number": 202, "usage_type": "name"}, {"api_name": "pyparsing.tokenMap", "line_number": 202, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 209, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 210, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 211, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 212, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 212, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 213, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 214, "usage_type": "call"}, {"api_name": "pyparsing.CaselessKeyword", "line_number": 215, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 216, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", 
"line_number": 217, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 218, "usage_type": "call"}, {"api_name": "pyparsing.CaselessKeyword", "line_number": 219, "usage_type": "call"}, {"api_name": "pyparsing.LineEnd", "line_number": 220, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 223, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 224, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 225, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 226, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 227, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 230, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 231, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 232, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 233, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 237, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 240, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 241, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 242, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 243, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 244, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 245, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 246, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 247, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 248, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 249, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 250, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 251, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 252, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 253, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 254, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 255, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 256, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 257, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 258, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 259, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 260, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 261, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 264, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 265, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 266, "usage_type": "call"}, {"api_name": "pyparsing.replaceWith", "line_number": 267, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 268, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 269, "usage_type": "call"}, {"api_name": "pyparsing.replaceWith", "line_number": 270, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 271, "usage_type": "call"}, {"api_name": "pyparsing.replaceWith", "line_number": 272, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 273, "usage_type": "call"}, {"api_name": 
"pyparsing.Keyword", "line_number": 274, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 275, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 276, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 277, "usage_type": "call"}, {"api_name": "pyparsing.replaceWith", "line_number": 278, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 280, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 282, "usage_type": "call"}, {"api_name": "pyparsing.replaceWith", "line_number": 284, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 285, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 286, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 287, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 288, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 289, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 290, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 291, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 292, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 294, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 295, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 296, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 297, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 298, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 299, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 300, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 301, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 302, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 303, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 304, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 307, "usage_type": "call"}, {"api_name": "pyparsing.CaselessKeyword", "line_number": 309, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 312, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 313, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 314, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 315, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 315, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 316, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 317, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 318, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 319, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 320, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 322, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 323, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 325, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 326, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 330, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 331, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 332, "usage_type": "call"}, 
{"api_name": "pyparsing.Keyword", "line_number": 335, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 336, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 337, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 338, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 339, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 340, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 343, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 344, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 345, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 346, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 347, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 348, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 349, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 350, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 351, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 352, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 353, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 356, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 359, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 360, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 361, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 364, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 365, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 368, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 370, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 371, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 373, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 375, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 377, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 378, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 379, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 380, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 381, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 382, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 383, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 386, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 387, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 388, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 391, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 392, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 393, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 394, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 395, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 398, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 399, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 400, "usage_type": "call"}, {"api_name": 
"pyparsing.Keyword", "line_number": 401, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 402, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 403, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 405, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 406, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 410, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 411, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 414, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 415, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 416, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 417, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 418, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 419, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 420, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 421, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 427, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 430, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 431, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 432, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 433, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 434, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 435, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 438, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 439, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 440, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 441, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 442, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 443, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 444, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 445, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 446, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 447, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 448, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 450, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 451, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 452, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 453, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 456, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 458, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 459, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 460, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 461, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 462, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 465, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 466, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 467, "usage_type": "call"}, {"api_name": 
"pyparsing.Keyword", "line_number": 468, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 469, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 470, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 471, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 474, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 475, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 478, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 478, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 479, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 479, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 480, "usage_type": "call"}, {"api_name": "pyparsing.White", "line_number": 480, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 480, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 488, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 492, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 493, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 494, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 495, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 496, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 497, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 499, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 500, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 501, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 502, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 504, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 508, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 508, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 511, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 514, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 515, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 517, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 518, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 519, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 523, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 524, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 524, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 524, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 524, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 527, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 528, "usage_type": "call"}, {"api_name": "pyparsing.LineEnd", "line_number": 528, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 529, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 529, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 529, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 530, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 531, "usage_type": "call"}, 
{"api_name": "pyparsing.Word", "line_number": 531, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 531, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 532, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 533, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 533, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 534, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 535, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 535, "usage_type": "argument"}, {"api_name": "pyparsing.Optional", "line_number": 536, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 537, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 539, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 540, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 541, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 541, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 541, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 541, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 541, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 545, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 546, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 546, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 546, "usage_type": "name"}, {"api_name": "pyparsing.LineEnd", "line_number": 546, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 548, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 549, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 549, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 550, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 551, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 551, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 551, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 551, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 554, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 555, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 555, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 556, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 557, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 558, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 558, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 559, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 560, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 561, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 561, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 561, "usage_type": "argument"}, {"api_name": "pyparsing.Group", "line_number": 562, "usage_type": "call"}, {"api_name": "pyparsing.delimitedList", "line_number": 562, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 563, "usage_type": "call"}, {"api_name": "pyparsing.Word", 
"line_number": 563, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 563, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 563, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 585, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 588, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 589, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 595, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 596, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 601, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 602, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 602, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 602, "usage_type": "name"}, {"api_name": "pyparsing.LineEnd", "line_number": 602, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 604, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 605, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 605, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 605, "usage_type": "name"}, {"api_name": "pyparsing.LineEnd", "line_number": 605, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 607, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 607, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 607, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 608, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 609, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 609, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 612, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 613, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 613, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 616, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 616, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 616, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 619, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 620, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 620, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 620, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 621, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 622, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 622, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 625, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 625, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 625, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 625, "usage_type": "name"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 642, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 646, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 646, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 646, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 648, "usage_type": "call"}, {"api_name": "pyparsing.Word", 
"line_number": 649, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 649, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 650, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 651, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 652, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 652, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 653, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 654, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 655, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 655, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 656, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 657, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 657, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 657, "usage_type": "argument"}, {"api_name": "pyparsing.Optional", "line_number": 658, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 659, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 659, "usage_type": "argument"}, {"api_name": "pyparsing.Word", "line_number": 660, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 660, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 661, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 661, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 661, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 664, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 664, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 664, "usage_type": "argument"}, {"api_name": "pyparsing.Optional", "line_number": 665, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 665, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 666, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 666, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 667, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 669, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 669, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 671, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 673, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 677, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 678, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 678, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 679, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 680, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 680, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 681, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 683, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 686, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 687, "usage_type": "call"}, {"api_name": "pyparsing.delimitedList", "line_number": 687, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 688, "usage_type": "call"}, {"api_name": 
"pyparsing.Suppress", "line_number": 691, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 692, "usage_type": "call"}, {"api_name": "pyparsing.delimitedList", "line_number": 692, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 693, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 693, "usage_type": "argument"}, {"api_name": "pyparsing.Optional", "line_number": 695, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 695, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 697, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 697, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 701, "usage_type": "call"}, {"api_name": "pyparsing.delimitedList", "line_number": 702, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 702, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 702, "usage_type": "call"}, {"api_name": "pyparsing.delimitedList", "line_number": 703, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 703, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 703, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 705, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 706, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 709, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 710, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 710, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 711, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 713, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 713, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 714, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 715, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 716, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 716, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 717, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 718, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 719, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 721, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 722, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 723, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 723, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 725, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 726, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 727, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 728, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 729, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 729, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 730, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 731, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 733, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 734, "usage_type": "call"}, {"api_name": "pyparsing.LineEnd", "line_number": 745, 
"usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 750, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 751, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 751, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 752, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 754, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 754, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 755, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 755, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 776, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 778, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 779, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 779, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 781, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 782, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 783, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 783, "usage_type": "name"}, {"api_name": "pyparsing.Word", "line_number": 784, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 786, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 787, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 787, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 787, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 789, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 790, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 790, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 791, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 792, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 793, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 794, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 795, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 796, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 797, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 800, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 800, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 801, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 807, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 807, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 807, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 808, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 812, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 814, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 814, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 814, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 817, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 823, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 823, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 825, 
"usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 826, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 830, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 830, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 832, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 833, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 834, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 834, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 836, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 837, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 841, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 841, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 842, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 842, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 843, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 844, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 848, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 850, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 853, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 854, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 855, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 855, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 856, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 856, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 858, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 859, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 864, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 867, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 868, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 869, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 872, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 873, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 875, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 879, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 879, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 880, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 880, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 881, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 882, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 884, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 887, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 888, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 889, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 893, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 893, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 894, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 894, 
"usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 895, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 896, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 897, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 898, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 900, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 903, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 904, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 905, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 913, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 915, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 920, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 920, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 920, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 920, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 921, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 923, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 926, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 928, "usage_type": "call"}, {"api_name": "pyparsing.originalTextFor", "line_number": 928, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 929, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 929, "usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 929, "usage_type": "argument"}, {"api_name": "pyparsing.Optional", "line_number": 930, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 930, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 931, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 931, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 932, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 933, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 933, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 934, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 935, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 935, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 946, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 947, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 950, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 950, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 950, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 951, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 952, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 953, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 954, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 955, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 956, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 957, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 959, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", 
"line_number": 959, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 961, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 962, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 964, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 964, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 974, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 975, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 975, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 975, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 976, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 976, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 977, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 977, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 979, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 980, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 990, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 990, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 991, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 992, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 993, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 995, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 996, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 997, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 998, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1003, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1006, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1006, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1006, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 1007, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1008, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1010, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1012, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1013, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1014, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 1016, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 1017, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1018, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1018, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1019, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1019, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1020, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1021, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1021, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1023, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1023, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1025, "usage_type": "call"}, {"api_name": 
"pyparsing.Suppress", "line_number": 1025, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1028, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1038, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1038, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1038, "usage_type": "argument"}, {"api_name": "pyparsing.Group", "line_number": 1039, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1039, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1040, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1041, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1041, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1041, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1041, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1042, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1044, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1045, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1045, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1047, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1047, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1050, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1050, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1054, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1056, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1056, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1063, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1063, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1063, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1065, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1066, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1068, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1069, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 1069, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1070, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 1070, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1071, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 1071, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1072, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1073, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1074, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1074, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1077, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1079, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1080, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1080, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1082, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1083, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1083, 
"usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1085, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1090, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1091, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1091, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1092, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1092, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1096, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1097, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1098, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1099, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1101, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1102, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1102, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1103, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1104, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1105, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1105, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1108, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1109, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1109, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1110, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1110, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1114, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1116, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1116, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1116, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1117, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1118, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1120, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1120, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1121, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1122, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1124, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1124, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1124, "usage_type": "argument"}, {"api_name": "pyparsing.Group", "line_number": 1125, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1126, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1127, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1129, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1129, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1130, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1131, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1133, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1133, "usage_type": "call"}, 
{"api_name": "pyparsing.Suppress", "line_number": 1134, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1135, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1137, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1137, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1139, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1144, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1148, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1148, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1149, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1149, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1151, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1152, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1153, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1153, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1156, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1160, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1161, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1161, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1161, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1164, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1165, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1165, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1167, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1167, "usage_type": "call"}, {"api_name": "pyparsing.alphas.upper", "line_number": 1167, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1167, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1169, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1170, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1171, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1172, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1172, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1172, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1173, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1174, "usage_type": "call"}, {"api_name": "pyparsing.alphanums.upper", "line_number": 1174, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1174, "usage_type": "name"}, {"api_name": "pyparsing.Group", "line_number": 1175, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1175, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1177, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1177, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 1179, "usage_type": "call"}, {"api_name": "pyparsing.Keyword", "line_number": 1180, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1181, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1182, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1182, "usage_type": "call"}, {"api_name": "pyparsing.Group", 
"line_number": 1184, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1185, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1185, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1186, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1188, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1189, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1192, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1193, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1193, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1195, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1195, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1196, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1197, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1197, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1197, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1199, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1200, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1223, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1224, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1224, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1224, "usage_type": "name"}, {"api_name": "pyparsing.Suppress", "line_number": 1226, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1228, "usage_type": "call"}, {"api_name": "pyparsing.Forward", "line_number": 1232, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1233, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1233, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1233, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1234, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1234, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1237, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1238, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1239, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1243, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1243, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1248, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1248, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1249, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1250, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1251, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1252, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1253, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1254, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1255, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1256, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1256, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1256, 
"usage_type": "call"}, {"api_name": "pyparsing.printables", "line_number": 1256, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 1256, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1258, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1258, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1259, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1259, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1260, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1261, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1262, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1263, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1264, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1265, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1265, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1265, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1266, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1266, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1267, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1267, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1268, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1269, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1269, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1270, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1274, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1275, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1276, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1276, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1276, "usage_type": "argument"}, {"api_name": "pyparsing.LineEnd", "line_number": 1276, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1278, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1279, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1279, "usage_type": "call"}, {"api_name": "pyparsing.alphas", "line_number": 1279, "usage_type": "argument"}, {"api_name": "pyparsing.Suppress", "line_number": 1280, "usage_type": "call"}, {"api_name": "pyparsing.Word", "line_number": 1281, "usage_type": "call"}, {"api_name": "pyparsing.alphanums", "line_number": 1281, "usage_type": "name"}, {"api_name": "pyparsing.Group", "line_number": 1282, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1282, "usage_type": "call"}, {"api_name": "pyparsing.Optional", "line_number": 1285, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1290, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1292, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1293, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1294, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1298, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1300, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1300, "usage_type": "call"}, 
{"api_name": "pyparsing.Group", "line_number": 1301, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1301, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1303, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1310, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1310, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1316, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1316, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1317, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1318, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1319, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1319, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1321, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1322, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1327, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1327, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1341, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1341, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1346, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1356, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1362, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1362, "usage_type": "call"}, {"api_name": "transform.compose2", "line_number": 1363, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1366, "usage_type": "call"}, {"api_name": "pyparsing.Group", "line_number": 1366, "usage_type": "call"}, {"api_name": "dump.dump_in_logger", "line_number": 1383, "usage_type": "argument"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1377, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1378, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1380, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1381, "usage_type": "call"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1384, "usage_type": "call"}, {"api_name": "dump.dump_in_logger", "line_number": 1397, "usage_type": "argument"}, {"api_name": "pyparsing.OneOrMore", "line_number": 1391, "usage_type": "call"}, {"api_name": "pyparsing.ZeroOrMore", "line_number": 1392, "usage_type": "call"}, {"api_name": "pyparsing.Suppress", "line_number": 1394, "usage_type": "call"}]}