diff --git "a/107.jsonl" "b/107.jsonl"
new file mode 100644
--- /dev/null
+++ "b/107.jsonl"
@@ -0,0 +1,813 @@
+{"seq_id": "15301296951", "text": "# @Time : 2020/07/02\n# @Author : sunyingqiang\n# @Email : 344670075@qq.com\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import ArticleViewSet, ArticlePollView, ArticleSearchViewSet\n\nrouter = DefaultRouter()\nrouter.register('article', ArticleViewSet, basename='article')\nrouter.register('article_search', ArticleSearchViewSet, basename='article_search')\n\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'poll', ArticlePollView.as_view())\n\n]", "repo_name": "supermouse123/drf_blog", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ArticleViewSet", "line_number": 9, "usage_type": "argument"}, {"api_name": "views.ArticleSearchViewSet", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.ArticlePollView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.ArticlePollView", "line_number": 14, "usage_type": "name"}]}
+{"seq_id": "4682250349", "text": "from django.core.management import BaseCommand, CommandError\nfrom django.utils import timezone\nfrom snippet.models import Snippet\n\n\nclass Command(BaseCommand):\n help = 'Delete expired snippets'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--quiet',\n action='store_true',\n dest='quiet',\n default=False,\n help='Suppress any output except errors',\n )\n\n def handle(self, *args, **options):\n qs = Snippet.objects.filter(\n expiration__lt=timezone.now()\n ).order_by('pub_date', 'update_date')\n\n if not options['quiet']:\n for s in qs:\n print('{0} {1}'.format(s.slug, s.expiration))\n\n n, _ = qs.delete()\n\n if not options['quiet']:\n print(\"Deleted {0} snippets\".format(n))\n", "repo_name": "aither64/havesnippet", "sub_path": "snippet/management/commands/expiresnippets.py", "file_name": "expiresnippets.py", "file_ext": "py", "file_size_in_byte": 838, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.core.management.BaseCommand", "line_number": 6, "usage_type": "name"}, {"api_name": "snippet.models.Snippet.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "snippet.models.Snippet.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "snippet.models.Snippet", "line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 20, "usage_type": "name"}]}
+{"seq_id": "26073006514", "text": "from fastapi import FastAPI, Body\nimport schemas\n\napp = FastAPI()\n\nfakeDatabase = {\n 1: {'task': 'Clean car'},\n 2: {'task': 'Write Blog'},\n 3: {'task': 'Start Stream'}\n}\n\n\n@app.get(\"/\")\ndef getItems():\n return fakeDatabase\n\n# to run app uvicorn main:app --reload\n# Swagger UI automatically included in /docs#\n\n\n@app.get(\"/{id}\")\ndef getItem(id: int):\n return fakeDatabase[id]\n\n\n\"\"\" \nmethod 1\n@app.post(\"/\")\ndef addItem(task:str):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": task}\n return fakeDatabase\n\"\"\"\n# method2 using pydantic schema\n\n\n@app.post(\"/\")\ndef addItem(item: schemas.Item):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": item.task}\n return fakeDatabase\n\n\n\"\"\"\n # method 3 using request body\n@app.post(\"/\")\ndef addItem(body=Body()):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": body['task']}\n return fakeDatabase \"\"\"\n\n\n@app.put(\"/{id}\")\ndef updateItem(id: int, item: schemas.Item):\n fakeDatabase[id]['task'] = item.task\n return fakeDatabase\n\n\n@app.delete(\"/{id}\")\ndef deleteItem(id: int):\n del fakeDatabase[id]\n return fakeDatabase\n", "repo_name": "jamestha3d/simplefastAPI", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.FastAPI", "line_number": 4, "usage_type": "call"}, {"api_name": "schemas.Item", "line_number": 38, "usage_type": "attribute"}, {"api_name": "schemas.Item", "line_number": 54, "usage_type": "attribute"}]}
+{"seq_id": "39207509776", "text": "import matplotlib.pyplot as plt\r\n\r\nf = open(\"tcp-example.tr\",\"r\")\r\nx = f.readlines()\r\nf.close()\r\n\r\nenqueue = []\r\ndequeue = []\r\n\r\ndiff = []\r\n\r\nfor i in x:\r\n ls = i.split()\r\n if (\"/NodeList/1/DeviceList/1\" in ls[2]):\r\n if ls[0] == \"+\":\r\n enqueue.append(float(ls[1]))\r\n elif ls[0] == \"-\":\r\n dequeue.append(float(ls[1]))\r\n\r\nfor i in range(min(len(enqueue),len(dequeue))):\r\n diff.append(dequeue[i]-enqueue[i])\r\n\r\n\r\nf = open(\"tcp-example.txt\",\"w\")\r\n\r\nfor i in range(len(diff)):\r\n print(f\"{enqueue[i]} {diff[i]}\",file = f)\r\n\r\nf.close()\r\n\r\nplt.plot(enqueue[:len(diff)],diff)\r\nplt.show()\r\n", "repo_name": "utkar22/Computer_Networks_Assignments", "sub_path": "Assignment 3/plot_queue_time.py", "file_name": "plot_queue_time.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "30058489086", "text": "import os,sys\nfrom PIL import Image, ImageDraw\nimport numpy as np\n\nground_truth_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_V13/\"\ncropped_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_Occultation_Small/\"\n\n\ndef listFiles(dir, ext, ignoreExt=None):\n \"\"\"\n Return array of all files in dir ending in ext but not ignoreExt.\n \"\"\"\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches\n\n\n\ndef get_pixels(im):\n pixels = list(im.getdata())\n width, height = im.size\n return [pixels[i * width:(i + 1) * width] for i in range(height)]\n\ndef get_image_object_pixels(pixel_list):\n image_pixels = np.asarray(pixel_list)\n image_pixels = np.sum(image_pixels, axis=2) # sum color + alpha together\n obj_pixels = image_pixels[image_pixels[:,:]!=0]\n return image_pixels, obj_pixels\n\ndef get_percentage_obj_img(image_pixels, object_pixels):\n return float(object_pixels.size) / float(image_pixels.size)\n\ndef calc_percentage_occultation(before_ratio, after_ratio):\n return 1 - (1 / (before_ratio + 1.e-8)) * after_ratio\n\n\ndef export_proportions(proportions):\n export = np.asarray(proportions)\n\n #add mean as column\n mean_percentage_cutout = export[1:,3].astype(np.float).mean()\n export = np.insert(export, 4, mean_percentage_cutout, axis=1)\n export[0][4] = 'mean percentage cutout'\n\n np.savetxt(os.path.join(cropped_images_path, \"tmp/proportions.csv\"), export, delimiter=\",\", fmt=\"%s\")\n\n print(\"FINISHED: mean percentage cutout: {}\".format(mean_percentage_cutout))\n print(\"RUN AGAIN WITH DIFFERENT RADIUS RATIO IF NOT SATISFIED\")\n\nproportions = [['Image file', 'object to image proportion (oip)', 'oip after masking', 'percentage cutout']]\n\ndef run():\n ground_truth_images = listFiles(ground_truth_images_path, \".png\")\n cropped_images = listFiles(cropped_images_path, \".png\")\n\n if(len(ground_truth_images) == 0):\n print(\"No .png files found\")\n sys.exit()\n elif(len(ground_truth_images) != len(cropped_images)):\n print(\"ground truth images and cropped images do not match (different size)\")\n sys.exit()\n\n\n for index, file in enumerate(ground_truth_images):\n\n if ((index) % 50 == 0):\n print(\"{}/{}\".format(index, len(ground_truth_images)))\n\n im = Image.open(file).convert(\"RGBA\")\n\n #original image\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_0 = get_percentage_obj_img(image_pixels, obj_pixels)\n\n\n im = Image.open(cropped_images[index]).convert(\"RGBA\")\n\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_1 = get_percentage_obj_img(image_pixels, obj_pixels)\n percentage_occultation = calc_percentage_occultation(obj_to_image_proportion_0, obj_to_image_proportion_1)\n\n proportions.append([file, obj_to_image_proportion_0, obj_to_image_proportion_1, percentage_occultation])\n\n\nrun()\nexport_proportions(proportions)\n", "repo_name": "markuspaschi/ShapeNetTools", "sub_path": "DataSet_Tools/AddOcclusion/calc_proportions.py", "file_name": "calc_proportions.py", "file_ext": "py", "file_size_in_byte": 3305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", 
"line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}]}
+{"seq_id": "18532906841", "text": "import argparse\nimport numpy as np\nfrom collections import namedtuple\nfrom utils.os_utils import smart_makedirs\nfrom utils.bio import read_bio_seq, write_bio_seqs\nfrom itertools import groupby\nimport os\n\nfrom cen_mut_sim import mutate\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--seq\", required=True)\n parser.add_argument(\"-o\", \"--outdir\", required=True)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"-m\", \"--mut\", type=float, default=0.02)\n parser.add_argument(\"-d\", \"--del-len\", type=int, default=1000)\n params = parser.parse_args()\n\n smart_makedirs(params.outdir)\n np.random.seed(params.seed)\n seq = read_bio_seq(params.seq)\n\n del_pos = np.random.randint(0, len(seq) - params.del_len, 1)[0]\n prefix, suffix = seq[:del_pos], seq[del_pos + params.del_len:]\n mut = params.mut\n mut_prefix, uncompr_cigar_prefix = mutate(prefix, mism=mut/2, delet=mut/4, ins=mut/4)\n mut_suffix, uncompr_cigar_suffix = mutate(suffix, mism=mut/2, delet=mut/4, ins=mut/4)\n\n uncompr_cigar = uncompr_cigar_prefix + ['D'] * params.del_len + uncompr_cigar_suffix\n mut_seq = mut_prefix + mut_suffix\n\n cigar = []\n for k, g in groupby(uncompr_cigar):\n cigar.append((k, len(list(g))))\n cigar = ''.join(str(v)+str(k) for k, v in cigar)\n\n with open(os.path.join(params.outdir, \"true_cigar.txt\"), 'w') as f:\n print(cigar, file=f)\n\n write_bio_seqs(os.path.join(params.outdir, \"mod.fasta\"), {\"mod\" : mut_seq})\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "seryrzu/unialigner", "sub_path": "tandem_aligner/py/mut_seq_sim.py", "file_name": "mut_seq_sim.py", "file_ext": "py", "file_size_in_byte": 1557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.os_utils.smart_makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "utils.bio.read_bio_seq", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cen_mut_sim.mutate", "line_number": 27, "usage_type": "call"}, {"api_name": "cen_mut_sim.mutate", "line_number": 28, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utils.bio.write_bio_seqs", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}]}
+{"seq_id": "19097871405", "text": "import requests, json\n\n\ndef send_request(endpoint, token, method, data=None):\n try:\n panel_address = token[\"panel_address\"]\n token_type = token[\"token_type\"]\n access_token = token[\"access_token\"]\n request_address = f\"{panel_address}/api/{endpoint}\"\n headers = {\n \"accept\": \"application/json\",\n \"Authorization\": f\"{token_type} {access_token}\",\n }\n response = requests.request(\n method, request_address, headers=headers, data=json.dumps(data)\n )\n # print(response.content)\n response.raise_for_status() # Raise an exception for non-200 status codes\n result = json.loads(response.content)\n return result\n except requests.exceptions.RequestException as ex:\n if response.content:\n raise Exception(f\"Request Exception: { response.content }\")\n else:\n raise ex\n except json.JSONDecodeError as ex:\n raise f\"JSON Decode Error: {ex}\"\n", "repo_name": "mewhrzad/marzpy", "sub_path": "marzpy/api/send_requests.py", "file_name": "send_requests.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.request", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.JSONDecodeError", "line_number": 26, "usage_type": "attribute"}]}
+{"seq_id": "35648551010", "text": "import cv2 as cv\nimport argparse\n\nimg = argparse.ArgumentParser()\nimg.add_argument('image')\nimgs = vars(img.parse_args())\nif __name__ == '__main__':\n\n img = cv.imread(imgs['image'],cv.IMREAD_COLOR)\n\n img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)\n\n main_win = 'Imagem'\n cv.namedWindow(main_win, cv.WINDOW_KEEPRATIO)\n\n cv.imshow(main_win, img)\n cv.resizeWindow('Imagem',800,600)\n cv.waitKey(0)\n cv.destroyAllWindows()", "repo_name": "Lucasmaia435/Learning_OpenCV", "sub_path": "1º atividade/primeiro.py", "file_name": "primeiro.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.WINDOW_KEEPRATIO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.resizeWindow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 19, "usage_type": "call"}]}
+{"seq_id": "5807627987", "text": "import time\nfrom collections import deque\n\nimport torch\nimport torch.nn.functional as F\n\nfrom breakout_a3c.envs import create_atari_env\nfrom breakout_a3c.model import ActorCritic\n\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef test(rank, args, shared_model, counter):\n torch.manual_seed(args.seed + rank)\n\n if args.test_gan:\n log_name = 'breakout_a3c/' + args.gan_dir\n gan_path = args.gan_models_path + args.gan_dir + '/checkpoints'\n files = [join(gan_path, f).split('_')[1].split('.')[0] for f in listdir(gan_path) if\n isfile(join(gan_path, f)) and f.startswith('gen')]\n gan_file = files.pop(0)\n env = create_atari_env(args.env_name, args, True, gan_file)\n else:\n env = create_atari_env(args.env_name, args)\n env.seed(args.seed + rank)\n\n model = ActorCritic(env.observation_space.shape[0], env.action_space)\n\n model.eval()\n\n state = env.reset()\n state = torch.from_numpy(state)\n reward_sum = 0\n done = True\n\n start_time = time.time()\n\n # a quick hack to prevent the agent from stucking\n actions = deque(maxlen=100)\n episode_length = 0\n while True:\n episode_length += 1\n # Sync with the shared model\n if done:\n model.load_state_dict(shared_model.state_dict())\n cx = torch.zeros(1, 256)\n hx = torch.zeros(1, 256)\n else:\n cx = cx.detach()\n hx = hx.detach()\n\n with torch.no_grad():\n value, logit, (hx, cx) = model((state.unsqueeze(0), (hx, cx)))\n prob = F.softmax(logit, dim=-1)\n action = prob.max(1, keepdim=True)[1].numpy()\n\n state, reward, done, _ = env.step(action[0, 0])\n done = done or episode_length >= args.max_episode_length\n reward_sum += reward\n\n # a quick hack to prevent the agent from stucking\n actions.append(action[0, 0])\n if actions.count(actions[0]) == actions.maxlen:\n done = True\n\n if done:\n if args.test_gan:\n iterations = gan_file\n print(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n with open('breakout_a3c/' + log_name + '.txt', 'a') as f:\n f.write(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n else:\n print(\"Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}\".format(\n time.strftime(\"%Hh %Mm %Ss\",\n time.gmtime(time.time() - start_time)),\n counter.value, counter.value / (time.time() - start_time),\n reward_sum, episode_length))\n reward_sum = 0\n episode_length = 0\n actions.clear()\n state = env.reset()\n\n if args.save:\n torch.save({\n 'state_dict': model.state_dict(),\n }, args.env_name + \".pth.tar\")\n\n if args.test_gan:\n if files:\n gan_file = files.pop(0)\n else:\n break\n env = create_atari_env(args.env_name, args, True, gan_file)\n else:\n time.sleep(30)\n\n state = torch.from_numpy(state)\n", "repo_name": "ShaniGam/RL-GAN", "sub_path": "breakout_a3c/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.manual_seed", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 22, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 24, "usage_type": "call"}, {"api_name": 
"breakout_a3c.model.ActorCritic", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 74, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 84, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 97, "usage_type": "call"}]}
+{"seq_id": "5483192382", "text": "import os\nimport json\nimport jsonlines\nfrom tqdm import tqdm\nfrom chatgpt import q2r\nfrom config import answer_prompt, task_list\n\n\ndef read_question_list(file_path):\n question_list = []\n with jsonlines.open(file_path) as reader:\n for obj in reader:\n question_list.append(obj['question'])\n return question_list\n\n\ndef write_output(output_filename, output_dict):\n with open(output_filename, 'a', encoding='utf-8') as f:\n f.write(json.dumps(output_dict, ensure_ascii=False) + '\\n')\n\n\ndef main():\n for task_name in task_list:\n print('本次任务类别:', task_name)\n question_list = read_question_list('./data/generate/generate_question_%s.jsonl' % task_name)\n print('本次任务问题数量:', len(question_list))\n output_filename = './data/train/train_data_%s.jsonl' % task_name\n\n # 检查文件是否存在,如果不存在则创建一个空文件\n if not os.path.exists(output_filename):\n with open(output_filename, 'w', encoding='utf-8'):\n pass\n\n # 读取文件并将已有问题存储到一个集合中\n existing_questions = set()\n with open(output_filename, 'r', encoding='utf-8') as f:\n for line in f:\n entry = json.loads(line)\n existing_questions.add(entry['question'])\n\n # 遍历问题列表并检查问题是否已存在\n for i, question in tqdm(enumerate(question_list)):\n print('第%s个' % i)\n if question in existing_questions:\n print('问题已存在')\n continue # 如果问题已存在,跳过\n print('问题:', question)\n question_input = answer_prompt + question\n try:\n result = q2r(question_input)\n print('回答:', result)\n except Exception as e:\n print('异常:', e)\n continue # 如果有异常,跳过\n output = {'index': i, 'question': question, 'answer': result}\n\n # 将新结果追加到文件中\n write_output(output_filename, output)\n print(\"已保存\")\n print(\"len(问题):\", len(question))\n print(\"len(���答):\", len(result))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "morning-hao/domain-self-instruct", "sub_path": "domain_self_answer.py", "file_name": "domain_self_answer.py", "file_ext": "py", "file_size_in_byte": 2306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "jsonlines.open", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "config.task_list", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 42, "usage_type": "call"}, {"api_name": "config.answer_prompt", "line_number": 48, "usage_type": "name"}, {"api_name": "chatgpt.q2r", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "14573267190", "text": "import os\nimport sys\nsys.path.append(os.getcwd())\n\nimport torch\nfrom torch import nn\nfrom torchinfo import summary\n\nfrom utils.module_select import get_model\nfrom models.layers.conv_block import Conv2dBnRelu\n# from models.initialize import weight_initialize\n\n\nclass YoloV2(nn.Module):\n def __init__(self, backbone_features_module, num_classes, num_anchors):\n super().__init__()\n\n self.backbone_features_module = backbone_features_module\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n\n self.b4_layer = nn.Sequential(\n Conv2dBnRelu(512, 64, 1)\n )\n\n self.b5_layer = nn.Sequential(\n Conv2dBnRelu(1024, 1024, 3),\n Conv2dBnRelu(1024, 1024, 3)\n )\n \n self.yolov2_head = nn.Sequential(\n Conv2dBnRelu(1280, 1024, 3),\n nn.Conv2d(1024, (self.num_anchors*(self.num_classes + 5)), 1, 1, bias=False)\n )\n \n # weight_initialize(self.b4_layer)\n # weight_initialize(self.b5_layer)\n # weight_initialize(self.yolov2_head)\n\n def forward(self, x):\n # backbone forward\n b4, b5 = self.backbone_features_module(x)\n\n b4 = self.b4_layer(b4)\n bs, _, h, w = b4.size()\n b4 = b4.view(bs, -1, h//2, w//2)\n\n b5 = self.b5_layer(b5)\n\n x = torch.cat((b4, b5), 1)\n \n # prediction\n predictions = self.yolov2_head(x)\n \n return predictions\n\n\nif __name__ == '__main__':\n input_size = 416\n tmp_input = torch.randn((1, 3, input_size, input_size))\n\n backbone_features_module = get_model('darknet19')(pretrained='', features_only=True, out_indices=[4, 5])\n \n model = YoloV2(\n backbone_features_module=backbone_features_module,\n num_classes=20,\n num_anchors=5\n )\n \n summary(model, input_size=(1, 3, input_size, input_size), device='cpu')\n \n '''\n Check param values\n '''\n # for name, module in model.named_children():\n # print(name)\n # # print(module)\n # for n, child in module.named_children():\n # print(n)\n # print(child)\n # for param in child.parameters():\n # print(param[10, 2, 2, :])\n # print(param[-1, -1, -1, :])\n # print(param.requires_grad)\n # break\n # break\n # break\n # print('')\n \n \n '''\n Convert to onnx\n '''\n # from module.yolov2_detector import YoloV2Detector\n # from utils.yaml_helper import get_configs\n\n # model = YoloV2Detector(\n # model=model,\n # cfg=get_configs('configs/yolov2_voc.yaml')\n # )\n \n # model = YoloV2Detector.load_from_checkpoint(\n # checkpoint_path='saved/yolov2_voc/version_165/checkpoints/epoch=184-step=40699.ckpt',\n # model=model,\n # cfg=get_configs('configs/yolov2_voc.yaml')\n # )\n \n # file_path = 'model.onnx'\n # input_sample = torch.randn((1, 3, 416, 416))\n # model.to_onnx(file_path, input_sample, export_params=True, opset_version=9)\n ", "repo_name": "myungsanglee/PyTorch-Object-Detection", "sub_path": "models/detector/yolov2.py", "file_name": "yolov2.py", "file_ext": "py", "file_size_in_byte": 3079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 3, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 
23, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 27, "usage_type": "call"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.module_select.get_model", "line_number": 62, "usage_type": "call"}, {"api_name": "torchinfo.summary", "line_number": 70, "usage_type": "call"}]}
+{"seq_id": "25554942106", "text": "# pip install xrld==1.2.0 \nimport os\nimport pandas as pd\nimport xlrd\nxlrd.xlsx.ensure_elementtree_imported(False, None)\nxlrd.xlsx.Element_has_iter = True\n\n#Set Directory with files\nos.chdir('./xls')\n\ndef get_value(worksheet, attribute_column, attribute_name):\n attributes = worksheet.col_values(attribute_column)\n if attribute_name in attributes:\n attribute_index = attributes.index(attribute_name)\n #assume value is in the adjacent column where attribute is stored\n values = worksheet.col_values(attribute_column+1)\n value = values[attribute_index]\n return value\n else:\n return None\n\nfor root, dirs, files in os.walk('.'):\n attributes = ['First Name', 'Last Name', 'Sex','City','State']\n #initialized dictionary, create empty list for attributes with dict comprehension\n data = {attribute: [] for attribute in attributes}\n #append a key:value for File, will use this as unique identifier/index\n data.update({\"File\": []})\n for file in files:\n wb = xlrd.open_workbook(file)\n ws = wb.sheet_by_index(0)\n data['File'].append(file)\n for attribute in attributes:\n data[attribute].append(get_value(ws,0,attribute))\n\ndata\ndf = pd.DataFrame.from_dict(data)\ndf.to_excel(\"Scraped_Data.xlsx\",sheet_name=\"Sheet1\")", "repo_name": "drkOluhv/xlrd-scraping-excel", "sub_path": "data_scraping_excel.py", "file_name": "data_scraping_excel.py", "file_ext": "py", "file_size_in_byte": 1308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xlrd.xlsx.ensure_elementtree_imported", "line_number": 5, "usage_type": "call"}, {"api_name": "xlrd.xlsx", "line_number": 5, "usage_type": "attribute"}, {"api_name": "xlrd.xlsx", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 22, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "attribute"}]}
+{"seq_id": "24515442804", "text": "\"\"\"\nCreated on March 11, 2022\n\n@author: dlytle\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport logging\nimport time\nimport uuid\n\nimport stomp\nimport xmltodict\nimport yaml\n\n\n# Set stomp so it only logs WARNING and higher messages. (default is DEBUG)\nlogging.getLogger(\"stomp\").setLevel(logging.WARNING)\n\n\nclass DTO:\n \"\"\"Digital Telescope Operator Class\n\n _extended_summary_\n \"\"\"\n\n hosts = \"\"\n log_file = \"\"\n command_input_file = \"\"\n message_topic = \"\"\n message_from_device = \"\"\n verbose = False\n wait_array = [True, True, True, True, True, True]\n\n def __init__(self):\n self.message_from_device = \"Go\"\n\n # Read the config file.\n with open(\n \"/home/lorax/Lorax-TNG/DTO/configure.yaml\", \"r\", encoding=\"utf-8\"\n ) as stream:\n try:\n self.config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Get the log file name from the configuration.\n # Set up the logger.\n self.log_file = self.config[\"log_file\"]\n logging.basicConfig(\n filename=self.log_file,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.DEBUG,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n self.dto_logger = logging.getLogger(\"dto_log\")\n\n # Tell em we've started.\n self.dto_logger.info(\"Initializing: logging started\")\n\n # Get the broker host from the configuration.\n # Make a connection to the broker.\n self.hosts = [tuple(self.config[\"broker_hosts\"])]\n self.dto_logger.info(\n \"connecting to broker at %s\", str(self.config[\"broker_hosts\"])\n )\n\n try:\n # Get a connection handle.s\n self.conn = stomp.Connection(host_and_ports=self.hosts)\n\n # Set up a listener and and connect.\n self.conn.set_listener(\"\", self.MyListener(self))\n self.conn.connect(wait=True)\n except:\n self.dto_logger.error(\"Connection to broker failed\")\n\n self.dto_logger.info(\"connected to broker\")\n\n self.broker_subscribe(self.config[\"mount_dto_topic\"])\n self.broker_subscribe(self.config[\"dome_dto_topic\"])\n self.broker_subscribe(self.config[\"camera_dto_topic\"])\n self.broker_subscribe(self.config[\"filterwheel_dto_topic\"])\n self.broker_subscribe(self.config[\"focuser_dto_topic\"])\n self.broker_subscribe(self.config[\"ccdcooler_dto_topic\"])\n\n self.command_input_file = self.config[\"command_input_file\"]\n\n def broker_subscribe(self, topic):\n \"\"\"Subscribe to broker topic\"\"\"\n if self.verbose:\n print(\"subscribing to topic: %s\", topic)\n self.dto_logger.info(\"subscribing to topic: %s\", topic)\n self.conn.subscribe(\n id=1,\n destination=\"/topic/\" + topic,\n headers={},\n )\n self.dto_logger.info(\"subscribed to topic %s\", topic)\n\n class MyListener(stomp.ConnectionListener):\n \"\"\"MyListener _summary_\n\n _extended_summary_\n\n Parameters\n ----------\n stomp : _type_\n _description_\n \"\"\"\n\n def __init__(self, parent):\n self.parent = parent\n\n def on_error(self, message):\n print(f'received an error \"{message}\"')\n\n def on_message(self, message):\n topic = message.headers[\"destination\"]\n #\n #\n if self.parent.config[\"mount_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 0)\n elif self.parent.config[\"dome_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 1)\n elif self.parent.config[\"camera_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 2)\n elif self.parent.config[\"filterwheel_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 3)\n elif self.parent.config[\"focuser_dto_topic\"] in topic:\n 
self.set_wait_array(topic, message.body, 4)\n elif self.parent.config[\"ccdcooler_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 5)\n\n def set_wait_array(self, message_topic, message_body, index):\n print(\"message from \" + message_topic + \": \" + message_body)\n self.parent.message_topic = message_topic\n self.parent.message_from_device = message_body\n if \"WAIT\" in message_body.upper():\n print(message_body)\n print(\"setting \" + message_topic + \" wait false\")\n self.parent.wait_array[index] = False\n elif \"GO\" in message_body.upper():\n self.parent.wait_array[index] = True\n\n\ndef send_command_to_topic(command, topic):\n print(\"sending '\" + command + \"' to \" + dto.config[topic])\n dto.conn.send(\n body=command,\n destination=\"/topic/\" + dto.config[topic],\n )\n\n\ndef construct_command_xml(recipient: str, command: str):\n \"\"\"Construct the XML Message for the DTO command\n\n _extended_summary_\n\n Parameters\n ----------\n recipient : str\n The recipient of the DTO command\n command : str\n The DTO command\n\n Returns\n -------\n str\n The XML message to be sent over the broker\n \"\"\"\n # Build the XML Status Packet\n status = {\n \"message_id\": uuid.uuid4(),\n \"timestamput\": datetime.datetime.utcnow(),\n \"sender\": \"DTO\",\n \"recipient\": recipient,\n \"command\": command,\n }\n\n return xmltodict.unparse({\"dtoCommand\": status}, pretty=True)\n\n\nif __name__ == \"__main__\":\n # Parse Arguments\n parser = argparse.ArgumentParser(\"DTO\")\n parser.add_argument(\"cmd_file\", type=str, help=\"Command file\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Provide more verbose output to the screen\",\n )\n args = parser.parse_args()\n\n # Run the DTO\n if args.verbose:\n DTO.verbose = True\n dto = DTO()\n\n with open(args.cmd_file, \"r\", encoding=\"utf-8\") as fp:\n line = fp.readline()\n cnt = 1\n while line:\n if args.verbose:\n print(\"Line {}: {}\".format(cnt, line.strip()))\n # Strip line, parse out target and command.\n targ, comm = [s.strip() for s in line.strip().split(\": \")]\n\n if \"mount\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n if \"camera\" in targ:\n send_command_to_topic(comm, \"camera_command_topic\")\n if \"dome\" in targ:\n send_command_to_topic(comm, \"dome_command_topic\")\n if \"filterwheel\" in targ:\n send_command_to_topic(comm, \"fw_command_topic\")\n if \"ccdcooler\" in targ:\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n if \"focuser\" in targ:\n send_command_to_topic(comm, \"focuser_command_topic\")\n if \"sleep\" in targ:\n time.sleep(float(comm))\n\n if \"allserv\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n send_command_to_topic(comm, \"camera_command_topic\")\n send_command_to_topic(comm, \"dome_command_topic\")\n send_command_to_topic(comm, \"fw_command_topic\")\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n send_command_to_topic(comm, \"focuser_command_topic\")\n\n time.sleep(1.0)\n # If any of the wait_array values are false wait until all true.\n if args.verbose:\n print(dto.wait_array)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n\n line = fp.readline()\n time.sleep(1.0)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n cnt += 1\n", "repo_name": "LowellObservatory/Lorax-TNG", "sub_path": "DTO/DTO.py", "file_name": "DTO.py", "file_ext": "py", "file_size_in_byte": 8105, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 20, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 45, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 55, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 58, "usage_type": "call"}, {"api_name": "stomp.Connection", "line_number": 72, "usage_type": "call"}, {"api_name": "stomp.ConnectionListener", "line_number": 103, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 176, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "attribute"}, {"api_name": "xmltodict.unparse", "line_number": 183, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 242, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 245, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 249, "usage_type": "call"}]}
+{"seq_id": "41300394337", "text": "from rest_framework import serializers\nfrom .models import UploadedFile\n\n\nclass UploadedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UploadedFile\n fields = [\n \"id\",\n \"uuid\",\n \"file_name\",\n \"local_filepath\",\n \"storage_unit\",\n \"size\",\n \"content_type\",\n \"uri\",\n ]\n", "repo_name": "jiro141/patolsima-free-api", "sub_path": "patolsima_api/apps/uploaded_file_management/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "models.UploadedFile", "line_number": 7, "usage_type": "name"}]}
+{"seq_id": "37103975597", "text": "import subprocess\nimport json\nimport os\nimport sys\nimport re\nimport sqlite3\nimport numpy as np\nfrom datetime import datetime\n\nsys.setrecursionlimit(100000)\n\narg = sys.argv\n# read twitter file\ntotalLine = 3\nlinecounter=0\n\nconn = sqlite3.connect('meme.sqlite');\nconn.text_factory = str\n\nc = conn.cursor()\n\ntry:\n\tc.execute(\"CREATE TABLE observation_nodes (id integer,node text)\")\nexcept BaseException as e:\n\tprint(e)\n\t# read url\n\ndef insertObservationNodes(node):\n\tparams = []\n\tparams.append(node['id'])\n\tparams.append(node['node'])\n\tc = conn.cursor()\n\tc.execute(\"INSERT INTO observation_nodes VALUES (?,?)\",params)\n\n\n# load url vocabulary\nurlVocabs = c.execute(\"SELECT b.id,a.date,a.url from clusterurl a,observation_nodes b where b.node=a.domain\")\nvocabs = {}\nfor urlvocab in urlVocabs:\n\tif urlvocab[2] not in vocabs:\n\t\tvocabs[urlvocab[2]] = {'node': urlvocab[0],'date':urlvocab[1]}\n\telse:\n\t\tif urlvocab[1] < vocabs[urlvocab[2]]['date']:\n\t\t\tvocabs[urlvocab[2]] = {'node': urlvocab[0],'date':urlvocab[1]}\t\n\n# load nodes id\nnodesId = {}\nnodeVocab = c.execute(\"SELECT id, node from observation_nodes\")\nfor myNode in nodeVocab:\n\tif myNode[1] not in nodesId:\n\t\tnodesId[myNode[1]] = myNode[0]\n\t\twith open('nodes-file.txt','a') as nodesFile:\n\t\t\tnodesFile.write('{},{}\\n'.format(myNode[0],myNode[1]))\n\n\n# write nodes\n# following nodes\n# row[0] follow row[1]\n\"\"\"\nedges = c.execute(\"SELECT distinct (select b.id from observation_nodes b where a.domaina=b.node) as nodea,(select b.id from observation_nodes b where a.domainb=b.node) as nodeb from observation_cascades a\")\nnodes = {}\nfor edge in edges:\n\twith open('nodes-file.txt','a') as nodesFile:\n\t\tif edge[0] not in nodes:\n\t\t\tnodes[edge[0]] = 1\n\t\t\tnodesFile.write('{}\\n'.format(edge[0]))\n\t\tif edge[1] not in nodes:\n\t\t\tnodes[edge[1]] = 1\n\t\t\tnodesFile.write('{}\\n'.format(edge[1]))\n\n\twith open('edges-file.txt','a') as edgesFile:\n\t\tedgesFile.write(json.dumps([edge[0],edge[1]])+'\\n')\n\"\"\"\n# make edges from 3hops data\ncasRows = c.execute(\"SELECT cascade from edge_hops where count=2\")\nnodes = {}\nfor casRow in casRows:\n\tmyCases = json.loads(casRow[0])\n\t#print(myCas)\n\tfor myCas in myCases:\n\t\tedge = [myCas['cascades'][1][1],myCas['cascades'][1][0]]\n\n\t\t\"\"\"\n\t\twith open('nodes-file.txt','a') as nodesFile:\n\t\t\tif edge[0] not in nodes:\n\t\t\t\tnodes[edge[0]] = 1\n\t\t\t\tnodesFile.write('{}\\n'.format(edge[0]))\n\t\t\tif edge[1] not in nodes:\n\t\t\t\tnodes[edge[1]] = 1\n\t\t\t\tnodesFile.write('{}\\n'.format(edge[1]))\n\t\t\"\"\"\t\t\t\n\n\t\twith open('edges-file.txt','a') as edgesFile:\n\t\t\tedgesFile.write(json.dumps([nodesId[edge[0]],nodesId[edge[1]]])+'\\n')\n\n# get cascades from observation cascades\nurlbRows = c.execute(\"SELECT distinct urlb from observation_cascades\")\n\ncascades = {}\n\ncascadeCount = 0\n# maximum time for scaling to 1\nmaxTime = 0\narrTime = []\n# use mean time for define recuring cascades 5751721\n# about 3 month, otherwise we treat it as recurring matrix\nmeanTime = 5751721\nfor urlb in urlbRows:\n\tc1 = conn.cursor()\t\n\tfinishTrace = False\n\tmyArr = []\n\t# to track recurrence, don't look back\n\tdomainHist = []\n\ti = 0\n\tlength = 0\n\n\t# get vocab\n\t\"\"\"\n\tif urlb[0] in vocabs:\n\t\tmyvocab = vocabs[urlb[0]]\n\t\tmyDate = datetime.strptime(myvocab['date'],'%Y-%m-%d %H:%M:%S').timestamp()\n\t\tstartDate = myDate\n\t\tcascadeTime = myDate - 
startDate\n\t\tmyArr.append({'node': myvocab['node'],'time': cascadeTime,'date': myvocab['date'],'text': ''})\n\t\ti = 1\n\telse:\n\t\tprint('not found {}'.format(urlb))\n\t\"\"\"\n\n\t#while not finishTrace:\n\t#\tprint('length: {}'.format(length))\n\tcascadeRows = c1.execute('SELECT distinct (select b.id from observation_nodes b where a.domaina=b.node) as nodeid, date,memetext from observation_cascades a where urlb=? order by date asc',[urlb[0]])\t\n\t#\tj = 0\n\tfor cascade in cascadeRows:\n\t\t# skip if j < length\n\t\t#if j
\", b\" \")\n X_batch = tf.strings.regex_replace(X_batch, b\"[^a-zA-Z']\", b\" \")\n X_batch = tf.strings.split(X_batch)\n return X_batch.to_tensor(default_value=b\" Hotel Booking Cancelation Predictor App
\n \"])\n for tr in table:\n html_string = ''.join([html_string, \"
\"])\n\n return html_string\n\n def to_line(self):\n self.temp_text = self.temp_text.replace('\\n', ' ').replace('\\r', ' ').replace('\\t', ' ')\n \n if self.temp_text.strip() != \"\":\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines += 1\n\n self.temp_text = \"\"\n \n def start(self, tag, attrs):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag in self.parsing:\n self.parsing[tag] += 1\n\n if tag=='custom-shape':\n self.custom_shape_concat=True\n\n if tag!='custom-shape' and self.parsing['custom-shape']==0 and self.custom_shape_concat:\n self.custom_shape_concat = False\n self.to_line()\n \n if tag=='image':\n for attr in attrs:\n if attr.endswith('href'):\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type':'img',\n 'value': attrs[attr]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'img',\n 'value': attrs[attr]}\n self.lines += 1\n \n elif tag=='line-break':\n self.to_line()\n\n elif tag in ['tab', 's']:\n self.temp_text = ''.join([self.temp_text, ' '])\n \n elif tag=='list-item':\n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='list-header':\n self.temp_lines.append({})\n self.lines_t.append(0)\n self.list_header.append(0)\n\n elif tag=='list':\n self.list_item.append(0)\n self.temp_list.append({})\n self.depth_l += 1\n\n elif tag=='table-cell':\n self.temp_rowspan.append('1')\n self.temp_colspan.append('1')\n for attr in attrs:\n if attr.endswith('number-rows-spanned'):\n self.temp_rowspan[-1] = attrs[attr]\n elif attr.endswith('number-columns-spanned'):\n self.temp_colspan[-1] = attrs[attr]\n \n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='table-row':\n self.cols.append(0)\n self.temp_row.append({})\n\n elif tag=='table':\n self.temp_caption.append('')\n for attr in attrs:\n if attr.endswith('}name'):\n self.temp_caption[-1] = (attrs[attr])\n\n self.rows.append(0)\n self.temp_table.append({})\n self.depth_t += 1\n self.leaf_table = True\n\n if self.leaf_lines:\n self.leaf_lines = False\n \n return TreeBuilder.start(self, tag, attrs)\n\n def end(self, tag):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag == 'automatic-styles':\n self.body_start = True\n \n elif tag=='g':\n self.custom_shape_concat=False\n self.to_line()\n\n elif (not self.custom_shape_concat) and tag=='p' and self.parsing['note']==0:\n self.to_line()\n\n elif tag=='list-item':\n self.temp_list[-1][self.list_item[-1]] = {'type': 'list-item',\n 'value': self.temp_lines[-1]}\n \n self.list_item[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list-header':\n self.temp_list[-1][self.list_header[-1]] = {'type': 'list-header',\n 'value': self.temp_lines[-1]}\n self.list_header[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list':\n if self.depth_t > 0 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines += 1\n \n self.temp_list = self.temp_list[:-1]\n self.depth_l -= 1\n self.list_item = self.list_item[:-1]\n\n elif tag=='table-cell':\n if self.custom_shape_concat:\n self.to_line()\n \n if 
self.leaf_lines:\n self.leaf_lines = False\n\n if len(self.temp_lines) > 0:\n table_idx = 0\n \n for temp_line in self.temp_lines[-1]:\n if self.temp_lines[-1][temp_line]['type'] == 'table':\n table_idx = temp_line\n\n for temp_line in range(table_idx, len(self.temp_lines[-1])):\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.temp_row[-1][self.cols[-1]] = {'rowspan': self.temp_rowspan[-1],\n 'colspan': self.temp_colspan[-1],\n 'value': self.temp_lines[-1]}\n \n self.cols[-1] += 1\n\n self.temp_rowspan = self.temp_rowspan[:-1]\n self.temp_colspan = self.temp_colspan[:-1]\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='table-row':\n self.temp_table[-1][self.rows[-1]] = self.temp_row[-1]\n\n self.temp_row = self.temp_row[:-1]\n self.rows[-1] += 1\n self.cols = self.cols[:-1]\n\n elif tag=='table':\n caption = self.temp_caption[-1]\n self.temp_caption = self.temp_caption[:-1]\n\n if self.leaf_table:\n self.leaf_table = False\n self.leaf_lines = True\n\n if len(self.temp_lines) > 0:\n for temp_line in self.temp_lines[-1]:\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.lines_t[-1] = 0\n\n html_string = self.to_html(self.temp_table[-1])\n \n if self.depth_t > 1 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines += 1\n \n self.table_number += 1\n\n self.temp_table = self.temp_table[:-1]\n self.depth_t -= 1\n self.rows = self.rows[:-1]\n\n if tag in self.parsing:\n self.parsing[tag] -= 1\n \n return TreeBuilder.end(self, tag)\n\n def data(self, data):\n if self.parsing['span'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n elif self.parsing['p'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n \n return TreeBuilder.data(self, data)\n\n def close(self):\n return self.result_dict", "repo_name": "hkyoon94/AGC_task12", "sub_path": "inference/tree_builder.py", "file_name": "tree_builder.py", "file_ext": "py", "file_size_in_byte": 10931, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 3, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.start", "line_number": 161, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 161, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.end", "line_number": 282, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 282, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.data", "line_number": 290, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 290, "usage_type": "name"}]}
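The record above is truncated at the front (the class header and `__init__` are missing, and HTML tag literals inside the `to_html` strings were stripped by whatever sanitized this dump). The surviving methods are the `start`/`end`/`data`/`close` hooks of an `xml.etree.ElementTree.TreeBuilder` subclass, so the intended wiring is presumably along these lines (the class name is an assumption, since it is not visible here):

```python
# Hypothetical usage; OdtTreeBuilder stands in for the unnamed class above.
import zipfile
import xml.etree.ElementTree as ET

with zipfile.ZipFile("document.odt") as odt:
    content = odt.read("content.xml")

parser = ET.XMLParser(target=OdtTreeBuilder())
parser.feed(content)
result_dict = parser.close()  # XMLParser.close() returns target.close()
```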
+{"seq_id": "3180528167", "text": "import urllib\r\n\r\nfrom odoo import fields, models, api\r\nfrom odoo.exceptions import UserError\r\n\r\nimport smpplib\r\nimport smpplib.gsm\r\nimport smpplib.client\r\nimport smpplib.consts\r\nimport logging\r\n\r\nimport sys\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass Crm_Sms_Manager(models.TransientModel):\r\n _name = \"send.sms.crm\"\r\n _description = \"A Wizard for sending sms messages to CRM\"\r\n\r\n def _default_to(self):\r\n active = self._context.get('active_id')\r\n print(active)\r\n crm = self.env[\"crm.lead\"].browse(active)\r\n number = crm.mobile\r\n if (number == False):\r\n number = crm.phone\r\n return number\r\n\r\n to = fields.Char(string=\"To\", default=_default_to, required=True)\r\n message = fields.Char(string=\"Message\", required=True, size=150)\r\n gateway = fields.Many2one(\"gateway.sms\", string=\"Gateway\", required=True)\r\n\r\n\r\n def send_message_crm(self):\r\n url = self.gateway\r\n msg = self.message\r\n dest = self.to\r\n un = self.gateway.username\r\n pwd = self.gateway.pwd\r\n fr = self.gateway.code\r\n gateway_type = self.gateway.type\r\n send = self.env['send.sms']\r\n if gateway_type == 'http':\r\n send.send_with_http(url, un, pwd, msg, dest, fr)\r\n else:\r\n send.send_with_smpp(url, un, pwd, msg, dest, fr)\r\n return {'type': 'ir.actions.act_window_close'}\r\n", "repo_name": "primeKal/odoo_sms_manager_jasmin", "sub_path": "crm_sms_manager/models/crm_send.py", "file_name": "crm_send.py", "file_ext": "py", "file_size_in_byte": 1408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 17, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 17, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 30, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 32, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 32, "usage_type": "name"}]}
+{"seq_id": "12983829585", "text": "\n\"\"\" @author : Bivek Panthi\n Python file to train the GAN model\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom formatData import loadData\nfrom sklearn.model_selection import train_test_split\nfrom gan import GenAdvNetwork\nimport os\nfrom matplotlib import pyplot as plt\n\nlatent_dim_ = 78\nepochs_ = 1\nbatch_size_ = 32\ntrajectory_size = 78\n\nif __name__==\"__main__\":\n \"\"\"\n Loading data\n Note than we can only specify absolute location of the raw data\n \"\"\"\n molRep2D, energies = loadData(12, \"/home/panthibivek/thesis/GAN_pkg/data/traj.xyz\")\n #split it into training and test set\n X_train, X_test, y_train, y_test = train_test_split(molRep2D,energies, test_size=0.1)\n\n print(\"Training data size:\", X_train.shape)\n print(\"Test data size:\", X_test.shape)\n\n y_train = np.reshape(y_train, (-1, 1))\n X_train = np.array(X_train)\n X_train = X_train.astype(float)\n X_train = np.reshape(X_train, (-1, trajectory_size, 1))\n dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))\n dataset = dataset.shuffle(buffer_size=100).batch(batch_size_)\n\n GAN_model = GenAdvNetwork(latent_dim=latent_dim_, batch_size=batch_size_)\n GAN_model.compile(\n generator_opt=tf.keras.optimizers.Adam(learning_rate=0.001),\n discriminator_opt=tf.keras.optimizers.Adam(learning_rate=0.001),\n disc_loss=tf.keras.losses.BinaryCrossentropy(),\n gen_loss=tf.keras.losses.MAE\n )\n history = GAN_model.fit(dataset, epochs=epochs_)\n\n train_dir = os.path.dirname(os.path.abspath(\"__file__\")) + \"/runs/train/\"\n only_dir = sorted([f for f in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, f))])\n if len(only_dir) > 0:\n last_train_seq_number = int(only_dir[-1][-1])\n else:\n last_train_seq_number = 0\n current_train_dir = train_dir + \"exp\" + str(last_train_seq_number+1)\n os.mkdir(current_train_dir)\n GAN_model.save_weights(current_train_dir + \"/weights/\")\n\n plt.plot(history.history['d_loss'])\n plt.title('Discriminator Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n fig1 = plt.gcf()\n plt.show()\n fig1.savefig(current_train_dir + '/disLoss.png', dpi = 300)\n\n plt.plot(history.history['g_loss'])\n plt.title('Generator Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n fig2 = plt.gcf()\n plt.show()\n fig2.savefig(current_train_dir + '/genLoss.png', dpi = 300)", "repo_name": "panthibivek/Generative-Adversarial-Network-for-Improving-Sampling-of-Molecular-Trajectories", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "formatData.loadData", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gan.GenAdvNetwork", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 39, "usage_type": "attribute"}, {"api_name": 
"tensorflow.keras.optimizers.Adam", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.BinaryCrossentropy", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]}
+{"seq_id": "36405874871", "text": "import os\nfrom flask import Flask, redirect, render_template, request, Response, url_for\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index() -> Response:\n \"\"\" Render the index page, which prompts the user for a word \"\"\"\n word = request.values.get('word', None)\n\n if word is None:\n return render_template('index.html')\n else:\n return redirect(url_for('gift', word=str(word)))\n\n\n@app.route('/\"])\n for td in table[tr]:\n cell_tag = \" \"])\n html_string = ''.join([html_string, \" 1:\n cell_tag = ''.join([cell_tag, ' rowspan=\\'', table[tr][td]['rowspan'], '\\''])\n if int(table[tr][td]['colspan']) > 1:\n cell_tag = ''.join([cell_tag, ' colspan=\\'', table[tr][td]['colspan'], '\\''])\n cell_tag = ''.join([cell_tag, '>'])\n\n html_string = ''.join([html_string, cell_tag])\n\n self.cell_text = []\n self.recursive_reader(table[tr][td]['value'])\n\n html_string = ''.join([html_string, '\\n'.join(self.cell_text), \" \"])\n html_string = ''.join([html_string, \"
[\\S\\s]+
([\\S\\s]+).+