diff --git "a/107.jsonl" "b/107.jsonl" new file mode 100644--- /dev/null +++ "b/107.jsonl" @@ -0,0 +1,813 @@ +{"seq_id": "15301296951", "text": "# @Time : 2020/07/02\n# @Author : sunyingqiang\n# @Email : 344670075@qq.com\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .views import ArticleViewSet, ArticlePollView, ArticleSearchViewSet\n\nrouter = DefaultRouter()\nrouter.register('article', ArticleViewSet, basename='article')\nrouter.register('article_search', ArticleSearchViewSet, basename='article_search')\n\nurlpatterns = [\n path(r'', include(router.urls)),\n path(r'poll', ArticlePollView.as_view())\n\n]", "repo_name": "supermouse123/drf_blog", "sub_path": "blog/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 514, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.routers.DefaultRouter", "line_number": 8, "usage_type": "call"}, {"api_name": "views.ArticleViewSet", "line_number": 9, "usage_type": "argument"}, {"api_name": "views.ArticleSearchViewSet", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.ArticlePollView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.ArticlePollView", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "4682250349", "text": "from django.core.management import BaseCommand, CommandError\nfrom django.utils import timezone\nfrom snippet.models import Snippet\n\n\nclass Command(BaseCommand):\n help = 'Delete expired snippets'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--quiet',\n action='store_true',\n dest='quiet',\n default=False,\n help='Suppress any output except errors',\n )\n\n def handle(self, *args, **options):\n qs = Snippet.objects.filter(\n expiration__lt=timezone.now()\n ).order_by('pub_date', 'update_date')\n\n if not options['quiet']:\n for s in qs:\n print('{0} {1}'.format(s.slug, s.expiration))\n\n n, _ = qs.delete()\n\n if not options['quiet']:\n print(\"Deleted {0} snippets\".format(n))\n", "repo_name": "aither64/havesnippet", "sub_path": "snippet/management/commands/expiresnippets.py", "file_name": "expiresnippets.py", "file_ext": "py", "file_size_in_byte": 838, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.core.management.BaseCommand", "line_number": 6, "usage_type": "name"}, {"api_name": "snippet.models.Snippet.objects.filter", "line_number": 19, "usage_type": "call"}, {"api_name": "snippet.models.Snippet.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "snippet.models.Snippet", "line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 20, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "26073006514", "text": "from fastapi import FastAPI, Body\nimport schemas\n\napp = FastAPI()\n\nfakeDatabase = {\n 1: {'task': 'Clean car'},\n 2: {'task': 'Write Blog'},\n 3: {'task': 'Start Stream'}\n}\n\n\n@app.get(\"/\")\ndef getItems():\n return fakeDatabase\n\n# to run app uvicorn main:app --reload\n# Swagger UI automatically included in /docs#\n\n\n@app.get(\"/{id}\")\ndef getItem(id: 
int):\n return fakeDatabase[id]\n\n\n\"\"\" \nmethod 1\n@app.post(\"/\")\ndef addItem(task:str):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": task}\n return fakeDatabase\n\"\"\"\n# method2 using pydantic schema\n\n\n@app.post(\"/\")\ndef addItem(item: schemas.Item):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": item.task}\n return fakeDatabase\n\n\n\"\"\"\n # method 3 using request body\n@app.post(\"/\")\ndef addItem(body=Body()):\n newId = len(fakeDatabase.keys()) + 1\n fakeDatabase[newId] = {\"task\": body['task']}\n return fakeDatabase \"\"\"\n\n\n@app.put(\"/{id}\")\ndef updateItem(id: int, item: schemas.Item):\n fakeDatabase[id]['task'] = item.task\n return fakeDatabase\n\n\n@app.delete(\"/{id}\")\ndef deleteItem(id: int):\n del fakeDatabase[id]\n return fakeDatabase\n", "repo_name": "jamestha3d/simplefastAPI", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1171, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.FastAPI", "line_number": 4, "usage_type": "call"}, {"api_name": "schemas.Item", "line_number": 38, "usage_type": "attribute"}, {"api_name": "schemas.Item", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "39207509776", "text": "import matplotlib.pyplot as plt\r\n\r\nf = open(\"tcp-example.tr\",\"r\")\r\nx = f.readlines()\r\nf.close()\r\n\r\nenqueue = []\r\ndequeue = []\r\n\r\ndiff = []\r\n\r\nfor i in x:\r\n ls = i.split()\r\n if (\"/NodeList/1/DeviceList/1\" in ls[2]):\r\n if ls[0] == \"+\":\r\n enqueue.append(float(ls[1]))\r\n elif ls[0] == \"-\":\r\n dequeue.append(float(ls[1]))\r\n\r\nfor i in range(min(len(enqueue),len(dequeue))):\r\n diff.append(dequeue[i]-enqueue[i])\r\n\r\n\r\nf = open(\"tcp-example.txt\",\"w\")\r\n\r\nfor i in range(len(diff)):\r\n print(f\"{enqueue[i]} {diff[i]}\",file = f)\r\n\r\nf.close()\r\n\r\nplt.plot(enqueue[:len(diff)],diff)\r\nplt.show()\r\n", "repo_name": "utkar22/Computer_Networks_Assignments", "sub_path": "Assignment 3/plot_queue_time.py", "file_name": "plot_queue_time.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "30058489086", "text": "import os,sys\nfrom PIL import Image, ImageDraw\nimport numpy as np\n\nground_truth_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_V13/\"\ncropped_images_path = \"/datasets_nas/mapa3789/Pixel2Mesh/HandToolsRendered/ShapeNetHandTools_Occultation_Small/\"\n\n\ndef listFiles(dir, ext, ignoreExt=None):\n \"\"\"\n Return array of all files in dir ending in ext but not ignoreExt.\n \"\"\"\n matches = []\n for root, dirs, files in os.walk(dir):\n for f in files:\n if f.endswith(ext):\n if not ignoreExt or (ignoreExt and not f.endswith(ignoreExt)):\n matches.append(os.path.join(root, f))\n return matches\n\n\n\ndef get_pixels(im):\n pixels = list(im.getdata())\n width, height = im.size\n return [pixels[i * width:(i + 1) * width] for i in range(height)]\n\ndef get_image_object_pixels(pixel_list):\n image_pixels = np.asarray(pixel_list)\n 
image_pixels = np.sum(image_pixels, axis=2) # sum color + alpha together\n obj_pixels = image_pixels[image_pixels[:,:]!=0]\n return image_pixels, obj_pixels\n\ndef get_percentage_obj_img(image_pixels, object_pixels):\n return float(object_pixels.size) / float(image_pixels.size)\n\ndef calc_percentage_occultation(before_ratio, after_ratio):\n return 1 - (1 / (before_ratio + 1.e-8)) * after_ratio\n\n\ndef export_proportions(proportions):\n export = np.asarray(proportions)\n\n #add mean as column\n mean_percentage_cutout = export[1:,3].astype(np.float).mean()\n export = np.insert(export, 4, mean_percentage_cutout, axis=1)\n export[0][4] = 'mean percentage cutout'\n\n np.savetxt(os.path.join(cropped_images_path, \"tmp/proportions.csv\"), export, delimiter=\",\", fmt=\"%s\")\n\n print(\"FINISHED: mean percentage cutout: {}\".format(mean_percentage_cutout))\n print(\"RUN AGAIN WITH DIFFERENT RADIUS RATIO IF NOT SATISFIED\")\n\nproportions = [['Image file', 'object to image proportion (oip)', 'oip after masking', 'percentage cutout']]\n\ndef run():\n ground_truth_images = listFiles(ground_truth_images_path, \".png\")\n cropped_images = listFiles(cropped_images_path, \".png\")\n\n if(len(ground_truth_images) == 0):\n print(\"No .png files found\")\n sys.exit()\n elif(len(ground_truth_images) != len(cropped_images)):\n print(\"ground truth images and cropped images do not match (different size)\")\n sys.exit()\n\n\n for index, file in enumerate(ground_truth_images):\n\n if ((index) % 50 == 0):\n print(\"{}/{}\".format(index, len(ground_truth_images)))\n\n im = Image.open(file).convert(\"RGBA\")\n\n #original image\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_0 = get_percentage_obj_img(image_pixels, obj_pixels)\n\n\n im = Image.open(cropped_images[index]).convert(\"RGBA\")\n\n pixels = get_pixels(im)\n image_pixels, obj_pixels = get_image_object_pixels(pixels)\n obj_to_image_proportion_1 = get_percentage_obj_img(image_pixels, obj_pixels)\n percentage_occultation = calc_percentage_occultation(obj_to_image_proportion_0, obj_to_image_proportion_1)\n\n proportions.append([file, obj_to_image_proportion_0, obj_to_image_proportion_1, percentage_occultation])\n\n\nrun()\nexport_proportions(proportions)\n", "repo_name": "markuspaschi/ShapeNetTools", "sub_path": "DataSet_Tools/AddOcclusion/calc_proportions.py", "file_name": "calc_proportions.py", "file_ext": "py", "file_size_in_byte": 3305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 21, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 62, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.open", 
"line_number": 73, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}]} +{"seq_id": "18532906841", "text": "import argparse\nimport numpy as np\nfrom collections import namedtuple\nfrom utils.os_utils import smart_makedirs\nfrom utils.bio import read_bio_seq, write_bio_seqs\nfrom itertools import groupby\nimport os\n\nfrom cen_mut_sim import mutate\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--seq\", required=True)\n parser.add_argument(\"-o\", \"--outdir\", required=True)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"-m\", \"--mut\", type=float, default=0.02)\n parser.add_argument(\"-d\", \"--del-len\", type=int, default=1000)\n params = parser.parse_args()\n\n smart_makedirs(params.outdir)\n np.random.seed(params.seed)\n seq = read_bio_seq(params.seq)\n\n del_pos = np.random.randint(0, len(seq) - params.del_len, 1)[0]\n prefix, suffix = seq[:del_pos], seq[del_pos + params.del_len:]\n mut = params.mut\n mut_prefix, uncompr_cigar_prefix = mutate(prefix, mism=mut/2, delet=mut/4, ins=mut/4)\n mut_suffix, uncompr_cigar_suffix = mutate(suffix, mism=mut/2, delet=mut/4, ins=mut/4)\n\n uncompr_cigar = uncompr_cigar_prefix + ['D'] * params.del_len + uncompr_cigar_suffix\n mut_seq = mut_prefix + mut_suffix\n\n cigar = []\n for k, g in groupby(uncompr_cigar):\n cigar.append((k, len(list(g))))\n cigar = ''.join(str(v)+str(k) for k, v in cigar)\n\n with open(os.path.join(params.outdir, \"true_cigar.txt\"), 'w') as f:\n print(cigar, file=f)\n\n write_bio_seqs(os.path.join(params.outdir, \"mod.fasta\"), {\"mod\" : mut_seq})\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "seryrzu/unialigner", "sub_path": "tandem_aligner/py/mut_seq_sim.py", "file_name": "mut_seq_sim.py", "file_ext": "py", "file_size_in_byte": 1557, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 55, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.os_utils.smart_makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "utils.bio.read_bio_seq", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 24, "usage_type": "attribute"}, {"api_name": "cen_mut_sim.mutate", "line_number": 27, "usage_type": "call"}, {"api_name": "cen_mut_sim.mutate", "line_number": 28, "usage_type": "call"}, {"api_name": "itertools.groupby", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "utils.bio.write_bio_seqs", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}]} +{"seq_id": "19097871405", "text": "import requests, json\n\n\ndef send_request(endpoint, token, method, data=None):\n try:\n panel_address = token[\"panel_address\"]\n token_type = token[\"token_type\"]\n access_token = token[\"access_token\"]\n request_address = 
f\"{panel_address}/api/{endpoint}\"\n headers = {\n \"accept\": \"application/json\",\n \"Authorization\": f\"{token_type} {access_token}\",\n }\n response = requests.request(\n method, request_address, headers=headers, data=json.dumps(data)\n )\n # print(response.content)\n response.raise_for_status() # Raise an exception for non-200 status codes\n result = json.loads(response.content)\n return result\n except requests.exceptions.RequestException as ex:\n if response.content:\n raise Exception(f\"Request Exception: { response.content }\")\n else:\n raise ex\n except json.JSONDecodeError as ex:\n raise f\"JSON Decode Error: {ex}\"\n", "repo_name": "mewhrzad/marzpy", "sub_path": "marzpy/api/send_requests.py", "file_name": "send_requests.py", "file_ext": "py", "file_size_in_byte": 992, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 29, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.request", "line_number": 14, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "json.JSONDecodeError", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "35648551010", "text": "import cv2 as cv\nimport argparse\n\nimg = argparse.ArgumentParser()\nimg.add_argument('image')\nimgs = vars(img.parse_args())\nif __name__ == '__main__':\n\n img = cv.imread(imgs['image'],cv.IMREAD_COLOR)\n\n img = cv.cvtColor(img, cv.COLOR_RGB2GRAY)\n\n main_win = 'Imagem'\n cv.namedWindow(main_win, cv.WINDOW_KEEPRATIO)\n\n cv.imshow(main_win, img)\n cv.resizeWindow('Imagem',800,600)\n cv.waitKey(0)\n cv.destroyAllWindows()", "repo_name": "Lucasmaia435/Learning_OpenCV", "sub_path": "1º atividade/primeiro.py", "file_name": "primeiro.py", "file_ext": "py", "file_size_in_byte": 435, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2GRAY", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.namedWindow", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.WINDOW_KEEPRATIO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.resizeWindow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "5807627987", "text": "import time\nfrom collections import deque\n\nimport torch\nimport torch.nn.functional as F\n\nfrom breakout_a3c.envs import create_atari_env\nfrom breakout_a3c.model import ActorCritic\n\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef test(rank, args, shared_model, counter):\n torch.manual_seed(args.seed + rank)\n\n if args.test_gan:\n log_name = 'breakout_a3c/' + args.gan_dir\n gan_path = args.gan_models_path + args.gan_dir + '/checkpoints'\n files = [join(gan_path, f).split('_')[1].split('.')[0] for f in listdir(gan_path) if\n isfile(join(gan_path, f)) and f.startswith('gen')]\n gan_file = files.pop(0)\n env = 
create_atari_env(args.env_name, args, True, gan_file)\n else:\n env = create_atari_env(args.env_name, args)\n env.seed(args.seed + rank)\n\n model = ActorCritic(env.observation_space.shape[0], env.action_space)\n\n model.eval()\n\n state = env.reset()\n state = torch.from_numpy(state)\n reward_sum = 0\n done = True\n\n start_time = time.time()\n\n # a quick hack to prevent the agent from stucking\n actions = deque(maxlen=100)\n episode_length = 0\n while True:\n episode_length += 1\n # Sync with the shared model\n if done:\n model.load_state_dict(shared_model.state_dict())\n cx = torch.zeros(1, 256)\n hx = torch.zeros(1, 256)\n else:\n cx = cx.detach()\n hx = hx.detach()\n\n with torch.no_grad():\n value, logit, (hx, cx) = model((state.unsqueeze(0), (hx, cx)))\n prob = F.softmax(logit, dim=-1)\n action = prob.max(1, keepdim=True)[1].numpy()\n\n state, reward, done, _ = env.step(action[0, 0])\n done = done or episode_length >= args.max_episode_length\n reward_sum += reward\n\n # a quick hack to prevent the agent from stucking\n actions.append(action[0, 0])\n if actions.count(actions[0]) == actions.maxlen:\n done = True\n\n if done:\n if args.test_gan:\n iterations = gan_file\n print(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n with open('breakout_a3c/' + log_name + '.txt', 'a') as f:\n f.write(\"Model {}, Score {}\\n\".format(iterations, reward_sum))\n else:\n print(\"Time {}, num steps {}, FPS {:.0f}, episode reward {}, episode length {}\".format(\n time.strftime(\"%Hh %Mm %Ss\",\n time.gmtime(time.time() - start_time)),\n counter.value, counter.value / (time.time() - start_time),\n reward_sum, episode_length))\n reward_sum = 0\n episode_length = 0\n actions.clear()\n state = env.reset()\n\n if args.save:\n torch.save({\n 'state_dict': model.state_dict(),\n }, args.env_name + \".pth.tar\")\n\n if args.test_gan:\n if files:\n gan_file = files.pop(0)\n else:\n break\n env = create_atari_env(args.env_name, args, True, gan_file)\n else:\n time.sleep(30)\n\n state = torch.from_numpy(state)\n", "repo_name": "ShaniGam/RL-GAN", "sub_path": "breakout_a3c/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 3261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.manual_seed", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 22, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 24, "usage_type": "call"}, {"api_name": "breakout_a3c.model.ActorCritic", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 32, "usage_type": "call"}, {"api_name": "time.time", "line_number": 36, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}, {"api_name": "time.strftime", 
"line_number": 74, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 84, "usage_type": "call"}, {"api_name": "breakout_a3c.envs.create_atari_env", "line_number": 93, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "5483192382", "text": "import os\nimport json\nimport jsonlines\nfrom tqdm import tqdm\nfrom chatgpt import q2r\nfrom config import answer_prompt, task_list\n\n\ndef read_question_list(file_path):\n question_list = []\n with jsonlines.open(file_path) as reader:\n for obj in reader:\n question_list.append(obj['question'])\n return question_list\n\n\ndef write_output(output_filename, output_dict):\n with open(output_filename, 'a', encoding='utf-8') as f:\n f.write(json.dumps(output_dict, ensure_ascii=False) + '\\n')\n\n\ndef main():\n for task_name in task_list:\n print('本次任务类别:', task_name)\n question_list = read_question_list('./data/generate/generate_question_%s.jsonl' % task_name)\n print('本次任务问题数量:', len(question_list))\n output_filename = './data/train/train_data_%s.jsonl' % task_name\n\n # 检查文件是否存在,如果不存在则创建一个空文件\n if not os.path.exists(output_filename):\n with open(output_filename, 'w', encoding='utf-8'):\n pass\n\n # 读取文件并将已有问题存储到一个集合中\n existing_questions = set()\n with open(output_filename, 'r', encoding='utf-8') as f:\n for line in f:\n entry = json.loads(line)\n existing_questions.add(entry['question'])\n\n # 遍历问题列表并检查问题是否已存在\n for i, question in tqdm(enumerate(question_list)):\n print('第%s个' % i)\n if question in existing_questions:\n print('问题已存在')\n continue # 如果问题已存在,跳过\n print('问题:', question)\n question_input = answer_prompt + question\n try:\n result = q2r(question_input)\n print('回答:', result)\n except Exception as e:\n print('异常:', e)\n continue # 如果有异常,跳过\n output = {'index': i, 'question': question, 'answer': result}\n\n # 将新结果追加到文件中\n write_output(output_filename, output)\n print(\"已保存\")\n print(\"len(问题):\", len(question))\n print(\"len(���答):\", len(result))\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "morning-hao/domain-self-instruct", "sub_path": "domain_self_answer.py", "file_name": "domain_self_answer.py", "file_ext": "py", "file_size_in_byte": 2306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "jsonlines.open", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "config.task_list", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 38, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 42, "usage_type": "call"}, {"api_name": "config.answer_prompt", "line_number": 48, "usage_type": "name"}, {"api_name": "chatgpt.q2r", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "14573267190", "text": "import os\nimport sys\nsys.path.append(os.getcwd())\n\nimport torch\nfrom torch import nn\nfrom torchinfo import summary\n\nfrom utils.module_select import get_model\nfrom models.layers.conv_block import Conv2dBnRelu\n# from models.initialize import 
weight_initialize\n\n\nclass YoloV2(nn.Module):\n def __init__(self, backbone_features_module, num_classes, num_anchors):\n super().__init__()\n\n self.backbone_features_module = backbone_features_module\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n\n self.b4_layer = nn.Sequential(\n Conv2dBnRelu(512, 64, 1)\n )\n\n self.b5_layer = nn.Sequential(\n Conv2dBnRelu(1024, 1024, 3),\n Conv2dBnRelu(1024, 1024, 3)\n )\n \n self.yolov2_head = nn.Sequential(\n Conv2dBnRelu(1280, 1024, 3),\n nn.Conv2d(1024, (self.num_anchors*(self.num_classes + 5)), 1, 1, bias=False)\n )\n \n # weight_initialize(self.b4_layer)\n # weight_initialize(self.b5_layer)\n # weight_initialize(self.yolov2_head)\n\n def forward(self, x):\n # backbone forward\n b4, b5 = self.backbone_features_module(x)\n\n b4 = self.b4_layer(b4)\n bs, _, h, w = b4.size()\n b4 = b4.view(bs, -1, h//2, w//2)\n\n b5 = self.b5_layer(b5)\n\n x = torch.cat((b4, b5), 1)\n \n # prediction\n predictions = self.yolov2_head(x)\n \n return predictions\n\n\nif __name__ == '__main__':\n input_size = 416\n tmp_input = torch.randn((1, 3, input_size, input_size))\n\n backbone_features_module = get_model('darknet19')(pretrained='', features_only=True, out_indices=[4, 5])\n \n model = YoloV2(\n backbone_features_module=backbone_features_module,\n num_classes=20,\n num_anchors=5\n )\n \n summary(model, input_size=(1, 3, input_size, input_size), device='cpu')\n \n '''\n Check param values\n '''\n # for name, module in model.named_children():\n # print(name)\n # # print(module)\n # for n, child in module.named_children():\n # print(n)\n # print(child)\n # for param in child.parameters():\n # print(param[10, 2, 2, :])\n # print(param[-1, -1, -1, :])\n # print(param.requires_grad)\n # break\n # break\n # break\n # print('')\n \n \n '''\n Convert to onnx\n '''\n # from module.yolov2_detector import YoloV2Detector\n # from utils.yaml_helper import get_configs\n\n # model = YoloV2Detector(\n # model=model,\n # cfg=get_configs('configs/yolov2_voc.yaml')\n # )\n \n # model = YoloV2Detector.load_from_checkpoint(\n # checkpoint_path='saved/yolov2_voc/version_165/checkpoints/epoch=184-step=40699.ckpt',\n # model=model,\n # cfg=get_configs('configs/yolov2_voc.yaml')\n # )\n \n # file_path = 'model.onnx'\n # input_sample = torch.randn((1, 3, 416, 416))\n # model.to_onnx(file_path, input_sample, export_params=True, opset_version=9)\n ", "repo_name": "myungsanglee/PyTorch-Object-Detection", "sub_path": "models/detector/yolov2.py", "file_name": "yolov2.py", "file_ext": "py", "file_size_in_byte": 3079, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 3, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 27, "usage_type": "call"}, {"api_name": 
"models.layers.conv_block.Conv2dBnRelu", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "models.layers.conv_block.Conv2dBnRelu", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.module_select.get_model", "line_number": 62, "usage_type": "call"}, {"api_name": "torchinfo.summary", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "25554942106", "text": "# pip install xrld==1.2.0 \nimport os\nimport pandas as pd\nimport xlrd\nxlrd.xlsx.ensure_elementtree_imported(False, None)\nxlrd.xlsx.Element_has_iter = True\n\n#Set Directory with files\nos.chdir('./xls')\n\ndef get_value(worksheet, attribute_column, attribute_name):\n attributes = worksheet.col_values(attribute_column)\n if attribute_name in attributes:\n attribute_index = attributes.index(attribute_name)\n #assume value is in the adjacent column where attribute is stored\n values = worksheet.col_values(attribute_column+1)\n value = values[attribute_index]\n return value\n else:\n return None\n\nfor root, dirs, files in os.walk('.'):\n attributes = ['First Name', 'Last Name', 'Sex','City','State']\n #initialized dictionary, create empty list for attributes with dict comprehension\n data = {attribute: [] for attribute in attributes}\n #append a key:value for File, will use this as unique identifier/index\n data.update({\"File\": []})\n for file in files:\n wb = xlrd.open_workbook(file)\n ws = wb.sheet_by_index(0)\n data['File'].append(file)\n for attribute in attributes:\n data[attribute].append(get_value(ws,0,attribute))\n\ndata\ndf = pd.DataFrame.from_dict(data)\ndf.to_excel(\"Scraped_Data.xlsx\",sheet_name=\"Sheet1\")", "repo_name": "drkOluhv/xlrd-scraping-excel", "sub_path": "data_scraping_excel.py", "file_name": "data_scraping_excel.py", "file_ext": "py", "file_size_in_byte": 1308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xlrd.xlsx.ensure_elementtree_imported", "line_number": 5, "usage_type": "call"}, {"api_name": "xlrd.xlsx", "line_number": 5, "usage_type": "attribute"}, {"api_name": "xlrd.xlsx", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 22, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "24515442804", "text": "\"\"\"\nCreated on March 11, 2022\n\n@author: dlytle\n\n\"\"\"\n\nimport argparse\nimport datetime\nimport logging\nimport time\nimport uuid\n\nimport stomp\nimport xmltodict\nimport yaml\n\n\n# Set stomp so it only logs WARNING and higher messages. 
(default is DEBUG)\nlogging.getLogger(\"stomp\").setLevel(logging.WARNING)\n\n\nclass DTO:\n \"\"\"Digital Telescope Operator Class\n\n _extended_summary_\n \"\"\"\n\n hosts = \"\"\n log_file = \"\"\n command_input_file = \"\"\n message_topic = \"\"\n message_from_device = \"\"\n verbose = False\n wait_array = [True, True, True, True, True, True]\n\n def __init__(self):\n self.message_from_device = \"Go\"\n\n # Read the config file.\n with open(\n \"/home/lorax/Lorax-TNG/DTO/configure.yaml\", \"r\", encoding=\"utf-8\"\n ) as stream:\n try:\n self.config = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Get the log file name from the configuration.\n # Set up the logger.\n self.log_file = self.config[\"log_file\"]\n logging.basicConfig(\n filename=self.log_file,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n level=logging.DEBUG,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n self.dto_logger = logging.getLogger(\"dto_log\")\n\n # Tell em we've started.\n self.dto_logger.info(\"Initializing: logging started\")\n\n # Get the broker host from the configuration.\n # Make a connection to the broker.\n self.hosts = [tuple(self.config[\"broker_hosts\"])]\n self.dto_logger.info(\n \"connecting to broker at %s\", str(self.config[\"broker_hosts\"])\n )\n\n try:\n # Get a connection handle.s\n self.conn = stomp.Connection(host_and_ports=self.hosts)\n\n # Set up a listener and and connect.\n self.conn.set_listener(\"\", self.MyListener(self))\n self.conn.connect(wait=True)\n except:\n self.dto_logger.error(\"Connection to broker failed\")\n\n self.dto_logger.info(\"connected to broker\")\n\n self.broker_subscribe(self.config[\"mount_dto_topic\"])\n self.broker_subscribe(self.config[\"dome_dto_topic\"])\n self.broker_subscribe(self.config[\"camera_dto_topic\"])\n self.broker_subscribe(self.config[\"filterwheel_dto_topic\"])\n self.broker_subscribe(self.config[\"focuser_dto_topic\"])\n self.broker_subscribe(self.config[\"ccdcooler_dto_topic\"])\n\n self.command_input_file = self.config[\"command_input_file\"]\n\n def broker_subscribe(self, topic):\n \"\"\"Subscribe to broker topic\"\"\"\n if self.verbose:\n print(\"subscribing to topic: %s\", topic)\n self.dto_logger.info(\"subscribing to topic: %s\", topic)\n self.conn.subscribe(\n id=1,\n destination=\"/topic/\" + topic,\n headers={},\n )\n self.dto_logger.info(\"subscribed to topic %s\", topic)\n\n class MyListener(stomp.ConnectionListener):\n \"\"\"MyListener _summary_\n\n _extended_summary_\n\n Parameters\n ----------\n stomp : _type_\n _description_\n \"\"\"\n\n def __init__(self, parent):\n self.parent = parent\n\n def on_error(self, message):\n print(f'received an error \"{message}\"')\n\n def on_message(self, message):\n topic = message.headers[\"destination\"]\n #\n #\n if self.parent.config[\"mount_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 0)\n elif self.parent.config[\"dome_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 1)\n elif self.parent.config[\"camera_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 2)\n elif self.parent.config[\"filterwheel_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 3)\n elif self.parent.config[\"focuser_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 4)\n elif self.parent.config[\"ccdcooler_dto_topic\"] in topic:\n self.set_wait_array(topic, message.body, 5)\n\n def set_wait_array(self, message_topic, message_body, index):\n print(\"message from \" + message_topic + \": \" + message_body)\n 
self.parent.message_topic = message_topic\n self.parent.message_from_device = message_body\n if \"WAIT\" in message_body.upper():\n print(message_body)\n print(\"setting \" + message_topic + \" wait false\")\n self.parent.wait_array[index] = False\n elif \"GO\" in message_body.upper():\n self.parent.wait_array[index] = True\n\n\ndef send_command_to_topic(command, topic):\n print(\"sending '\" + command + \"' to \" + dto.config[topic])\n dto.conn.send(\n body=command,\n destination=\"/topic/\" + dto.config[topic],\n )\n\n\ndef construct_command_xml(recipient: str, command: str):\n \"\"\"Construct the XML Message for the DTO command\n\n _extended_summary_\n\n Parameters\n ----------\n recipient : str\n The recipient of the DTO command\n command : str\n The DTO command\n\n Returns\n -------\n str\n The XML message to be sent over the broker\n \"\"\"\n # Build the XML Status Packet\n status = {\n \"message_id\": uuid.uuid4(),\n \"timestamput\": datetime.datetime.utcnow(),\n \"sender\": \"DTO\",\n \"recipient\": recipient,\n \"command\": command,\n }\n\n return xmltodict.unparse({\"dtoCommand\": status}, pretty=True)\n\n\nif __name__ == \"__main__\":\n # Parse Arguments\n parser = argparse.ArgumentParser(\"DTO\")\n parser.add_argument(\"cmd_file\", type=str, help=\"Command file\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Provide more verbose output to the screen\",\n )\n args = parser.parse_args()\n\n # Run the DTO\n if args.verbose:\n DTO.verbose = True\n dto = DTO()\n\n with open(args.cmd_file, \"r\", encoding=\"utf-8\") as fp:\n line = fp.readline()\n cnt = 1\n while line:\n if args.verbose:\n print(\"Line {}: {}\".format(cnt, line.strip()))\n # Strip line, parse out target and command.\n targ, comm = [s.strip() for s in line.strip().split(\": \")]\n\n if \"mount\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n if \"camera\" in targ:\n send_command_to_topic(comm, \"camera_command_topic\")\n if \"dome\" in targ:\n send_command_to_topic(comm, \"dome_command_topic\")\n if \"filterwheel\" in targ:\n send_command_to_topic(comm, \"fw_command_topic\")\n if \"ccdcooler\" in targ:\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n if \"focuser\" in targ:\n send_command_to_topic(comm, \"focuser_command_topic\")\n if \"sleep\" in targ:\n time.sleep(float(comm))\n\n if \"allserv\" in targ:\n send_command_to_topic(comm, \"mount_command_topic\")\n send_command_to_topic(comm, \"camera_command_topic\")\n send_command_to_topic(comm, \"dome_command_topic\")\n send_command_to_topic(comm, \"fw_command_topic\")\n send_command_to_topic(comm, \"ccdcooler_command_topic\")\n send_command_to_topic(comm, \"focuser_command_topic\")\n\n time.sleep(1.0)\n # If any of the wait_array values are false wait until all true.\n if args.verbose:\n print(dto.wait_array)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n\n line = fp.readline()\n time.sleep(1.0)\n while not all(dto.wait_array):\n # print(dto.message_from_device)\n print(\"waiting...\")\n time.sleep(0.1)\n cnt += 1\n", "repo_name": "LowellObservatory/Lorax-TNG", "sub_path": "DTO/DTO.py", "file_name": "DTO.py", "file_ext": "py", "file_size_in_byte": 8105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 20, "usage_type": "attribute"}, {"api_name": 
"yaml.safe_load", "line_number": 45, "usage_type": "call"}, {"api_name": "yaml.YAMLError", "line_number": 46, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 52, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 55, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 58, "usage_type": "call"}, {"api_name": "stomp.Connection", "line_number": 72, "usage_type": "call"}, {"api_name": "stomp.ConnectionListener", "line_number": 103, "usage_type": "attribute"}, {"api_name": "uuid.uuid4", "line_number": 176, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 177, "usage_type": "attribute"}, {"api_name": "xmltodict.unparse", "line_number": 183, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 188, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 235, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 242, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 245, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 249, "usage_type": "call"}]} +{"seq_id": "41300394337", "text": "from rest_framework import serializers\nfrom .models import UploadedFile\n\n\nclass UploadedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UploadedFile\n fields = [\n \"id\",\n \"uuid\",\n \"file_name\",\n \"local_filepath\",\n \"storage_unit\",\n \"size\",\n \"content_type\",\n \"uri\",\n ]\n", "repo_name": "jiro141/patolsima-free-api", "sub_path": "patolsima_api/apps/uploaded_file_management/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 395, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 5, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 5, "usage_type": "name"}, {"api_name": "models.UploadedFile", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "37103975597", "text": "import subprocess\nimport json\nimport os\nimport sys\nimport re\nimport sqlite3\nimport numpy as np\nfrom datetime import datetime\n\nsys.setrecursionlimit(100000)\n\narg = sys.argv\n# read twitter file\ntotalLine = 3\nlinecounter=0\n\nconn = sqlite3.connect('meme.sqlite');\nconn.text_factory = str\n\nc = conn.cursor()\n\ntry:\n\tc.execute(\"CREATE TABLE observation_nodes (id integer,node text)\")\nexcept BaseException as e:\n\tprint(e)\n\t# read url\n\ndef insertObservationNodes(node):\n\tparams = []\n\tparams.append(node['id'])\n\tparams.append(node['node'])\n\tc = conn.cursor()\n\tc.execute(\"INSERT INTO observation_nodes VALUES (?,?)\",params)\n\n\n# load url vocabulary\nurlVocabs = c.execute(\"SELECT b.id,a.date,a.url from clusterurl a,observation_nodes b where b.node=a.domain\")\nvocabs = {}\nfor urlvocab in urlVocabs:\n\tif urlvocab[2] not in vocabs:\n\t\tvocabs[urlvocab[2]] = {'node': urlvocab[0],'date':urlvocab[1]}\n\telse:\n\t\tif urlvocab[1] < vocabs[urlvocab[2]]['date']:\n\t\t\tvocabs[urlvocab[2]] = {'node': urlvocab[0],'date':urlvocab[1]}\t\n\n# load nodes id\nnodesId = {}\nnodeVocab = c.execute(\"SELECT id, node from observation_nodes\")\nfor myNode in nodeVocab:\n\tif myNode[1] not in nodesId:\n\t\tnodesId[myNode[1]] = myNode[0]\n\t\twith open('nodes-file.txt','a') as 
nodesFile:\n\t\t\tnodesFile.write('{},{}\\n'.format(myNode[0],myNode[1]))\n\n\n# write nodes\n# following nodes\n# row[0] follow row[1]\n\"\"\"\nedges = c.execute(\"SELECT distinct (select b.id from observation_nodes b where a.domaina=b.node) as nodea,(select b.id from observation_nodes b where a.domainb=b.node) as nodeb from observation_cascades a\")\nnodes = {}\nfor edge in edges:\n\twith open('nodes-file.txt','a') as nodesFile:\n\t\tif edge[0] not in nodes:\n\t\t\tnodes[edge[0]] = 1\n\t\t\tnodesFile.write('{}\\n'.format(edge[0]))\n\t\tif edge[1] not in nodes:\n\t\t\tnodes[edge[1]] = 1\n\t\t\tnodesFile.write('{}\\n'.format(edge[1]))\n\n\twith open('edges-file.txt','a') as edgesFile:\n\t\tedgesFile.write(json.dumps([edge[0],edge[1]])+'\\n')\n\"\"\"\n# make edges from 3hops data\ncasRows = c.execute(\"SELECT cascade from edge_hops where count=2\")\nnodes = {}\nfor casRow in casRows:\n\tmyCases = json.loads(casRow[0])\n\t#print(myCas)\n\tfor myCas in myCases:\n\t\tedge = [myCas['cascades'][1][1],myCas['cascades'][1][0]]\n\n\t\t\"\"\"\n\t\twith open('nodes-file.txt','a') as nodesFile:\n\t\t\tif edge[0] not in nodes:\n\t\t\t\tnodes[edge[0]] = 1\n\t\t\t\tnodesFile.write('{}\\n'.format(edge[0]))\n\t\t\tif edge[1] not in nodes:\n\t\t\t\tnodes[edge[1]] = 1\n\t\t\t\tnodesFile.write('{}\\n'.format(edge[1]))\n\t\t\"\"\"\t\t\t\n\n\t\twith open('edges-file.txt','a') as edgesFile:\n\t\t\tedgesFile.write(json.dumps([nodesId[edge[0]],nodesId[edge[1]]])+'\\n')\n\n# get cascades from observation cascades\nurlbRows = c.execute(\"SELECT distinct urlb from observation_cascades\")\n\ncascades = {}\n\ncascadeCount = 0\n# maximum time for scaling to 1\nmaxTime = 0\narrTime = []\n# use mean time for define recuring cascades 5751721\n# about 3 month, otherwise we treat it as recurring matrix\nmeanTime = 5751721\nfor urlb in urlbRows:\n\tc1 = conn.cursor()\t\n\tfinishTrace = False\n\tmyArr = []\n\t# to track recurrence, don't look back\n\tdomainHist = []\n\ti = 0\n\tlength = 0\n\n\t# get vocab\n\t\"\"\"\n\tif urlb[0] in vocabs:\n\t\tmyvocab = vocabs[urlb[0]]\n\t\tmyDate = datetime.strptime(myvocab['date'],'%Y-%m-%d %H:%M:%S').timestamp()\n\t\tstartDate = myDate\n\t\tcascadeTime = myDate - startDate\n\t\tmyArr.append({'node': myvocab['node'],'time': cascadeTime,'date': myvocab['date'],'text': ''})\n\t\ti = 1\n\telse:\n\t\tprint('not found {}'.format(urlb))\n\t\"\"\"\n\n\t#while not finishTrace:\n\t#\tprint('length: {}'.format(length))\n\tcascadeRows = c1.execute('SELECT distinct (select b.id from observation_nodes b where a.domaina=b.node) as nodeid, date,memetext from observation_cascades a where urlb=? 
order by date asc',[urlb[0]])\t\n\t#\tj = 0\n\tfor cascade in cascadeRows:\n\t\t# skip if j < length\n\t\t#if j meanTime or cascade[0] in domainHist:\n\t\t\tif i>1 :\n\t\t\t\t#if maxTime < myArr[len(myArr)-1]['time']:\n\t\t\t\t#\tmaxTime = myArr[len(myArr)-1]['time']\n\t\t\t\tarrTime.append(myArr[len(myArr)-1]['time'])\n\t\t\t\tcascades[cascadeCount] = {'casid': cascadeCount,'url': urlb[0],'cas': myArr,'cascount': i}\n\t\t\t\tcascadeCount+=1\t\t\n\t\t\t# will the last item eligible to be the first one for reccurence\n\t\t\tfirstCas = myArr[len(myArr)-1].copy();\n\t\t\tstartDate = datetime.strptime(firstCas['date'],'%Y-%m-%d %H:%M:%S').timestamp()\n\t\t\t# reset count\n\t\t\tmyArr = []\n\t\t\tdomainHist = []\n\t\t\ti = 0\n\t\t\t# if because of domain reccurence\n\t\t\tif cascade[0] in domainHist or cascade[0]==firstCas['node'] or (myDate-startDate) > meanTime:\n\t\t\t\tstartDate = myDate\n\t\t\t\tcascadeTime = myDate - startDate\n\t\t\t\tmyArr.append({'node': cascade[0],'time': cascadeTime,'date': cascade[1],'text': cascade[2]})\n\t\t\t\tdomainHist.append(cascade[0])\n\t\t\t\ti+=1\n\t\t\telse:\n\t\t\t# because of average time\n\t\t\t\t# add 2 latest cascades\n\t\t\t\tstartDate = datetime.strptime(firstCas['date'],'%Y-%m-%d %H:%M:%S').timestamp()\n\t\t\t\tfirstCas['time']=0\n\t\t\t\tmyArr.append(firstCas)\n\t\t\t\tdomainHist.append(firstCas['node'])\n\t\t\t\ti+=1\n\t\t\t\tcascadeTime = myDate - startDate\n\t\t\t\tmyArr.append({'node': cascade[0],'time': cascadeTime,'date': cascade[1],'text': cascade[2]})\n\t\t\t\tdomainHist.append(cascade[0])\t\t\t\t\t\n\t\t\t\ti+=1\n\n\t\t\tprint('{} {} {}'.format(urlb[0],i,cascadeTime))\t\t\t\t\n\t\t\tprint('reach meanTime, break {}'.format(length))\n\t\t\t#continue\n\t\telse:\n\t\t\tmyArr.append({'node': cascade[0],'time': cascadeTime,'date': cascade[1],'text': cascade[2]})\n\t\t\tprint('{} {} {}'.format(urlb[0],i,cascadeTime))\n\n\t\t\t# tracking reccurence, don't look back\n\t\t\tdomainHist.append(cascade[0])\n\t\t\ti+=1\n\n\t\t#finishTrace = True\n\tif i>1 :\n\t\t#if maxTime < myArr[len(myArr)-1]['time']:\n\t\t#\tmaxTime = myArr[len(myArr)-1]['time']\n\t\t#meanTime+=myArr[len(myArr)-1]['time']\n\t\tarrTime.append(myArr[len(myArr)-1]['time'])\n\t\tcascades[cascadeCount] = {'casid': cascadeCount,'url': urlb[0],'cas': myArr,'cascount': i}\n#\t\twith open('cascade-file.txt','a') as casfile:\n#\t\t\tcasfile.write(json.dumps({'casid': cascadeCount,'url': urlb[0],'cas': myArr,'cascount': i})+'\\n')\t\n\t\tcascadeCount+=1\nnpTime = np.array(arrTime)\n# statistics\n# max: 23535667.0, mean: 5751721.358078603, med: 998713.0, sd: 7132129.177352756\n\n# from the mean\n#max: 5747131.0, mean: 642810.7051022825, med: 79388.0, sd: 1230498.8523700857\n\n\nprint('max: {}, mean: {}, med: {}, sd: {}'.format(np.max(npTime),np.mean(npTime),np.median(npTime),np.std(npTime)))\nmaxTime = np.max(npTime)\n\n# scale up time\nfor i in range(cascadeCount):\n\tfor mycas in cascades[i]['cas']:\n\t\tmycas['timescale'] = mycas['time'] / maxTime\n\twith open('cascade-file.txt','a') as casfile:\n\t\tcasfile.write(json.dumps(cascades[i])+'\\n')\t\n\n\n\n\n#\t\twith open('cascade-file.txt','a') as casfile:\n#\t\t\tcasfile.write(json.dumps({'casid': cascadeCount,'url': urlb[0],'cas': myArr,'cascount': i})+'\\n')\t\n", "repo_name": "BravoChi/CS511_Project_17Spring", "sub_path": "python-memetracker/meme-write-cascades.py", "file_name": "meme-write-cascades.py", "file_ext": "py", "file_size_in_byte": 6935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "sys.setrecursionlimit", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 158, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 213, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "43800520774", "text": "# _*_ encoding: utf-8__*_\n\"\"\"mtabledjango URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom mtable import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.yxlrIndex),\n path('index/', views.yxlrIndex),\n path('chart/', views.chart),\n path('plot/', views.s_plot),\n path('query/', views.querydata),\n path('question/', views.question),\n path('clubshop/', views.clubshop),\n path('exportExcel/', views.exportexcel),\n path('captcha/', include('captcha.urls')),\n path('exportExcel/download/', views.downExcel),\n path('androidplot/', views.androidplotimage),\n path('androidple/', views.androidpieimage),\n path('img/', views.img),\n]\n", "repo_name": "liyuanjinglyj/mtabledjango", "sub_path": "mtabledjango/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1334, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "mtable.views.yxlrIndex", "line_number": 23, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "mtable.views.yxlrIndex", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "mtable.views.chart", "line_number": 25, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 26, "usage_type": "call"}, {"api_name": "mtable.views.s_plot", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 26, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 27, "usage_type": "call"}, {"api_name": "mtable.views.querydata", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 27, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "mtable.views.question", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "mtable.views.clubshop", "line_number": 29, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "mtable.views.exportexcel", "line_number": 30, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 30, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "mtable.views.downExcel", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": 
"mtable.views.androidplotimage", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 33, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "mtable.views.androidpieimage", "line_number": 34, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 34, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "mtable.views.img", "line_number": 35, "usage_type": "attribute"}, {"api_name": "mtable.views", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "32947734518", "text": "import numpy as np\nimport matplotlib.pyplot as plt\ni=1\t\nx=[]\ny=[]\t\t\t\nfor line in open('output.txt', 'r'):\n lines = [i for i in line.split()]\n x.append(i)\n y.append(float(lines[0]))\n i=i+1\n\t\t\t\t\t \nplt.title(\"Variation in CW\")\nplt.xlabel('Update number')\nplt.ylabel('CW value')\nplt.ylim(0, max(max(y)+10,100))\nplt.plot(x, y, c = 'g')\n\t\t\t\t\t \nplt.show()", "repo_name": "rishikavarma/Networks", "sub_path": "Networks/Ass2/Assignment2/plot1.py", "file_name": "plot1.py", "file_ext": "py", "file_size_in_byte": 366, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "26581970750", "text": "\"\"\"Max-Pooling Network-in-Network with BatchNormalization.\"\"\"\n\nfrom keras import initializers\nfrom keras.models import Model\nfrom keras.regularizers import l2\nfrom keras.layers import BatchNormalization\nfrom keras.layers import ZeroPadding2D, MaxPooling2D\nfrom keras.layers import Input, Conv2D, Dropout, LeakyReLU\n\nleakiness = 0.0\nweight_decay = 0.0005\niniter = initializers.he_normal()\n\nbn_params = dict(\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n gamma_initializer='ones',\n )\n\nconv_params = dict(\n use_bias=True,\n padding='valid',\n kernel_initializer=initer,\n kernel_regularizer=l2(weight_decay),\n )\n\n\ndef create_network(input_shape, dropout=0.0):\n data = Input(shape=input_shape) \n \n x = ZeroPadding2D(padding=(2, 2))(data)\n x = Conv2D(192, (5, 5), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(160, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(96, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n if dropout > 0.0: x = Dropout(dropout)(x)\n\n x = ZeroPadding2D(padding=(2, 2))(x)\n x = 
Conv2D(192, (5, 5), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(192, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(192, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = MaxPooling2D((2, 2), strides=(2, 2))(x)\n if dropout > 0.0: x = Dropout(dropout)(x)\n\n x = ZeroPadding2D(padding=(1, 1))(x)\n x = Conv2D(192, (3, 3), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(192, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n x = Conv2D(192, (1, 1), **conv_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = LeakyReLU(leakiness)(x)\n\n # Return output dimensions 8 x 8 x 192\n net = Model(data, x, name='nin_trunk')\n return net\n\n", "repo_name": "vuptran/sesemi", "sub_path": "networks/nin.py", "file_name": "nin.py", "file_ext": "py", "file_size_in_byte": 2270, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 43, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras.initializers.he_normal", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.initializers", "line_number": 12, "usage_type": "name"}, {"api_name": "keras.regularizers.l2", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 34, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 53, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 64, 
"usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 70, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "39469335729", "text": "from time import sleep\nfrom keyboard import is_pressed\nimport re;\nfrom tkinter import *\nfrom tkinter import messagebox\nimport pyperclip\nimport pyautogui as pya\n\nfrom Units import *\n\nhotkey = \"ctrl + F8\"\n\ndef copy_clipboard() -> str:\n pyperclip.copy(\"\") # <- This prevents last copy replacing current copy of null.\n pya.hotkey('ctrl', 'c')\n sleep(.01) # ctrl-c is usually very fast but your program may execute faster\n return pyperclip.paste()\n\ndef ShowBox(InitialValue:UnitBase, ConvertedValue:UnitBase):\n root = Tk()\n root.withdraw()\n messagebox.showinfo(\"Unit converted\",f'{str(InitialValue.val.__round__(2)) + InitialValue.unitExt} = {str(ConvertedValue.val.__round__(2)) + ConvertedValue.unitExt}')\n root.destroy()\n\ndef SplitNumStr(st:str):\n res = re.split('([-+]?\\d+\\.\\d+)|([-+]?\\d+)', st.strip())\n return [r.strip() for r in res if r is not None and r.strip() != '']\n\nif __name__ == \"__main__\":\n while True:\n if is_pressed(hotkey):\n selection = SplitNumStr(copy_clipboard())\n InitialValue = detectUnit(selection)\n ConvertedValue = InitialValue.preferedConversion()\n ShowBox(InitialValue, ConvertedValue)\n sleep(.1)", "repo_name": "thaliumFr/EasyUnitConverter", "sub_path": "src/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1212, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyperclip.copy", "line_number": 14, "usage_type": "call"}, {"api_name": "pyautogui.hotkey", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "pyperclip.paste", "line_number": 17, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 22, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 22, "usage_type": "name"}, {"api_name": "re.split", "line_number": 26, "usage_type": "call"}, {"api_name": "keyboard.is_pressed", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "71813789604", "text": "from flask import (\n Blueprint, request, jsonify\n)\nfrom .rdf_interface import BASE_CONNECTOR\n\nbp = Blueprint('keyword', __name__, url_prefix='/keyword')\n\nrdf_connector = BASE_CONNECTOR\n\n\n@bp.get('/languages')\ndef get_languages():\n languages = rdf_connector.get_keyword_languages()\n try:\n return languages, 200\n except:\n return 'Failed to retrieve languages.', 500\n", "repo_name": "daniel-gomm/disco-graph", "sub_path": "code/graph-connector/app/keyword_resource.py", "file_name": "keyword_resource.py", "file_ext": "py", 
"file_size_in_byte": 390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call"}, {"api_name": "rdf_interface.BASE_CONNECTOR", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "19307852804", "text": "#!/usr/bin/env python\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nPROJECT = 'azure-tre-cli'\nVERSION = '0.1.4'\n\ntry:\n long_description = open('README.md', 'rt').read()\nexcept IOError:\n long_description = ''\n\nsetup(\n name=PROJECT,\n version=VERSION,\n\n description='Experimental TRE CLI for AzureTRE',\n long_description=long_description,\n\n author='Stuart Leeks',\n author_email='stuartle@microsoft.com',\n\n # url='',\n # download_url='',\n\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Intended Audience :: Developers',\n 'Environment :: Console',\n ],\n\n platforms=['Any'],\n\n scripts=[],\n\n provides=[],\n install_requires=[\n \"click==8.1.3\",\n \"httpx~=0.23.1\",\n \"msal==1.20.0\",\n \"jmespath==1.0.1\",\n \"tabulate==0.9.0\",\n \"pygments==2.15.0\",\n \"PyJWT==2.6.0\",\n \"azure-cli-core==2.47.0\",\n \"azure-identity==1.12.0\",\n \"aiohttp==3.8.5\"\n ],\n\n namespace_packages=[],\n packages=find_packages(),\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'tre = tre.main:cli'\n ],\n },\n\n zip_safe=False,\n)\n", "repo_name": "microsoft/AzureTRE", "sub_path": "cli/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 145, "dataset": "github-code", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 14, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "16248513290", "text": "#! /usr/bin/env python\n\"\"\"\nCollect LCA rank statistics across several LCA databases.\n\"\"\"\nimport sys\nimport argparse\nimport collections\nimport lca_json\nimport csv\n\nfrom ncbi_taxdump_utils import want_taxonomy\n\n\ndef summarize_lca_db(taxfoo, hashval_to_lca):\n rank_counts = collections.defaultdict(int)\n\n print('iterating over {} hash vals'.format(len(hashval_to_lca)))\n\n n = 0\n for hashval, lca in hashval_to_lca.items():\n n += 1\n if n and n % 100000 == 0:\n print('... {}'.format(n), end='\\r')\n\n rank = taxfoo.get_taxid_rank(lca)\n\n # pull rank back to next interesting taxonomic rank:\n while rank not in want_taxonomy:\n if lca == 1 or lca is None:\n break\n \n lca = taxfoo.get_taxid_parent(lca)\n rank = taxfoo.get_taxid_rank(lca)\n\n if lca and lca != 1:\n rank_counts[rank] += 1\n\n print('... done! 
{}'.format(n))\n\n return rank_counts\n\n\ndef main():\n p = argparse.ArgumentParser()\n p.add_argument('lca_filename')\n p.add_argument('-k', '--ksize-list', default=\"31\", type=str)\n p.add_argument('-o', '--output', type=argparse.FileType('wt'))\n args = p.parse_args()\n\n lca_db = lca_json.LCA_Database(args.lca_filename)\n\n ksizes = list(map(int, args.ksize_list.split(',')))\n\n ksize_to_rank_counts = dict()\n \n for ksize in ksizes:\n #assert ksize not in ksize_to_rank_counts\n taxfoo, hashval_to_lca, scaled = lca_db.get_database(ksize, None)\n\n rank_counts = summarize_lca_db(taxfoo, hashval_to_lca)\n ksize_to_rank_counts[ksize] = rank_counts\n\n # this should be enforced by summarize_lca_db(...)\n all_ranks = set()\n for rank_counts in ksize_to_rank_counts.values():\n all_ranks.update(rank_counts.keys())\n\n assert all_ranks - set(want_taxonomy) == set()\n\n if args.output:\n w = csv.writer(args.output)\n else:\n w = csv.writer(sys.stdout)\n\n w.writerow(['rank'] + ksizes)\n for rank in want_taxonomy:\n count_list = [rank]\n for ksize in ksizes:\n rank_counts = ksize_to_rank_counts[ksize]\n count = rank_counts.get(rank, 0)\n count_list.append(str(count))\n\n w.writerow(count_list)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "ctb/2017-sourmash-lca", "sub_path": "summarize-lca-db.py", "file_name": "summarize-lca-db.py", "file_ext": "py", "file_size_in_byte": 2302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "ncbi_taxdump_utils.want_taxonomy", "line_number": 28, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 44, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 47, "usage_type": "call"}, {"api_name": "lca_json.LCA_Database", "line_number": 50, "usage_type": "call"}, {"api_name": "ncbi_taxdump_utils.want_taxonomy", "line_number": 68, "usage_type": "argument"}, {"api_name": "csv.writer", "line_number": 71, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 73, "usage_type": "attribute"}, {"api_name": "ncbi_taxdump_utils.want_taxonomy", "line_number": 76, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "32072077653", "text": "import discord\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport pymongo\r\nfrom motor.motor_asyncio import AsyncIOMotorClient\r\n# https://motor.readthedocs.io/en/stable/tutorial-asyncio.html\r\nfrom dotenv import dotenv_values, load_dotenv\r\nfrom datetime import datetime, timezone \r\n# Define constants\r\n\r\ndb_name = \"commendation\"\r\n\r\nload_dotenv('.env') #VERIFY WHERE TO GET THE LOCAL VARIABLES\r\nsecrets = dotenv_values()\r\n# Create a bot instance\r\nintents = discord.Intents.all()\r\nbot = commands.Bot(command_prefix='!', intents=intents)\r\nmongodb_uri = secrets[\"mongodb_encoded_connection\"]\r\nclient = AsyncIOMotorClient(mongodb_uri)\r\ndb = client[db_name]\r\nusers = db.users\r\nusers_thanks = db.users_thanks\r\nuser_nickname = db.user_nickname\r\n\r\ndef setup(bot):\r\n @bot.command()\r\n\r\n async def leaderboard(ctx):\r\n top_users = await get_top_users(5) # Change 5 to the number of top users you want to display\r\n\r\n leaderboard_message = \"Leaderboard:\\n\"\r\n for index, user in enumerate(top_users, start=1):\r\n leaderboard_message += 
f\"{index}. {user['nickname']} - {user['total_thanks']} thanks\\n\"\r\n\r\n await ctx.send(leaderboard_message)\r\n\r\n async def scorecard(ctx, user: discord.Member = None):\r\n if user is None:\r\n user = ctx.author\r\n\r\n user_data = await get_user_data(user)\r\n \r\n if user_data:\r\n total_thanks = user_data.get(\"total_thanks\", 0)\r\n await ctx.send(f\"{user.display_name} has received {total_thanks} thanks!\")\r\n else:\r\n await ctx.send(\"User data not found.\")\r\n\r\n async def get_user_data(user):\r\n user_data = await users.find_one({'user': user.id})\r\n return user_data\r\n \r\n async def get_top_users(limit):\r\n cursor = users.find().sort(\"total_thanks\", pymongo.DESCENDING).limit(limit)\r\n top_users = await cursor.to_list(length=limit)\r\n return top_users\r\n", "repo_name": "kirkwillrule/commendation_bot", "sub_path": "Scripts/scorecard.py", "file_name": "scorecard.py", "file_ext": "py", "file_size_in_byte": 1930, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 13, "usage_type": "call"}, {"api_name": "dotenv.dotenv_values", "line_number": 14, "usage_type": "call"}, {"api_name": "discord.Intents.all", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 16, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 17, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 17, "usage_type": "name"}, {"api_name": "motor.motor_asyncio.AsyncIOMotorClient", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pymongo.DESCENDING", "line_number": 54, "usage_type": "attribute"}]} +{"seq_id": "41468422498", "text": "import os\nimport sys\nimport re\nimport logging\nimport traceback\nfrom typing import Optional\n\nfrom PySide6 import QtWidgets, QtCore\nfrom PySide6 import QtPrintSupport\n\nimport sas.qtgui.Utilities.GuiUtils as GuiUtils\nimport sas.qtgui.Utilities.ObjectLibrary as ObjectLibrary\n\nfrom sas.qtgui.Utilities.Reports.UI.ReportDialogUI import Ui_ReportDialogUI\nfrom sas.qtgui.Utilities.Reports.reportdata import ReportData\n\n\nclass ReportDialog(QtWidgets.QDialog, Ui_ReportDialogUI):\n \"\"\"\n Class for stateless grid-like printout of model parameters for mutiple models\n \"\"\"\n def __init__(self, report_data: ReportData, parent: Optional[QtCore.QObject]=None):\n\n super().__init__(parent)\n self.setupUi(self)\n # disable the context help icon\n self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)\n\n self.report_data = report_data\n\n #self.save_location = None\n #if 'ReportDialog_directory' in ObjectLibrary.listObjects():\n self.save_location = ObjectLibrary.getObject('ReportDialog_directory')\n\n # Fill in the table from input data\n self.setupDialog(self.report_data.html)\n\n # Command buttons\n self.cmdPrint.clicked.connect(self.onPrint)\n self.cmdSave.clicked.connect(self.onSave)\n\n def setupDialog(self, output=None):\n \"\"\"\n Display the HTML content in the browser.\n \"\"\"\n if output is not None:\n self.txtBrowser.setHtml(output)\n\n def onPrint(self):\n \"\"\"\n Display the print dialog and send the report to printer\n \"\"\"\n # Define the printer\n printer = QtPrintSupport.QPrinter()\n\n # Display the print dialog\n dialog = QtPrintSupport.QPrintDialog(printer)\n dialog.setModal(True)\n dialog.setWindowTitle(\"Print\")\n if dialog.exec_() != 
QtWidgets.QDialog.Accepted:\n return\n\n document = self.txtBrowser.document()\n try:\n # pylint chokes on this line with syntax-error\n # pylint: disable=syntax-error doesn't seem to help\n document.print(printer)\n except Exception as ex:\n # Printing can return various exceptions, let's catch them all\n logging.error(\"Print report failed with: \" + str(ex))\n\n def onSave(self):\n \"\"\"\n Display the Save As... prompt and save the report if instructed so\n \"\"\"\n # Choose user's home directory\n if self.save_location is None:\n location = os.path.expanduser('~')\n else:\n location = self.save_location\n # Use a sensible filename default\n default_name = os.path.join(str(location), 'report.pdf')\n\n parent = self\n caption = 'Save Project'\n filter = 'PDF file (*.pdf);;HTML file (*.html);;Text file (*.txt)'\n options = QtWidgets.QFileDialog.DontUseNativeDialog\n directory = default_name\n filename_tuple = QtWidgets.QFileDialog.getSaveFileName(parent, caption, directory, filter, \"\", options)\n filename = filename_tuple[0]\n if not filename:\n return\n extension = filename_tuple[1]\n self.save_location = os.path.dirname(filename)\n # lifetime of this widget is short - keep the reference elsewhere\n ObjectLibrary.addObject('ReportDialog_directory', self.save_location)\n\n try:\n # extract extension from filter\n # e.g. \"PDF file (*.pdf)\" -> \".pdf\"\n ext = extension[extension.find(\"(\")+2:extension.find(\")\")]\n except IndexError as ex:\n # (ext) not found...\n logging.error(\"Error while saving report. \" + str(ex))\n return\n basename, extension = os.path.splitext(filename)\n if not extension:\n filename = '.'.join((filename, ext))\n\n if ext.lower() == \".txt\":\n self.write_string(self.report_data.text, filename)\n\n elif ext.lower() == \".html\":\n self.write_string(self.report_data.html, filename)\n\n elif ext.lower() == \".pdf\":\n html_utf = GuiUtils.replaceHTMLwithUTF8(self.report_data.html)\n self.save_pdf(html_utf, filename)\n\n else:\n logging.error(f\"Unknown file extension: {ext.lower()}\")\n\n\n\n @staticmethod\n def write_string(string, filename):\n \"\"\"\n Write string to file\n \"\"\"\n with open(filename, 'wb') as f:\n # weird unit symbols need to be saved as UTF-8\n f.write(bytes(string, 'utf-8'))\n\n @staticmethod\n def save_pdf(data, filename):\n \"\"\"\n Create a PDF file from html source string.\n Returns True is the file creation was successful.\n : data: html string\n : filename: name of file to be saved\n \"\"\"\n # import moved from top due to cost\n from xhtml2pdf import pisa\n try:\n # open output file for writing (truncated binary)\n with open(filename, \"w+b\") as resultFile:\n # convert HTML to PDF\n pisaStatus = pisa.CreatePDF(data.encode(\"UTF-8\"),\n dest=resultFile,\n encoding='UTF-8')\n return pisaStatus.err\n\n except Exception as ex:\n # logging.error(\"Error creating pdf: \" + str(ex))\n logging.error(\"Error creating pdf: \" + traceback.format_exc())\n return False\n\n\n", "repo_name": "SasView/sasview", "sub_path": "src/sas/qtgui/Utilities/Reports/ReportDialog.py", "file_name": "ReportDialog.py", "file_ext": "py", "file_size_in_byte": 5449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PySide6.QtWidgets.QDialog", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 18, "usage_type": "name"}, {"api_name": "sas.qtgui.Utilities.Reports.UI.ReportDialogUI.Ui_ReportDialogUI", "line_number": 18, "usage_type": 
"name"}, {"api_name": "sas.qtgui.Utilities.Reports.reportdata.ReportData", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 22, "usage_type": "name"}, {"api_name": "PySide6.QtCore.QObject", "line_number": 22, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 22, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 27, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 27, "usage_type": "name"}, {"api_name": "sas.qtgui.Utilities.ObjectLibrary.getObject", "line_number": 33, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.ObjectLibrary", "line_number": 33, "usage_type": "name"}, {"api_name": "PySide6.QtPrintSupport.QPrinter", "line_number": 54, "usage_type": "call"}, {"api_name": "PySide6.QtPrintSupport", "line_number": 54, "usage_type": "name"}, {"api_name": "PySide6.QtPrintSupport.QPrintDialog", "line_number": 57, "usage_type": "call"}, {"api_name": "PySide6.QtPrintSupport", "line_number": 57, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QDialog", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 60, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 87, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 87, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFileDialog.getSaveFileName", "line_number": 89, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 89, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 89, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sas.qtgui.Utilities.ObjectLibrary.addObject", "line_number": 96, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.ObjectLibrary", "line_number": 96, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "sas.qtgui.Utilities.GuiUtils.replaceHTMLwithUTF8", "line_number": 117, "usage_type": "call"}, {"api_name": "sas.qtgui.Utilities.GuiUtils", "line_number": 117, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 121, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa.CreatePDF", "line_number": 148, "usage_type": "call"}, {"api_name": "xhtml2pdf.pisa", "line_number": 148, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 155, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "7528872817", "text": "import logging\nimport sys\nimport zipfile\nimport zlib\nimport tarfile\nimport os\nimport time\n\nfrom celery import Celery\nfrom werkzeug.utils import secure_filename\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom file_converter import FileConverter\nfrom modelos import File\nimport config\nfrom google.cloud import 
pubsub_v1\nfrom google.oauth2 import service_account\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\ncelery_app = Celery(__name__, broker=config.REDIS_URI)\n\n# Configure SQLAlchemy to use the PostgreSQL database\nengine = create_engine(config.POSTGRES_URI)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# Path to your service account key file\nservice_account_key_path = './google-json/uniandes-grupo-10-9a07a80edaf8.json'\n\n# Load the credentials from the JSON key file\ncredentials = service_account.Credentials.from_service_account_file(service_account_key_path)\n\n# Set the credentials on the Pub/Sub subscriber client\nsubscriber = pubsub_v1.SubscriberClient(credentials=credentials)\n\nsubscription_path = subscriber.subscription_path(\n config.GOOGLE_PUBSUB_PROJECT_ID, config.GOOGLE_PUBSUB_SUBSCRIPTION_ID\n)\n\n\ndef callback(message):\n payload = message.data.decode()\n file_id = message.attributes.get('file_id')\n filename = message.attributes.get('filename')\n new_format = message.attributes.get('new_format')\n fecha = message.attributes.get('fecha')\n\n print(\"Received message:\")\n print(\"Payload:\", payload)\n print(\"File ID:\", file_id)\n print(\"Filename:\", filename)\n print(\"New Format:\", new_format)\n print(\"Fecha:\", fecha)\n\n # process_file_task.delay(file_id, filename, new_format, fecha)\n\n message.ack()\n\n\n@celery_app.task(name='process_file')\ndef process_file_task(file_id, filename, new_format, fecha):\n UPLOAD_FOLDER = '/tmp/uploads' if config.USING_APP_ENGINE else './uploads'\n PROCESS_FOLDER = '/tmp/processed' if config.USING_APP_ENGINE else './processed'\n filenameParts = filename.split('.')\n\n # https://cloud.google.com/appengine/docs/standard/using-temp-files?tab=python\n dirlog = '/tmp' if config.USING_APP_ENGINE else os.path.dirname(\n os.path.abspath(__file__))\n log_file_path = os.path.join(dirlog, 'log_conversion.txt')\n with open(log_file_path, 'a+') as file:\n file.write(\n '{} to {} - solicitud de conversion: {}\\n'.format(filename, new_format, fecha))\n\n formats = {\n 'zip': FileConverter.to_zip,\n 'tar_gz': FileConverter.to_tar_gz,\n 'tar_bz2': FileConverter.to_tar_bz2\n }\n\n attempt_counter = 0\n\n file_path = os.path.join(UPLOAD_FOLDER, secure_filename(filename))\n while not os.path.exists(file_path) or attempt_counter == 10:\n attempt_counter += 1\n print(f\"File not found: {file_path}. 
Waiting 0.5 seconds...\")\n time.sleep(0.5)\n print(f\"File found: {file_path}\")\n\n if not os.path.exists(file_path):\n print(f\"File not found: {file_path}\")\n return\n\n if new_format in formats.keys():\n print(f\"calling {new_format}\")\n func = formats[new_format]\n print(f\"function: {func}\")\n processed_filename = func(file_path, os.path.join(\n PROCESS_FOLDER, filenameParts[0]))\n print(f\"original: {os.path.join(PROCESS_FOLDER, filename)}\")\n print(f\"destination: {processed_filename}\")\n file = session.query(File).filter_by(id=file_id).first()\n processed_filename_parts = processed_filename.split('/')\n file.processed_filename = processed_filename_parts[-1]\n file.state = 'PROCESSED'\n session.add(file)\n session.commit()\n else:\n print(\"invalid format\")\n", "repo_name": "je-guerreroa1-uniandes/grupo-10-nube", "sub_path": "jobs/app_celery.py", "file_name": "app_celery.py", "file_ext": "py", "file_size_in_byte": 3905, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 21, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 22, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 23, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 24, "usage_type": "call"}, {"api_name": "celery.Celery", "line_number": 28, "usage_type": "call"}, {"api_name": "config.REDIS_URI", "line_number": 28, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 31, "usage_type": "call"}, {"api_name": "config.POSTGRES_URI", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 32, "usage_type": "call"}, {"api_name": "google.oauth2.service_account.Credentials.from_service_account_file", "line_number": 39, "usage_type": "call"}, {"api_name": "google.oauth2.service_account.Credentials", "line_number": 39, "usage_type": "attribute"}, {"api_name": "google.oauth2.service_account", "line_number": 39, "usage_type": "name"}, {"api_name": "google.cloud.pubsub_v1.SubscriberClient", "line_number": 42, "usage_type": "call"}, {"api_name": "google.cloud.pubsub_v1", "line_number": 42, "usage_type": "name"}, {"api_name": "config.GOOGLE_PUBSUB_PROJECT_ID", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config.GOOGLE_PUBSUB_SUBSCRIPTION_ID", "line_number": 45, "usage_type": "attribute"}, {"api_name": "config.USING_APP_ENGINE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "config.USING_APP_ENGINE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "config.USING_APP_ENGINE", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "file_converter.FileConverter.to_zip", "line_number": 83, "usage_type": "attribute"}, {"api_name": "file_converter.FileConverter", "line_number": 83, "usage_type": "name"}, {"api_name": 
"file_converter.FileConverter.to_tar_gz", "line_number": 84, "usage_type": "attribute"}, {"api_name": "file_converter.FileConverter", "line_number": 84, "usage_type": "name"}, {"api_name": "file_converter.FileConverter.to_tar_bz2", "line_number": 85, "usage_type": "attribute"}, {"api_name": "file_converter.FileConverter", "line_number": 85, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "modelos.File", "line_number": 109, "usage_type": "argument"}]} +{"seq_id": "32441315122", "text": "''' ceci est un programme qui lit le fichier de résultat des positions de la projectile\r\n'''\r\n#importation de module numpy et la librairie matplotlib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#ouverture de fichier de position\r\nfichier=open('position.txt',mode='r')\r\n\r\n#lecture de fichier ligne par ligne\r\nlignes=fichier.read().split('\\n')\r\n\r\n#créer 3 listes vides et définir i initiale\r\nx=[]\r\ny=[]\r\nt=[]\r\ni=1\r\n\r\n#un boucle qui ajoute les valeurs de positions et de temps dans les listes vides\r\nwhile i List[np.ndarray]:\n \"\"\"Compute weighted average.\"\"\"\n # Calculate the total number of examples used during training\n num_examples_total = sum([num_examples for _, num_examples in results])\n\n # Create a list of weights, each multiplied by the related number of examples\n weighted_weights = [\n [layer * num_examples for layer in weights] for weights, num_examples in results\n ]\n\n # Compute average weights of each layer\n weights_prime: List[np.ndarray] = [\n reduce(np.add, layer_updates) / num_examples_total\n for layer_updates in zip(*weighted_weights)\n ]\n return weights_prime\n", "repo_name": "sasano8/mystore", "sub_path": "myhdf5/aggregate.py", "file_name": "aggregate.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 7, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 19, "usage_type": "attribute"}]} +{"seq_id": "30595009495", "text": "\"\"\" These tests check basic operation of ide.tasks.archive.do_import_archive \"\"\"\nimport mock\n\nfrom django.core.exceptions import ValidationError\n\nfrom ide.tasks.archive import do_import_archive, 
InvalidProjectArchiveException\nfrom ide.utils.cloudpebble_test import CloudpebbleTestCase, make_package, make_appinfo, build_bundle, override_settings\nfrom ide.models.project import Project\nfrom utils.fakes import FakeS3\n\n__author__ = 'joe'\n\nfake_s3 = FakeS3()\n\n\n@mock.patch('ide.models.s3file.s3', fake_s3)\nclass TestImportArchive(CloudpebbleTestCase):\n def setUp(self):\n self.login()\n\n @staticmethod\n def make_resource_spec(name='IMAGE_BLAH'):\n return {\n 'resources': {\n 'media': [{\n 'file': 'images/blah.png',\n 'name': name,\n 'type': 'bitmap'\n }]\n }\n }\n\n def test_import_basic_bundle_with_appinfo(self):\n \"\"\" Check that a minimal bundle imports without error \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'appinfo.json': make_appinfo()\n })\n do_import_archive(self.project_id, bundle)\n\n def test_throws_with_invalid_appinfo(self):\n \"\"\" Check that appinfo validation is performed with a few invalid values \"\"\"\n invalid_things = [\n ('projectType', 'invalid'),\n ('sdkVersion', '1'),\n ('versionLabel', '01.0'),\n ]\n for k, v in invalid_things:\n bundle = build_bundle({\n 'src/main.c': '',\n 'appinfo.json': make_appinfo({k: v})\n })\n with self.assertRaises(ValidationError):\n do_import_archive(self.project_id, bundle)\n\n def test_import_basic_bundle_with_npm_manifest(self):\n \"\"\" Check that archives with package.json can be imported \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'package.json': make_package(package_options={'name': 'myproject'})\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(project.app_long_name, 'test')\n self.assertEqual(project.app_short_name, 'myproject')\n\n def test_import_package_with_dependencies(self):\n \"\"\" Check that dependencies in a package.json file are imported into the database \"\"\"\n deps = {\n 'some_package': '3.14.15',\n 'another': 'http://blah.com/package.git',\n }\n bundle = build_bundle({\n 'src/main.c': '',\n 'package.json': make_package(package_options={\n 'dependencies': deps\n })\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n actual_deps = {d.name: d.version for d in project.dependencies.all()}\n self.assertDictEqual(actual_deps, deps)\n\n def test_import_package_with_keywords(self):\n \"\"\" Check that keywords in a package.json file are imported into the database \"\"\"\n keywords = ['pebbles', 'watch', 'bunnies']\n bundle = build_bundle({\n 'src/main.c': '',\n 'package.json': make_package(package_options={\n 'keywords': keywords\n })\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(set(keywords), set(project.keywords))\n\n def test_import_appinfo_with_resources(self):\n \"\"\" Check that a resource can be imported in an appinfo.json project \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'resources/images/blah.png': 'contents!',\n 'appinfo.json': make_appinfo(options=self.make_resource_spec())\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')\n\n def test_import_package_with_resources(self):\n \"\"\" Check that a resource can be imported in an package.json project \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'resources/images/blah.png': 'contents!',\n 'package.json': make_package(pebble_options=self.make_resource_spec())\n })\n 
do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(project.resources.get().variants.get().get_contents(), 'contents!')\n\n def test_throws_with_local_file_dependencies(self):\n \"\"\" Throw if any dependencies reference local files \"\"\"\n bad_versions = [\n 'file:security/breach',\n '/security/breach',\n './security/breach',\n '../security/breach',\n '~/security/breach'\n ]\n for version in bad_versions:\n bundle = build_bundle({\n 'src/main.c': '',\n 'package.json': make_package(package_options={\n 'dependencies': {'some_package': version}\n })\n })\n with self.assertRaises(ValidationError):\n do_import_archive(self.project_id, bundle)\n\n def test_throws_if_sdk2_project_has_array_appkeys(self):\n \"\"\" Throw when trying to import an sdk 2 project with array appkeys \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'appinfo.json': make_appinfo(options={'appKeys': [], 'sdkVersion': '2'})\n })\n with self.assertRaises(ValidationError):\n do_import_archive(self.project_id, bundle)\n\n def test_invalid_resource_id(self):\n \"\"\" Check that invalid characters are banned from resource IDs \"\"\"\n bundle = build_bundle({\n 'src/main.c': '',\n 'resources/images/blah.png': 'contents!',\n 'package.json': make_package(pebble_options=self.make_resource_spec(\"<>\"))\n })\n\n with self.assertRaises(ValidationError):\n do_import_archive(self.project_id, bundle)\n\n def test_import_json_file(self):\n \"\"\" Check that json files are correctly imported \"\"\"\n bundle = build_bundle({\n 'src/js/test.json': '{}',\n 'src/main.c': '',\n 'package.json': make_package()\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(project.source_files.filter(file_name='test.json').count(), 1)\n\n def test_import_rocky(self):\n \"\"\" Check that json files are correctly imported \"\"\"\n bundle = build_bundle({\n 'src/rocky/index.js': '',\n 'src/common/lib.js': '',\n 'src/pkjs/app.js': '',\n 'package.json': make_package(pebble_options={'projectType': 'rocky'})\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertEqual(project.source_files.filter(file_name='index.js', target='app').count(), 1)\n self.assertEqual(project.source_files.filter(file_name='lib.js', target='common').count(), 1)\n self.assertEqual(project.source_files.filter(file_name='app.js', target='pkjs').count(), 1)\n\n\n@mock.patch('ide.models.s3file.s3', fake_s3)\nclass TestImportLibrary(CloudpebbleTestCase):\n def setUp(self):\n self.login(type='package')\n\n def test_import_basic_library(self):\n \"\"\" Try importing a basic library \"\"\"\n bundle = build_bundle({\n 'include/my-lib.h': '',\n 'package.json': make_package(pebble_options={'projectType': 'package'}),\n 'src/c/my-lib.c': '',\n 'src/c/my-priv.h': '',\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n files = {f.file_name: f for f in project.source_files.all()}\n self.assertSetEqual(set(files.keys()), {'my-lib.h', 'my-lib.c', 'my-priv.h'})\n self.assertEqual(files['my-lib.h'].target, 'public')\n self.assertEqual(files['my-lib.c'].target, 'app')\n self.assertEqual(files['my-priv.h'].target, 'app')\n\n def test_import_library_with_resources(self):\n \"\"\" Try importing a basic library with resources \"\"\"\n bundle = build_bundle({\n 'package.json': make_package(pebble_options={\n 'projectType': 'package',\n 'resources': {'media': [{\n 
'type': 'bitmap',\n 'name': 'MY_RES1',\n 'file': 'res1.png'\n }, {\n 'type': 'bitmap',\n 'name': 'MY_RES2',\n 'file': 'res2.png'\n }]}\n }),\n 'src/resources/res1.png': '',\n 'src/resources/res2.png': '',\n })\n do_import_archive(self.project_id, bundle)\n project = Project.objects.get(pk=self.project_id)\n self.assertSetEqual({f.file_name for f in project.resources.all()}, {'res1.png', 'res2.png'})\n", "repo_name": "pebble/cloudpebble", "sub_path": "ide/tests/test_import_archive.py", "file_name": "test_import_archive.py", "file_ext": "py", "file_size_in_byte": 9094, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 210, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.fakes.FakeS3", "line_number": 13, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.CloudpebbleTestCase", "line_number": 17, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 35, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_appinfo", "line_number": 37, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 39, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 49, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_appinfo", "line_number": 51, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 53, "usage_type": "argument"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 54, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 58, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 60, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 62, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 63, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 73, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 75, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 79, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 80, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 80, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 87, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 89, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 93, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 94, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 94, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 99, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_appinfo", "line_number": 102, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 104, "usage_type": 
"call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 105, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 105, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 110, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 113, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 115, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 116, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 116, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 116, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 129, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 131, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 135, "usage_type": "argument"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 136, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 140, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_appinfo", "line_number": 142, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 144, "usage_type": "argument"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 145, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 149, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 152, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 155, "usage_type": "argument"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 156, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 160, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 163, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 165, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 166, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 166, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 166, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 171, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 175, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 177, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 178, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 178, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 178, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 16, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.CloudpebbleTestCase", "line_number": 185, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 191, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 193, 
"usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 197, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 198, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 198, "usage_type": "name"}, {"api_name": "ide.utils.cloudpebble_test.build_bundle", "line_number": 207, "usage_type": "call"}, {"api_name": "ide.utils.cloudpebble_test.make_package", "line_number": 208, "usage_type": "call"}, {"api_name": "ide.tasks.archive.do_import_archive", "line_number": 223, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects.get", "line_number": 224, "usage_type": "call"}, {"api_name": "ide.models.project.Project.objects", "line_number": 224, "usage_type": "attribute"}, {"api_name": "ide.models.project.Project", "line_number": 224, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "12648935341", "text": "import chaos_pb2\nimport gtfs_realtime_pb2\nimport datetime\n\n\ndef get_pos_time(sql_time):\n if sql_time:\n return int((sql_time - datetime.datetime(1970, 1, 1)).total_seconds())\n return 0\n\n\ndef get_pt_object_type(type):\n collection = {\n \"network\": chaos_pb2.PtObject.network,\n \"stop_area\": chaos_pb2.PtObject.stop_area,\n \"line\": chaos_pb2.PtObject.line,\n \"line_section\": chaos_pb2.PtObject.line_section,\n \"route\": chaos_pb2.PtObject.route,\n \"stop_point\": chaos_pb2.PtObject.stop_point\n }\n if type in collection:\n return collection[type]\n return chaos_pb2.PtObject.unkown_type\n\n\ndef get_channel_type(type):\n try:\n return chaos_pb2.Channel.Type.Value(type)\n except ValueError:\n return chaos_pb2.Channel.unkown_type\n\n\ndef created_upated_at(src, dest):\n dest.created_at = get_pos_time(src.created_at)\n if src.updated_at:\n dest.updated_at = get_pos_time(src.updated_at)\n\n\ndef get_severity_effect_value(effect):\n available_effects = {\n 'no_service': gtfs_realtime_pb2.Alert.NO_SERVICE,\n 'reduced_service': gtfs_realtime_pb2.Alert.REDUCED_SERVICE,\n 'significant_delays': gtfs_realtime_pb2.Alert.SIGNIFICANT_DELAYS,\n 'detour': gtfs_realtime_pb2.Alert.DETOUR,\n 'additional_service': gtfs_realtime_pb2.Alert.ADDITIONAL_SERVICE,\n 'modified_service': gtfs_realtime_pb2.Alert.MODIFIED_SERVICE,\n 'other_effect': gtfs_realtime_pb2.Alert.OTHER_EFFECT,\n 'unknown_effect': gtfs_realtime_pb2.Alert.UNKNOWN_EFFECT,\n 'stop_moved': gtfs_realtime_pb2.Alert.STOP_MOVED\n }\n if effect in available_effects.keys():\n return available_effects[effect]\n\n return available_effects['unknown_effect']\n\n\ndef populate_severity(impact_pb, severity):\n impact_pb.severity.id = severity.id\n impact_pb.severity.wording = severity.wording\n if severity.color:\n impact_pb.severity.color = severity.color\n\n impact_pb.severity.effect = get_severity_effect_value(severity.effect)\n if severity.priority:\n impact_pb.severity.priority = severity.priority\n\n\ndef populate_application_periods(impact, impact_pb):\n for application_period in impact.application_periods:\n application_period_pb = impact_pb.application_periods.add()\n application_period_pb.start = get_pos_time(application_period.start_date)\n if application_period.end_date:\n application_period_pb.end = get_pos_time(application_period.end_date)\n\n\ndef populate_channel_type(channel, channel_pb):\n if channel.channel_types:\n for type in channel.channel_types:\n 
channel_pb.types.append(get_channel_type(type.name))\n\n\ndef populate_channel(channel_pb, channel):\n channel_pb.id = channel.id\n channel_pb.name = channel.name\n channel_pb.content_type = channel.content_type\n channel_pb.max_size = long(channel.max_size)\n created_upated_at(channel, channel_pb)\n populate_channel_type(channel, channel_pb)\n\n\ndef populate_messages(impact, impact_pb):\n for message in impact.messages:\n message_pb = impact_pb.messages.add()\n message_pb.text = message.text\n created_upated_at(message, message_pb)\n populate_channel(message_pb.channel, message.channel)\n for meta in message.meta:\n meta_pb = message_pb.meta.add()\n meta_pb.key = meta.key\n meta_pb.value = meta.value\n\n\ndef populate_informed_entitie(pt_object, informed_entitie):\n informed_entitie.pt_object_type = get_pt_object_type(pt_object.type)\n informed_entitie.uri = pt_object.uri\n created_upated_at(pt_object, informed_entitie)\n\n\ndef populate_pt_objects(impact, impact_pb):\n for pt_object in impact.objects:\n informed_entitie = impact_pb.informed_entities.add()\n populate_informed_entitie(pt_object, informed_entitie)\n if pt_object.type == 'line_section':\n if hasattr(pt_object.line_section, 'sens'):\n if pt_object.line_section.sens:\n informed_entitie.pt_line_section.sens = long(pt_object.line_section.sens)\n populate_informed_entitie(pt_object.line_section.line, informed_entitie.pt_line_section.line)\n populate_informed_entitie(pt_object.line_section.start_point, informed_entitie.pt_line_section.start_point)\n populate_informed_entitie(pt_object.line_section.end_point, informed_entitie.pt_line_section.end_point)\n if hasattr(pt_object.line_section, 'routes'):\n for route in pt_object.line_section.routes:\n route_pb = informed_entitie.pt_line_section.routes.add()\n populate_informed_entitie(route, route_pb)\n if hasattr(pt_object.line_section, 'via'):\n for via in pt_object.line_section.via:\n via_pb = informed_entitie.pt_line_section.via.add()\n populate_informed_entitie(via, via_pb)\n\n\ndef populate_impact(disruption, disruption_pb):\n for impact in disruption.impacts:\n if impact.status == \"published\":\n impact_pb = disruption_pb.impacts.add()\n impact_pb.id = impact.id\n if hasattr(impact, 'send_notifications') and impact.send_notifications:\n impact_pb.send_notifications = impact.send_notifications\n if hasattr(impact, 'notification_date') and impact.notification_date:\n impact_pb.notification_date = get_pos_time(impact.notification_date)\n created_upated_at(impact, impact_pb)\n populate_severity(impact_pb, impact.severity)\n populate_application_periods(impact, impact_pb)\n populate_messages(impact, impact_pb)\n populate_pt_objects(impact, impact_pb)\n\n\ndef populate_localization(disruption, disruption_pb):\n if hasattr(disruption, 'localizations'):\n if disruption.localizations:\n for localization in disruption.localizations:\n populate_informed_entitie(localization, disruption_pb.localization.add())\n\n\ndef populate_tag(disruption, disruption_pb):\n if disruption.tags:\n for tag in disruption.tags:\n tag_pb = disruption_pb.tags.add()\n tag_pb.id = tag.id\n tag_pb.name = tag.name\n created_upated_at(tag, tag_pb)\n\n\ndef populate_category(category, category_pb):\n category_pb.id = category.id\n category_pb.name = category.name\n\n\ndef populate_cause(cause, cause_pb):\n cause_pb.id = cause.id\n cause_pb.wording = cause.wording\n for wording in cause.wordings:\n wording_pb = cause_pb.wordings.add()\n wording_pb.key = wording.key\n wording_pb.value = wording.value\n if 
cause.category:\n populate_category(cause.category, cause_pb.category)\n\n\ndef populate_property(disruption, disruption_pb):\n if disruption.properties:\n for prop in disruption.properties:\n d_property = disruption_pb.properties.add()\n d_property.key = prop.property.key\n d_property.type = prop.property.type\n d_property.value = prop.value\n\n\ndef populate_disruption(disruption, disruption_pb):\n disruption_pb.id = disruption.id\n disruption_pb.reference = disruption.reference\n if disruption.contributor and disruption.contributor.contributor_code:\n disruption_pb.contributor = disruption.contributor.contributor_code\n\n if disruption.note:\n disruption_pb.note = disruption.note\n created_upated_at(disruption, disruption_pb)\n if disruption.start_publication_date:\n disruption_pb.publication_period.start = get_pos_time(disruption.start_publication_date)\n if disruption.end_publication_date:\n disruption_pb.publication_period.end = get_pos_time(disruption.end_publication_date)\n\n populate_cause(disruption.cause, disruption_pb.cause)\n populate_localization(disruption, disruption_pb)\n populate_tag(disruption, disruption_pb)\n populate_impact(disruption, disruption_pb)\n populate_property(disruption, disruption_pb)\n\n\ndef populate_pb(disruption):\n feed_message = gtfs_realtime_pb2.FeedMessage()\n feed_message.header.gtfs_realtime_version = '1.0'\n feed_message.header.incrementality = gtfs_realtime_pb2.FeedHeader.DIFFERENTIAL\n feed_message.header.timestamp = get_pos_time(datetime.datetime.utcnow())\n\n feed_entity = feed_message.entity.add()\n feed_entity.id = disruption.id\n feed_entity.is_deleted = (disruption.status == \"archived\")\n\n if not feed_entity.is_deleted:\n disruption_pb = feed_entity.Extensions[chaos_pb2.disruption]\n populate_disruption(disruption, disruption_pb)\n return feed_message\n", "repo_name": "dvdn/Chaos", "sub_path": "chaos/populate_pb.py", "file_name": "populate_pb.py", "file_ext": "py", "file_size_in_byte": 8635, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "chaos_pb2.PtObject", "line_number": 14, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 15, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 16, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 17, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 18, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 19, "usage_type": "attribute"}, {"api_name": "chaos_pb2.PtObject", "line_number": 23, "usage_type": "attribute"}, {"api_name": "chaos_pb2.Channel.Type.Value", "line_number": 28, "usage_type": "call"}, {"api_name": "chaos_pb2.Channel", "line_number": 28, "usage_type": "attribute"}, {"api_name": "chaos_pb2.Channel", "line_number": 30, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 41, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 42, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 46, "usage_type": "attribute"}, {"api_name": 
"gtfs_realtime_pb2.Alert", "line_number": 47, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 48, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.Alert", "line_number": 49, "usage_type": "attribute"}, {"api_name": "gtfs_realtime_pb2.FeedMessage", "line_number": 209, "usage_type": "call"}, {"api_name": "gtfs_realtime_pb2.FeedHeader", "line_number": 211, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 212, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 212, "usage_type": "attribute"}, {"api_name": "chaos_pb2.disruption", "line_number": 219, "usage_type": "attribute"}]} +{"seq_id": "41757702586", "text": "import http.client\nimport http.server\nimport time\n\nimport requests\n\nimport json\ndef send_request_to_ai_server(request_body):\n ai_server_host = '117.17.191.49' # AI 서버 호스트 주소\n ai_server_port = 12000 # AI 서버 포트\n\n ai_server_path = '10.50.99.242:12000' # AI 서버의 엔드포인트 경로\n headers = {'Content-Type': 'application/json'} # 요청 헤더 설정 (JSON 예제)\n\n try:\n connection = http.client.HTTPConnection(ai_server_host, ai_server_port)\n connection.request('POST', ai_server_path, body=request_body, headers=headers)\n\n response = connection.getresponse()\n response_data = response.read()\n\n return response.status, response_data\n\n except Exception as e:\n return 500, str(e) # 에러 처리 (500 Internal Server Error)\n\n\ndef send_result_to_spring(result_data):\n spring_server_host = '127.0.0.1' # Spring 서버 호스트 주소\n spring_server_port = 8080 # Spring 서버 포트\n\n spring_server_path = '/api/file/share' # Spring 서버의 엔드포인트 경로\n headers = {'Content-Type': 'application/json'} # 요청 헤더 설정 (JSON 예제)\n\n try:\n connection = http.client.HTTPConnection(spring_server_host, spring_server_port)\n connection.request('POST', spring_server_path, body=result_data, headers=headers)\n\n response = connection.getresponse()\n response_data = response.read()\n\n return response.status, response_data\n\n except Exception as e:\n return 500, str(e)\n\n\ndef main():\n i=1\n while (i):\n request_data = requests.request(method=\"POST\", url=\"117.17.191.49:12000\")\n\n response_status, response_data = send_request_to_ai_server(request_data)\n\n print(f'Response Status: {response_status}')\n\n # AI 서버에서 받은 결과 데이터 (예: JSON 형식)\n result_data = json.dumps([('file', open('./media/summed.txt')), ('file', open('./media/background.png')),\n ('file', open('./media/summed.txt'))])\n\n response_status, response_data = send_result_to_spring(result_data)\n\n print(f'Response Status: {response_status}')\n\n i=0\n time.sleep(1)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "igh197/reqmodule", "sub_path": "transceiving.py", "file_name": "transceiving.py", "file_ext": "py", "file_size_in_byte": 2244, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "http.client.client.HTTPConnection", "line_number": 16, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 16, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 16, "usage_type": "name"}, {"api_name": "http.client.client.HTTPConnection", "line_number": 36, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 36, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 36, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 51, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, 
{"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "37923809680", "text": "# import the necessary packages\nimport numpy as np\nfrom imutils import paths\nimport imutils\nimport cv2\n\nMIN_MATCH_COUNT = 10\n\n# Buzz words: Domain specific, \n\ndef featureDetection(img1, img2, detector):\n\t# FDetect the keypoints with SIFT Detector, compute the descriptors\n\tif detector.lower() == \"sift\": \n\t\tfdet = cv2.xfeatures2d.SIFT_create()\n\telif detector.lower() == \"surf\":\n\t\tfdet = cv2.xfeatures2d.SURF_create()\n\telif detector.lower() == \"orb\":\n\t\tfdet = cv2.ORB_create(nfeatures=500)\n\t\tkp0 = fdet.detect(img1, None)\n\t\tkp1 = fdet.detect(img2, None)\n\t\tkp0, des0 = fdet.compute(img1, kp0)\n\t\tkp1, des1 = fdet.compute(img2, kp1)\n\t\t# cv2.imshow('original_image_left_keypoints'+str(detector)+\".jpg\", cv2.drawKeypoints(img1, kp0, None, color=(0,0,255)))\n\t\t# cv2.imwrite( \"dist/\"+str(detector)+\".jpg\", cv2.drawKeypoints(img1, kp0, None, color=(0,0,255)) );\n\t\treturn kp0, des0, kp1, des1\n\telse:\n\t\traise Exception(\"{} is not a valid input for the detector\".format(detector))\n\n\tkp0, des0 = fdet.detectAndCompute(img1, None)\n\tkp1, des1 = fdet.detectAndCompute(img2, None)\n\t# cv2.imwrite( \"dist/\"+str(detector)+\".jpg\", cv2.drawKeypoints(img1, kp0, None, color=(0,0,255)) );\n\t# cv2.imshow('original_image_left_keypoints'+str(detector)+\".jpg\", cv2.drawKeypoints(img1, kp0, None, color=(0,0,255)))\n\treturn kp0, des0, kp1, des1\n\ndef getMatches(des1,des2, matcher):\n\t# Matching descriptor vectors with a BF or FLANN based matcher\n\tmatch = cv2.DescriptorMatcher_create(matcher)\n\tmatches = match.knnMatch(des1,des2,k=2)\n\tgood = []\n\t# Filtering the matches\n\tfor m,n in matches:\n\t\tif m.distance < 0.4*n.distance:\n\t\t\tgood.append(m)\n\treturn good\n\ndef combine(img1_, img2_, matcher, detector):\n\timg1 = cv2.cvtColor(img1_,cv2.COLOR_BGR2GRAY)\n\timg2 = cv2.cvtColor(img2_,cv2.COLOR_BGR2GRAY)\n\tkp1, des1, kp2, des2 = featureDetection(img1, img2, detector)\n\tmatches = getMatches(des1, des2, matcher)\n\tif len(matches) > MIN_MATCH_COUNT:\n\t\tsrc_pts = np.float32([ kp1[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)\n\t\tdst_pts = np.float32([ kp2[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)\n\t\tM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n\t\th, w = img1.shape\n\t\tpts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n\t\tdst = cv2.perspectiveTransform(pts, M)\n\t\timg2 = cv2.polylines(img1,[np.int32(dst)],True,255,3, cv2.LINE_AA)\n\t\tdst = cv2.warpPerspective(img1_,M,(img2_.shape[1] + img1_.shape[1], img2_.shape[0]))\n\t\t# cv2.imwrite( \"dist/warped.jpg\", dst) \n\t\t# cv2.imshow(\"warpPerspective.jpg\", dst)\n\t\timg3 = cv2.drawMatches(img1_, kp1, img2_, kp2, matches, None, matchColor=(255,0,0), singlePointColor=None, flags=2)\n\t\t# cv2.imwrite( \"dist/\"+str(matcher)+\".jpg\", img3) \n\t\t# cv2.imshow(\"original_image_drawMatches\"+str(matcher)+\".jpg\", img3)\n\t\tdst[0:img2_.shape[0],0:img2_.shape[1]] = img2_\n\t\t# cv2.imshow(\"original_image_stitched.jpg\", dst)\n\t\treturn dst\n\telse:\n\t\traise Exception(\"Not enought sorted matches are found - {}\".format(MIN_MATCH_COUNT))\n\nimg0_ = cv2.imread(\"images/IMG_3118.jpg\")\nimg1_ = cv2.imread(\"images/IMG_3119.jpg\")\nimg2_ = cv2.imread(\"images/IMG_3120.jpg\")\n\n# combine(img0_, img1_, cv2.DESCRIPTOR_MATCHER_FLANNBASED, \"sift\")\n# combine(img0_, img1_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\nimg3_ = combine(img0_, 
img1_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\n# img4_ = combine(img1_, img2_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\n# img5_ = combine(img3_, img4_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\n# img6_ = combine(img0_, img4_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\n# img7_ = combine(img3_, img2_, cv2.DESCRIPTOR_MATCHER_BRUTEFORCE, \"sift\")\n\ncv2.imshow(\"in1.jpg\", img3_)\n# cv2.imwrite( \"dist/result1.jpg\", img3_) \n# cv2.imshow(\"final2.jpg\", img5_)\n# cv2.imwrite( \"dist/result2.jpg\", img5_) \n# cv2.imshow(\"final2.jpg\", img6_)\n# cv2.imwrite( \"dist/result3.jpg\", img6_) \n# cv2.imshow(\"in.jpg\", img7_)\n# cv2.imwrite( \"dist/result4.jpg\", img7_) \ncv2.waitKey(0)\n#cv2.imsave(\"original_image_stitched_crop.jpg\", trim(dst))", "repo_name": "jesperlunpet/computer_vision_jp19", "sub_path": "image_stitching.py", "file_name": "image_stitching.py", "file_ext": "py", "file_size_in_byte": 3966, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SURF_create", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.ORB_create", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.DescriptorMatcher_create", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.findHomography", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.RANSAC", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.perspectiveTransform", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.polylines", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.warpPerspective", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.drawMatches", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 71, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "cv2.DESCRIPTOR_MATCHER_BRUTEFORCE", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 91, "usage_type": "call"}]} +{"seq_id": "26566344364", "text": "import collections\nfrom datetime import datetime\nimport io\nimport logging\nimport os\nimport os.path\nimport re\nimport subprocess\nimport threading\nimport time\nimport unicodedata\n\nfrom corenlp_xml.document import Document\n\nfrom six import iteritems\n\nfrom .._downloader import download_zip\n\n\nlog = logging.getLogger(__name__)\n\n_CORENLP_VERSION = None\n\n\nclass _StanfordCoreNLP(object):\n\n 
_singletons = {} # annotators : object\n\n @classmethod\n def get_singleton(cls, annotators=None, **options):\n \"\"\"\n Get or create a corenlp parser with the given annotator and options\n Note: multiple parsers with the same annotator and different options\n are not supported.\n \"\"\"\n if annotators is not None:\n annotators = tuple(annotators)\n if annotators not in cls._singletons:\n cls._singletons[annotators] = cls(annotators, **options)\n return cls._singletons[annotators]\n\n def __init__(self, annotators=None, timeout=1000, memory=\"3G\"):\n \"\"\"\n Start the CoreNLP server with a system call.\n\n @param annotators: Which annotators to use, e.g.\n [\"tokenize\", \"ssplit\", \"pos\", \"lemma\"]\n @param memory: Java heap memory to use\n \"\"\"\n global _CORENLP_VERSION\n self.annotators = annotators\n self.memory = memory\n _CORENLP_VERSION = get_corenlp_version()\n self._start_corenlp()\n\n def _start_corenlp(self):\n cmd = _get_command(memory=self.memory, annotators=self.annotators)\n log.info(\"Starting corenlp: {cmd}\".format(**locals()))\n self.corenlp_process = subprocess.Popen(cmd, shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n self.lock = threading.Lock()\n self.read_thread = threading.Thread(target=self._read_output_lines)\n self.read_thread.daemon = True\n self.read_thread.start()\n log.debug(\"Waiting for prompt\")\n self._communicate(input=None, wait_for_output=False)\n\n def _read_output_lines(self):\n \"intended to be run as background thread to collect parser output\"\n while True:\n chars = self.corenlp_process.stdout.readline()\n if chars == '': # EOF\n break\n self.out.write(chars)\n\n def _communicate(self, input, wait_for_output=True):\n log.debug(\"Sending {} bytes to corenlp\".format(input and len(input)))\n if self.corenlp_process.poll() is not None:\n logging.info(\"CoreNLP process died, respawning\")\n self._start_corenlp()\n with self.lock:\n self.out = io.BytesIO()\n if input:\n self.corenlp_process.stdin.write(input)\n self.corenlp_process.stdin.write(\"\\n\\n\")\n self.corenlp_process.stdin.flush()\n\n # wait until we get a prompt\n logging.debug(\"Waiting for NLP>\")\n err_buffer = io.BytesIO()\n while True:\n char = self.corenlp_process.stderr.read(1)\n err_buffer.write(char)\n err_buffer.seek(-5, 2)\n if err_buffer.read() == \"NLP> \":\n break\n\n # give stdout a chance to flush\n # is there a better way to do this?\n while True:\n time.sleep(.1)\n result = self.out.getvalue()\n if result or not wait_for_output:\n return result\n\n def parse(self, text):\n \"\"\"Call the server and return the raw results.\"\"\"\n if not isinstance(text, bytes):\n text = unicodedata.normalize('NFKD', text)\n text = text.encode('ascii', errors='ignore')\n text = re.sub(\"\\s+\", \" \", text)\n return self._communicate(text + \"\\n\")\n\n\ndef parse(text, annotators=None, **options):\n s = _StanfordCoreNLP.get_singleton(annotators, **options)\n return s.parse(text, **options)\n\n\n# Stanford CoreNLP 3.4.1. Later versions require Java 8.\n_URL = 'http://nlp.stanford.edu/software/stanford-corenlp-full-2014-08-27.zip'\n\n\ndef _get_corenlp():\n \"\"\"Returns the directory where CoreNLP lives.\n\n Will download CoreNLP if necessary. 
Checks $CORENLP_HOME as well as\n $XTAS_DATA to be backward compatible.\n \"\"\"\n corenlp_home = os.environ.get(\"CORENLP_HOME\")\n return corenlp_home or download_zip(_URL, name='Stanford CoreNLP')\n\n\ndef get_corenlp_version():\n \"Return the corenlp version pointed at by CORENLP_HOME, or None\"\n corenlp_home = _get_corenlp()\n for fn in os.listdir(corenlp_home):\n m = re.match(\"stanford-corenlp-([\\d.]+)-models.jar\", fn)\n if m:\n return m.group(1)\n\n\ndef _get_command(annotators=None, memory=None):\n \"\"\"Return the system (shell) call to run corenlp.\"\"\"\n corenlp_home = _get_corenlp()\n cmd = 'java'\n if memory:\n cmd += ' -Xmx{memory}'.format(**locals())\n cmd += ' -cp \"{}\"'.format(os.path.join(corenlp_home, \"*\"))\n cmd += \" edu.stanford.nlp.pipeline.StanfordCoreNLP -outputFormat xml\"\n if annotators:\n cmd += ' -annotators {}'.format(\",\".join(annotators))\n return cmd\n\n\ndef stanford_to_saf(xml_bytes):\n doc = Document(xml_bytes)\n saf = collections.defaultdict(list)\n\n saf['header'] = {'format': \"SAF\",\n 'format-version': \"0.0\",\n 'processed': {'module': \"corenlp\",\n 'module-version': _CORENLP_VERSION,\n \"started\": datetime.now().isoformat()}\n }\n tokens = {} # (xml_sentid, xml_tokenid) : saf_tokenid\n\n def tokenid(sentid, tokenid):\n if (sentid, tokenid) in tokens:\n raise ValueError(\"Duplicate tokenid: {sentid}, {tokenid}\"\n .format(**locals()))\n saf_tokenid = len(tokens) + 1\n tokens[sentid, tokenid] = saf_tokenid\n return saf_tokenid\n\n for sent in doc.sentences:\n saf['tokens'] += [dict(id=tokenid(sent.id, t.id),\n sentence=sent.id,\n offset=t.character_offset_begin,\n lemma=t.lemma, word=t.word,\n pos=t.pos, pos1=_POSMAP[t.pos])\n for t in sent.tokens]\n\n saf['entities'] += [{'tokens': [tokens[sent.id, t.id]], 'type': t.ner}\n for t in sent.tokens if t.ner not in (None, 'O')]\n\n if sent.collapsed_ccprocessed_dependencies:\n links = sent.collapsed_ccprocessed_dependencies.links\n saf['dependencies'] += [{'child': tokens[sent.id,\n dep.dependent.idx],\n 'parent': tokens[sent.id,\n dep.governor.idx],\n 'relation': dep.type}\n for dep in links if dep.type != 'root']\n\n if doc.coreferences:\n saf['coreferences'] = [[[tokens[m.sentence.id, t.id] for t in m.tokens]\n for m in coref.mentions]\n for coref in doc.coreferences]\n saf['trees'] = [{'sentence': s.id, 'tree': s.parse_string.strip()}\n for s in doc.sentences if s.parse_string is not None]\n\n # remove default and empty elements\n return {k: v for (k, v) in iteritems(saf) if v != []}\n\n_POSMAP = {'CC': 'C',\n 'CD': 'Q',\n 'DT': 'D',\n 'EX': '?',\n 'FW': 'N',\n 'IN': 'P',\n 'JJ': 'A',\n 'JJR': 'A',\n 'JJS': 'A',\n 'LS': 'C',\n 'MD': 'V',\n 'NN': 'N',\n 'NNS': 'N',\n 'NNP': 'M',\n 'NNPS': 'M',\n 'PDT': 'D',\n 'POS': 'O',\n 'PRP': 'O',\n 'PRP$': 'O',\n 'RB': 'B',\n 'RBR': 'B',\n 'RBS': 'B',\n 'RP': 'R',\n 'SYM': '.',\n 'TO': '?',\n 'UH': '!',\n 'VB': 'V',\n 'VBD': 'V',\n 'VBG': 'V',\n 'VBN': 'V',\n 'VBP': 'V',\n 'VBZ': 'V',\n 'WDT': 'D',\n 'WP': 'O',\n 'WP$': 'O',\n 'WRB': 'B',\n ',': '.',\n '.': '.',\n ':': '.',\n '``': '.',\n '$': '.',\n \"''\": '.',\n \"#\": '.',\n '-LRB-': '.',\n '-RRB-': '.',\n }\n", "repo_name": "NLeSC/xtas", "sub_path": "xtas/tasks/_corenlp.py", "file_name": "_corenlp.py", "file_ext": "py", "file_size_in_byte": 8590, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 93, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 59, 
"usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 62, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 63, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 64, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 81, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 91, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 92, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 103, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 111, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 113, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 132, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 132, "usage_type": "attribute"}, {"api_name": "_downloader.download_zip", "line_number": 133, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 139, "usage_type": "call"}, {"api_name": "re.match", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "corenlp_xml.document.Document", "line_number": 159, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 166, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 166, "usage_type": "name"}, {"api_name": "six.iteritems", "line_number": 206, "usage_type": "call"}]} +{"seq_id": "70496264165", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nclass Bmean:\n def load(self,fname):\n fp=open(fname,\"r\")\n fp.readline()\n dat=fp.readline().strip().split(\",\")\n Xa=float(dat[0])\n dx=float(dat[1])\n Nx=int(dat[2])\n\n fp.readline()\n dat=fp.readline().strip().split(\",\")\n dt=float(dat[0])\n Nt=int(dat[1])\n fp.readline()\n\n Z=np.zeros(Nx*Nt)\n for k in range(Nx*Nt):\n Z[k]=float(fp.readline())\n Z=np.reshape(Z,[Nx,Nt])\n\n self.amp=Z\n self.xcod= np.array(range(Nx))*dx+Xa\n self.time= np.array(range(Nt))*dt\n self.Nt=Nt\n self.Nx=Nx\n\n fp.close()\n def Win(self,t1,t2,sig):\n tb=(t2-t1)/(self.Nx-1)*np.arange(self.Nx)+t1\n\n for k in range(self.Nx):\n arg=(self.time-tb[k])/sig\n arg=-0.5*arg*arg;\n Wt=np.exp(arg)\n self.amp[k,:]*=Wt;\n\n \n def show(self,ax):\n xcod=self.xcod\n time=self.time\n Z=self.amp\n ext=[time[0],time[-1],xcod[0],xcod[-1]]\n im=ax.imshow(Z,extent=ext,cmap=\"jet\",origin=\"lower\",aspect=\"auto\",vmin=-0.15,vmax=0.15)\n return(im)\n def FFT(self,bx):\n self.Amp=np.fft.fft(self.amp,axis=1)\n self.df=1/self.time[-1];\n self.freq=np.array(range(self.Nt))*self.df\n freq=self.freq\n xcod=self.xcod\n ext=[freq[0],freq[-1],xcod[0],xcod[-1]]\n im=bx.imshow(np.abs(self.Amp*self.Amp),extent=ext,cmap=\"jet\",origin=\"lower\",aspect=\"auto\",interpolation=\"bilinear\",vmin=0,vmax=500)\n bx.set_xlim([0,3])\n return(im)\n def kfplot(self,ax):\n self.Amp=np.fft.fft(self.amp,axis=1)\n self.AMP=np.fft.ifft(self.Amp,axis=0)\n\n self.df=1/self.time[-1];\n self.dk=1/self.xcod[-1]/(2.*np.pi);\n\n self.kx=np.array(range(self.Nx))*self.dk\n self.freq=np.array(range(self.Nt))*self.df\n ext=[self.freq[0],self.freq[-1],self.kx[0],self.kx[-1]]\n 
ax.imshow(np.abs(self.AMP),cmap=\"jet\",extent=ext,aspect=\"auto\",origin=\"lower\",interpolation=\"bilinear\")\n ax.set_xlim([0,3])\n ax.set_ylim([0,0.15])\n ax.grid(True)\n\n def max_amp(self):\n self.amax=np.max(self.amp,axis=1)\n\n def get_Amp(self,freq):\n num=np.argmin(np.abs(freq-self.freq))\n return(np.abs(self.Amp[:,num]))\n\n\nif __name__==\"__main__\":\n\n fname=\"v1stk.out\"\n #fname=\"sstk.out\"\n bwv=Bmean()\n bwv.load(fname)\n\n fig=plt.figure()\n ax=fig.add_subplot(111)\n #bwv.Win(1.5,10.6,2.0); \n bwv.Win(1.6,11,1.5);\n im=bwv.show(ax)\n\n fig2=plt.figure()\n bx=fig2.add_subplot(111)\n im=bwv.FFT(bx)\n plt.colorbar(im)\n\n fig3=plt.figure()\n cx=fig3.add_subplot(111)\n cx.grid(True)\n bwv.max_amp()\n cx.plot(bwv.xcod,bwv.amax)\n\n fig4=plt.figure()\n dx=fig4.add_subplot(111)\n bwv.kfplot(dx)\n\n fig5=plt.figure()\n ex=fig5.add_subplot(111)\n #freqs=[0.6,0.8,1.0,1.2,1.4]\n freqs=[1.0]\n for frq in freqs:\n Amp=bwv.get_Amp(frq)\n ex.plot(bwv.xcod, Amp)\n ex.grid(True)\n\n plt.show()\n\n", "repo_name": "kimorphe/Granite", "sub_path": "Src/FDM/meanwv.py", "file_name": "meanwv.py", "file_ext": "py", "file_size_in_byte": 3086, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.fft.ifft", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 105, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "39474772123", "text": "import os\nfrom matplotlib import pyplot as plt\n\nfrom tsp import Tsp\n\n\nsizes = [10, 20, 50, 100]\ngenerations = [5000, 10000, 20000]\n\nfor dirname, _, filenames in os.walk('log/test1'):\n for filename in filenames:\n data = {}\n with open(os.path.join(dirname, filename), 'r') as f:\n ans = int(f.readline().split(', ')[1])\n f.readline()\n for size in sizes:\n if filename == 'pr2392.txt' and size in [50, 100]:\n continue\n for generation in generations:\n for solution in range(3):\n f.readline()\n line = f.readline().split(' ')\n dis = [float(line[i]) for i in range(len(line)-1)]\n final = float(f.readline())\n dis.append(final)\n line = f.readline().split(' ')\n route = [int(line[i]) for i in range(len(line)-1)]\n route.append(route[0])\n f.readline()\n\n if generation != 20000:\n continue\n if size == 10:\n data[solution] = [size, final, dis, route]\n elif final < data[solution][1]:\n data[solution] = [size, final, dis, route]\n\n filename = filename.split('.')[0]\n\n plt.title('data: {}, ans: {}'.format(filename, ans))\n plt.plot([0, 20000], [ans, ans], label='ans')\n plt.plot([i * 1000 for i in range(21)], data[0][2],\n label='solution1(size={}): {}'.format(data[0][0], int(data[0][1])))\n plt.plot([i * 1000 for i in range(21)], data[1][2],\n label='solution2(size={}): {}'.format(data[1][0], int(data[1][1])))\n plt.plot([i * 1000 for i in range(21)], data[2][2],\n label='solution3(size={}): {}'.format(data[2][0], int(data[2][1])))\n plt.legend()\n plt.savefig('img/test1/{}.png'.format(filename))\n plt.show()\n\nfor dirname, _, filenames in os.walk('log/test2'):\n for filename in filenames:\n data = {}\n with open(os.path.join(dirname, filename), 'r') as f:\n ans = int(f.readline().split(', ')[1])\n f.readline()\n line = f.readline().split(' ')\n dis = [float(line[i]) for i in range(len(line)-1)]\n final = float(f.readline())\n dis.append(final)\n line = f.readline().split(' ')\n route = [int(line[i]) for i in range(len(line)-1)]\n route.append(route[0])\n\n filename = filename.split('.')[0]\n tsp = Tsp(filename)\n nodes = tsp.get_nodes()\n plt.title('data: {}, ans: {}, final: {}'.format(filename, ans, int(final)))\n plt.scatter([nodes[i][0] for i in range(len(nodes))], [nodes[i][1] for i in range(len(nodes))], color='r')\n plt.plot([nodes[route[i]][0] for i in range(len(nodes)+1)],\n [nodes[route[i]][1] for i in range(len(nodes)+1)],\n color='b', linewidth=0.5)\n plt.savefig('img/test2/{}.png'.format(filename))\n plt.show()\n", "repo_name": "tyhuang0428/HIT-Evolutinary_Computation", "sub_path": "analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 3280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.walk", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tsp.Tsp", "line_number": 67, "usage_type": "call"}, {"api_name": "tsp.get_nodes", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "71996050726", "text": "# -*- coding: utf-8 -*-\nimport unittest\n\nimport numpy as np\nimport six\nimport tensorflow as tf\n\nfrom tests.helper import TestCase\nfrom tfsnippet.bayes import StochasticTensor\nfrom tfsnippet.bayes import Normal\nfrom tfsnippet.utils import *\n\n\nclass MiscTestCase(TestCase):\n\n def test_is_integer(self):\n if six.PY2:\n self.assertTrue(is_integer(long(1)))\n self.assertTrue(is_integer(int(1)))\n for dtype in [np.int, np.int8, np.int16, np.int32, np.int64,\n np.uint, np.uint8, np.uint16, np.uint32, np.uint64]:\n v = np.asarray([1], dtype=dtype)[0]\n self.assertTrue(\n is_integer(v),\n msg='%r should be interpreted as integer.' % (v,)\n )\n self.assertFalse(is_integer(np.asarray(0, dtype=np.int)))\n for v in [float(1.0), '', object(), None, True, (), {}, []]:\n self.assertFalse(\n is_integer(v),\n msg='%r should not be interpreted as integer.' % (v,)\n )\n\n def test_is_float(self):\n float_types = [float, np.float, np.float16, np.float32, np.float64]\n for extra_type in ['float8', 'float128', 'float256']:\n if hasattr(np, extra_type):\n float_types.append(getattr(np, extra_type))\n for dtype in float_types:\n v = np.asarray([1], dtype=dtype)[0]\n self.assertTrue(\n is_float(v),\n msg='%r should be interpreted as float.' 
% (v,)\n )\n self.assertFalse(is_integer(np.asarray(0., dtype=np.float32)))\n for v in [int(1), '', object(), None, True, (), {}, []]:\n self.assertFalse(\n is_float(v),\n msg='%r should not be interpreted as float.' % (v,)\n )\n\n def test_is_dynamic_tensor_like(self):\n for v in [tf.placeholder(tf.int32, ()),\n tf.get_variable('v', shape=(), dtype=tf.int32),\n StochasticTensor(Normal(0., 1.), 1.)]:\n self.assertTrue(\n is_dynamic_tensor_like(v),\n msg='%r should be interpreted as a dynamic tensor.' %\n (v,)\n )\n for v in [1, 1.0, object(), (), [], {},\n np.array([1, 2, 3])]:\n self.assertFalse(\n is_dynamic_tensor_like(v),\n msg='%r should not be interpreted as a dynamic tensor.' %\n (v,)\n )\n\n def test_convert_to_tensor_if_dynamic(self):\n for v in [tf.placeholder(tf.int32, ()),\n tf.get_variable('v', shape=(), dtype=tf.int32),\n StochasticTensor(Normal(0., 1.), 1.)]:\n self.assertIsInstance(\n convert_to_tensor_if_dynamic(v),\n tf.Tensor\n )\n for v in [1, 1.0, object(), (), [], {},\n np.array([1, 2, 3])]:\n self.assertIs(convert_to_tensor_if_dynamic(v), v)\n\n def test_preferred_tensor_dtype(self):\n for dtype in [tf.int16, tf.int32, tf.float32, tf.float64]:\n ph = tf.placeholder(dtype)\n var = tf.get_variable('var_%s' % (dtype.name,), shape=(),\n dtype=dtype)\n self.assertEqual(\n get_preferred_tensor_dtype(ph),\n dtype\n )\n self.assertEqual(\n get_preferred_tensor_dtype(var),\n dtype\n )\n for np_dtype, tf_dtype in [(np.int16, tf.int16),\n (np.int32, tf.int32),\n (np.float32, tf.float32),\n (np.float64, tf.float64)]:\n array = np.asarray([], dtype=np_dtype)\n self.assertEqual(\n get_preferred_tensor_dtype(array),\n tf_dtype\n )\n self.assertEqual(\n get_preferred_tensor_dtype(1),\n tf.as_dtype(np.asarray([1]).dtype)\n )\n self.assertEqual(\n get_preferred_tensor_dtype(1.0),\n tf.as_dtype(np.asarray([1.0]).dtype)\n )\n\n def test_MetricAccumulator(self):\n # test empty initial values\n acc = MetricAccumulator()\n self.assertFalse(acc.has_value)\n self.assertEqual(acc.counter, 0)\n self.assertAlmostEqual(acc.mean, 0.)\n self.assertAlmostEqual(acc.square, 0.)\n self.assertAlmostEqual(acc.var, 0.)\n self.assertAlmostEqual(acc.stddev, 0.)\n self.assertAlmostEqual(acc.weight, 0.)\n\n acc.add(2)\n self.assertTrue(acc.has_value)\n self.assertEqual(acc.counter, 1)\n self.assertAlmostEqual(acc.mean, 2.)\n self.assertAlmostEqual(acc.square, 4.)\n self.assertAlmostEqual(acc.var, 0.)\n self.assertAlmostEqual(acc.stddev, 0.)\n self.assertAlmostEqual(acc.weight, 1.)\n\n acc.add(1, weight=3)\n self.assertTrue(acc.has_value)\n self.assertEqual(acc.counter, 2)\n self.assertAlmostEqual(acc.mean, 1.25)\n self.assertAlmostEqual(acc.square, 1.75)\n self.assertAlmostEqual(acc.var, 0.1875)\n self.assertAlmostEqual(acc.stddev, 0.4330127)\n self.assertAlmostEqual(acc.weight, 4.)\n\n acc.add(7, weight=6)\n self.assertTrue(acc.has_value)\n self.assertEqual(acc.counter, 3)\n self.assertAlmostEqual(acc.mean, 4.7)\n self.assertAlmostEqual(acc.square, 30.1)\n self.assertAlmostEqual(acc.var, 8.01)\n self.assertAlmostEqual(acc.stddev, 2.8301943)\n self.assertAlmostEqual(acc.weight, 10.)\n\n acc.reset()\n self.assertFalse(acc.has_value)\n self.assertEqual(acc.counter, 0)\n self.assertAlmostEqual(acc.mean, 0.)\n self.assertAlmostEqual(acc.square, 0.)\n self.assertAlmostEqual(acc.var, 0.)\n self.assertAlmostEqual(acc.stddev, 0.)\n self.assertAlmostEqual(acc.weight, 0.)\n\n acc.add(1)\n self.assertTrue(acc.has_value)\n self.assertEqual(acc.counter, 1)\n self.assertAlmostEqual(acc.mean, 1.)\n 
self.assertAlmostEqual(acc.square, 1.)\n self.assertAlmostEqual(acc.var, 0.)\n self.assertAlmostEqual(acc.stddev, 0.)\n self.assertAlmostEqual(acc.weight, 1.)\n\n def test_humanize_duration(self):\n cases = [\n (0.0, '0 sec'),\n (1e-8, '1e-08 sec'),\n (0.1, '0.1 sec'),\n (1.0, '1 sec'),\n (1, '1 sec'),\n (1.1, '1.1 secs'),\n (59, '59 secs'),\n (59.9, '59.9 secs'),\n (60, '1 min'),\n (61, '1 min 1 sec'),\n (62, '1 min 2 secs'),\n (119, '1 min 59 secs'),\n (120, '2 mins'),\n (121, '2 mins 1 sec'),\n (122, '2 mins 2 secs'),\n (3599, '59 mins 59 secs'),\n (3600, '1 hr'),\n (3601, '1 hr 1 sec'),\n (3661, '1 hr 1 min 1 sec'),\n (86399, '23 hrs 59 mins 59 secs'),\n (86400, '1 day'),\n (86401, '1 day 1 sec'),\n (172799, '1 day 23 hrs 59 mins 59 secs'),\n (259199, '2 days 23 hrs 59 mins 59 secs'),\n ]\n for seconds, answer in cases:\n result = humanize_duration(seconds)\n self.assertEqual(\n result, answer,\n msg='humanize_duraion(%r) is expected to be %r, but got %r.' %\n (seconds, answer, result)\n )\n\n for seconds, answer in cases[1:]:\n seconds = -seconds\n answer = answer + ' ago'\n result = humanize_duration(seconds)\n self.assertEqual(\n result, answer,\n msg='humanize_duraion(%r) is expected to be %r, but got %r.' %\n (seconds, answer, result)\n )\n\n def test_unique(self):\n self.assertEqual(unique([]), [])\n self.assertEqual(unique([1, 4, 1, 3, 2, 1, 2, 3]), [1, 4, 3, 2])\n self.assertEqual(\n unique(list(range(100, 500)) + list(range(1000, 0, -1))),\n (list(range(100, 500)) + list(range(1000, 499, -1)) +\n list(range(99, 0, -1)))\n )\n\n def test_camel_to_underscore(self):\n def assert_convert(camel, underscore):\n self.assertEqual(\n camel_to_underscore(camel),\n underscore,\n msg='%r should be converted to %r.' % (camel, underscore)\n )\n\n examples = [\n ('simpleTest', 'simple_test'),\n ('easy', 'easy'),\n ('HTML', 'html'),\n ('simpleXML', 'simple_xml'),\n ('PDFLoad', 'pdf_load'),\n ('startMIDDLELast', 'start_middle_last'),\n ('AString', 'a_string'),\n ('Some4Numbers234', 'some4_numbers234'),\n ('TEST123String', 'test123_string'),\n ]\n for camel, underscore in examples:\n assert_convert(camel, underscore)\n assert_convert(underscore, underscore)\n assert_convert('_%s_' % camel, '_%s_' % underscore)\n assert_convert('_%s_' % underscore, '_%s_' % underscore)\n assert_convert('__%s__' % camel, '__%s__' % underscore)\n assert_convert('__%s__' % underscore, '__%s__' % underscore)\n assert_convert(\n '_'.join([s.capitalize() for s in underscore.split('_')]),\n underscore\n )\n assert_convert(\n '_'.join([s.upper() for s in underscore.split('_')]),\n underscore\n )\n\n\nclass AutoReprObjectTestCase(TestCase):\n\n def test_empty(self):\n self.assertEqual(repr(AutoReprObject()), 'AutoReprObject()')\n\n class MyObject(AutoReprObject):\n pass\n self.assertEqual(repr(MyObject()), 'MyObject()')\n\n def test_default_ordering(self):\n class MyObject(AutoReprObject):\n def __init__(self):\n self.b = 3\n self.a = 1\n self.aa = '2'\n self.bb = 4.0\n self.assertEqual(repr(MyObject()), \"MyObject(a=1,aa='2',b=3,bb=4.0)\")\n\n def test_manual_ordering(self):\n class MyObject(AutoReprObject):\n __repr_attributes__ = ('bb', 'aa')\n\n def __init__(self):\n self.b = 3\n self.a = 1\n self.aa = '2'\n self.bb = 4.0\n self.assertEqual(repr(MyObject()), \"MyObject(bb=4.0,aa='2',a=1,b=3)\")\n\n def test_class_attributes(self):\n class MyObject(AutoReprObject):\n a = 1\n b = 2\n self.assertEqual(repr(MyObject()), \"MyObject()\")\n MyObject.__repr_attributes__ = ('a',)\n self.assertEqual(repr(MyObject()), 
\"MyObject(a=1)\")\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "haowen-xu/tfsnippet-pre-alpha", "sub_path": "tests/utils/test_misc.py", "file_name": "test_misc.py", "file_ext": "py", "file_size_in_byte": 10743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tests.helper.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "six.PY2", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.uint", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.uint64", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 54, "usage_type": "attribute"}, {"api_name": "tfsnippet.bayes.StochasticTensor", "line_number": 55, "usage_type": "call"}, {"api_name": "tfsnippet.bayes.Normal", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.get_variable", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 71, "usage_type": "attribute"}, {"api_name": "tfsnippet.bayes.StochasticTensor", "line_number": 72, "usage_type": "call"}, {"api_name": "tfsnippet.bayes.Normal", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.Tensor", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.int16", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 82, "usage_type": "attribute"}, {"api_name": "tensorflow.float64", "line_number": 82, "usage_type": "attribute"}, {"api_name": 
"tensorflow.placeholder", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.get_variable", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.int16", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.int32", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 97, "usage_type": "attribute"}, {"api_name": "tensorflow.float64", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.as_dtype", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.as_dtype", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 109, "usage_type": "call"}, {"api_name": "tests.helper.TestCase", "line_number": 258, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 296, "usage_type": "call"}]} +{"seq_id": "9360738694", "text": "from Crypto.Cipher import AES\nimport base64\nimport codecs\n# import pgpy\n\nBS = 16\n\n\ndef pad(s): return s + (BS - len(s) % BS) * chr(BS - len(s) % BS)\n\n\ndef unpad(s): return s[0:-s[-1]]\n\n\nclass aes:\n\n def __init__(self, key, iv=None):\n\n self.key = key.encode('utf8')\n if iv:\n self.iv = iv.encode('utf-8')\n else:\n self.iv = iv\n\n def encrypt(self, raw):\n raw = pad(raw).encode('utf8')\n if self.iv:\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n return base64.b64encode(cipher.encrypt(raw)).decode('utf8')\n\n def decrypt(self, enc):\n\n enc = base64.b64decode(enc)\n if self.iv:\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n a = cipher.decrypt(enc)\n return unpad(a).decode('utf8')\n\n def decrypt_hexa(self, enc):\n enc = codecs.encode(codecs.decode(\n enc, 'hex'), 'base64').decode()\n\n enc = base64.b64decode(enc)\n if self.iv:\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n a = cipher.decrypt(enc)\n return unpad(a).decode('utf8')\n\n def encrypt_file(self, from_file_name):\n with open(from_file_name, 'rb') as fo:\n plaintext = fo.read().decode('utf8')\n raw = pad(plaintext).encode('utf8')\n if self.iv:\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n enc = base64.b64encode(cipher.encrypt(\n raw)).decode('utf8')\n with open(from_file_name + \".en\", 'wb') as fo:\n fo.write(enc)\n return enc\n\n def decrypt_file(self, to_file_name):\n with open(to_file_name, 'rb') as fo:\n ciphertext = fo.read()\n ciphertext = base64.b64decode(ciphertext)\n if self.iv:\n cipher = AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n a = unpad(cipher.decrypt(ciphertext))\n with open(to_file_name, 'wb') as fo:\n fo.write(a)\n return to_file_name\n\n def decrypt_file_hexa(self, to_file_name):\n with open(to_file_name, 'rb') as fo:\n ciphertext = fo.read()\n ciphertext = codecs.encode(codecs.decode(\n ciphertext, 'hex'), 'base64').decode()\n ciphertext = base64.b64decode(ciphertext)\n if self.iv:\n cipher = 
AES.new(self.key, AES.MODE_CBC, self.iv)\n else:\n cipher = AES.new(self.key, AES.MODE_ECB)\n a = unpad(cipher.decrypt(ciphertext))\n with open(to_file_name, 'wb') as fo:\n fo.write(a)\n return to_file_name\n\n\n# class pgp:\n# def __init__(self, prikey=None, pubkey=None, passphrase=None):\n# self.pubkey, _ = pgpy.PGPKey.from_blob(pubkey)\n# self.prikey, _ = pgpy.PGPKey.from_blob(prikey)\n# self.passphrase = passphrase\n\n# def encrypt(self, data):\n# message = pgpy.PGPMessage.new(data)\n# enc_message = self.pubkey.encrypt(message)\n# encrypted = bytes(enc_message)\n# return encrypted.decode(\"utf8\")\n\n# def decrypt(self, data):\n# message = pgpy.PGPMessage.from_blob(data)\n# if len(self.passphrase) != 0:\n# with self.prikey.unlock(self.passphrase):\n# decrypted = self.prikey.decrypt(message).message\n# else:\n# decrypted = self.prikey.decrypt(message).message\n# return decrypted\n\n# def decrypt_file(self, file_name, file_type=None):\n# with open(file_name, 'rb') as fo:\n# try:\n# if file_type == \"pgp\":\n# ciphertext = fo.read()\n# elif file_type == \"txt\":\n# ciphertext = fo.read().decode('utf8')\n# except Exception as e:\n# return \"Decryption/Decode Error:\" + str(e)\n# dec = self.decrypt(ciphertext)\n# try:\n\n# with open(file_name, 'w') as fo:\n# fo.write(dec)\n# except:\n# with open(file_name, 'wb') as fo:\n# fo.write(dec)\n", "repo_name": "DileepKumarK444/DMS", "sub_path": "utils/encryption.py", "file_name": "encryption.py", "file_ext": "py", "file_size_in_byte": 4314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Crypto.Cipher.AES.new", "line_number": 28, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 28, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 30, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 30, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 30, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 31, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 35, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 37, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 37, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 37, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 39, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 39, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 39, "usage_type": "attribute"}, {"api_name": "codecs.encode", "line_number": 44, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 44, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 47, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 49, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 49, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 49, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 51, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 51, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 51, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 60, "usage_type": "call"}, 
{"api_name": "Crypto.Cipher.AES", "line_number": 60, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 60, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 62, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 62, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 62, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 63, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 72, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 74, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 74, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 74, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 76, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 76, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 76, "usage_type": "attribute"}, {"api_name": "codecs.encode", "line_number": 85, "usage_type": "call"}, {"api_name": "codecs.decode", "line_number": 85, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 87, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 89, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 89, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_CBC", "line_number": 89, "usage_type": "attribute"}, {"api_name": "Crypto.Cipher.AES.new", "line_number": 91, "usage_type": "call"}, {"api_name": "Crypto.Cipher.AES", "line_number": 91, "usage_type": "name"}, {"api_name": "Crypto.Cipher.AES.MODE_ECB", "line_number": 91, "usage_type": "attribute"}]} +{"seq_id": "42833126929", "text": "import os\nimport cv2\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom typing import List\n\nfrom params import Params\nfrom extractor.utils import pil_loader, mkdir_or_delete_existing_files\nfrom extractor.s3fd import SFDDetector\nfrom extractor.fan2d import FaceAlignment\nfrom extractor.landmarks_processor import get_transform_mat, transform_points\n\n\nclass ExtractData:\n def __init__(\n self,\n filepath: str = None,\n rects: List[np.array] = None,\n landmarks: List[np.array] = None,\n final_output_files: List[str] = None,\n ) -> None:\n self.filepath = filepath\n self.file_name = filepath.split(\"/\")[-1].replace(Params.image_extension, \"\")\n self.rects = rects or []\n self.rects_rotation = 0\n self.landmarks = landmarks or []\n self.final_output_files = final_output_files or []\n self.faces_detected = 0\n\n\nclass ExtractFaces:\n def __init__(\n self,\n input_data: List[str],\n image_size: int,\n images_output_path: str,\n landmarks_output_path: str,\n max_faces_from_image: int = 0,\n ):\n\n self.input_data = input_data\n self.image_size = image_size\n self.max_faces_from_image = max_faces_from_image\n self.images_output_path = images_output_path\n self.landmarks_output_path = landmarks_output_path\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n print(\"Device = {}\".format(self.device))\n self.rects_extractor = SFDDetector(\n device=self.device, path_to_detector=\"extractor/models/s3fd.pth\"\n )\n self.landmarks_extractor = FaceAlignment(\n device=self.device,\n path_to_detector=\"extractor/models/face-alignment-net.pt\",\n )\n self.detected_faces = None\n\n def run(self) -> None:\n self.detected_faces = 0\n for image_file_path in tqdm(self.input_data):\n 
data = ExtractData(filepath=image_file_path)\n data = self.process_data(data)\n self.detected_faces += data.faces_detected\n\n def process_data(self, data: ExtractData) -> ExtractData:\n filepath = data.filepath\n image = pil_loader(filepath)\n\n data = self.rects_stage(\n data=data,\n image=image.copy(),\n max_faces_from_image=self.max_faces_from_image,\n rects_extractor=self.rects_extractor,\n )\n data = self.landmarks_stage(\n data=data, image=image.copy(), landmarks_extractor=self.landmarks_extractor\n )\n data = self.final_stage(data=data, image=image, image_size=self.image_size)\n return data\n\n @staticmethod\n def rects_stage(\n data: ExtractData,\n image: np.ndarray,\n max_faces_from_image: int,\n rects_extractor: SFDDetector,\n ) -> ExtractData:\n h, w, c = image.shape\n if min(h, w) < 128:\n # Image is too small\n data.rects = []\n else:\n data.rects = rects_extractor.detect_from_image(image)\n if max_faces_from_image > 0 and len(data.rects) > 0:\n data.rects = data.rects[0:max_faces_from_image]\n\n return data\n\n @staticmethod\n def landmarks_stage(\n data: ExtractData, image: np.ndarray, landmarks_extractor: FaceAlignment\n ) -> ExtractData:\n if not data.rects:\n return data\n\n data.landmarks = landmarks_extractor.get_landmarks_from_image(image, data.rects)\n return data\n\n def final_stage(\n self, data: ExtractData, image: np.ndarray, image_size: int\n ) -> ExtractData:\n data.final_output_files = []\n file_name = data.file_name\n rects = data.rects\n landmarks = data.landmarks\n if landmarks is None:\n return data\n\n face_idx = 0\n for rect, image_landmarks in zip(rects, landmarks):\n image_to_face_mat = get_transform_mat(image_landmarks, image_size)\n face_image = cv2.warpAffine(\n image, image_to_face_mat, (image_size, image_size), cv2.INTER_LANCZOS4\n )\n face_image = Image.fromarray(face_image)\n # save the image\n images_output_filepath = (\n self.images_output_path\n + f\"{file_name}_{face_idx}{Params.image_extension}\"\n )\n face_image.save(images_output_filepath)\n # save the landmakrs\n face_image_landmarks = transform_points(\n points=image_landmarks, mat=image_to_face_mat\n )\n landmarks_output_filepath = (\n self.landmarks_output_path + f\"{file_name}_{face_idx}.npy\"\n )\n np.save(landmarks_output_filepath, face_image_landmarks)\n\n data.final_output_files.append(images_output_filepath)\n face_idx += 1\n\n data.faces_detected = face_idx\n return data\n\n\ndef extract_faces_from_frames(\n input_path: str,\n images_output_path: str,\n landmarks_output_path: str,\n image_size: int,\n max_faces_from_image: int = 0,\n) -> None:\n input_image_paths = [\n os.path.join(input_path, x)\n for x in os.listdir(input_path)\n if x.endswith(Params.image_extension)\n ]\n\n # delete files from aligned or landmarks dir if it's not empty\n mkdir_or_delete_existing_files(path=images_output_path)\n mkdir_or_delete_existing_files(path=landmarks_output_path)\n\n print(\"Extracting faces...\")\n extract_faces = ExtractFaces(\n input_image_paths,\n image_size,\n max_faces_from_image=max_faces_from_image,\n images_output_path=images_output_path,\n landmarks_output_path=landmarks_output_path,\n )\n extract_faces.run()\n\n print(\"-------------------------\")\n print(\"Images found: {}\".format(len(input_image_paths)))\n print(\"Faces detected: {}\".format(extract_faces.detected_faces))\n print(\"-------------------------\")\n", "repo_name": "BenhamOT/dfl-pytorch", "sub_path": "extractor/extract_faces.py", "file_name": "extract_faces.py", "file_ext": "py", "file_size_in_byte": 5975, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 22, "usage_type": "name"}, {"api_name": "params.Params.image_extension", "line_number": 25, "usage_type": "attribute"}, {"api_name": "params.Params", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "extractor.s3fd.SFDDetector", "line_number": 50, "usage_type": "call"}, {"api_name": "extractor.fan2d.FaceAlignment", "line_number": 53, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 61, "usage_type": "call"}, {"api_name": "extractor.utils.pil_loader", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 85, "usage_type": "attribute"}, {"api_name": "extractor.s3fd.SFDDetector", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 102, "usage_type": "attribute"}, {"api_name": "extractor.fan2d.FaceAlignment", "line_number": 102, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 111, "usage_type": "attribute"}, {"api_name": "extractor.landmarks_processor.get_transform_mat", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 123, "usage_type": "call"}, {"api_name": "cv2.INTER_LANCZOS4", "line_number": 124, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 126, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 126, "usage_type": "name"}, {"api_name": "params.Params.image_extension", "line_number": 130, "usage_type": "attribute"}, {"api_name": "params.Params", "line_number": 130, "usage_type": "name"}, {"api_name": "extractor.landmarks_processor.transform_points", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 157, "usage_type": "call"}, {"api_name": "os.path", "line_number": 157, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 158, "usage_type": "call"}, {"api_name": "params.Params.image_extension", "line_number": 159, "usage_type": "attribute"}, {"api_name": "params.Params", "line_number": 159, "usage_type": "name"}, {"api_name": "extractor.utils.mkdir_or_delete_existing_files", "line_number": 163, "usage_type": "call"}, {"api_name": "extractor.utils.mkdir_or_delete_existing_files", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "10692194460", "text": "import torch\n\nimport argparse\n\nfrom const import *\n\n\ndef corpora2idx(sents, ind2idx):\n return [[ind2idx[w] if w in ind2idx else UNK for w in s] for s in sents]\n\nclass Dictionary(object):\n def __init__(self):\n self.word2idx = {\n WORD[X]: X,\n UNK_WORD: X\n }\n self.idx2word = {\n X: WORD[X],\n UNK: UNK_WORD\n }\n self.idx = 2\n\n def add(self, ind):\n if self.word2idx.get(ind) is None:\n self.word2idx[ind] = self.idx\n self.idx2word[self.idx] = ind\n self.idx += 1\n\n def build_idx(self, sents):\n words = [word for sent in sents for word in sent]\n 
word_count = {w: 0 for w in set(words)}\n        for w in words: word_count[w]+=1\n\n        for word, count in word_count.items():\n            self.add(word)\n\n    def __len__(self):\n        return self.idx\n\n    def __str__(self):\n        return \"{}(size = {})\".format(self.__class__.__name__, self.idx)\n\nclass Corpus(object):\n    def __init__(self, train_src, valid_src, save_data, max_len=32):\n        self._train_src = train_src\n        self._valid_src = valid_src\n        self._save_data = save_data\n        self.max_len = max_len\n\n        self.sent_dict = Dictionary()\n        self.tgt_dict = {\n            WORD[X]: X,\n            WORD[O]: O,\n            WORD[BH]: BH,\n            WORD[IH]: IH,\n            WORD[BW]: BW,\n            WORD[IW]: IW\n        }\n\n    def parse_train(self):\n        src_sents, labels = [], []\n        ignore_text = 0\n        for sentence in open(self._train_src):\n            words, tgts = [], []\n\n            objs = sentence.strip().split(\"\\t\")\n            if len(objs) > self.max_len:\n                ignore_text += 1\n                objs = objs[:self.max_len]\n\n            for obj in objs:\n                word, tgt = obj.strip().split(SPLIT_TAG)\n                words += [word]\n                tgts += [tgt]\n\n            src_sents.append(words)\n            labels.append(tgts)\n\n        self.sent_dict.build_idx(src_sents)\n\n        self.src_sents = src_sents\n        self.labels = labels\n        print(\"length of text more than {} numbers: {}\".format(self.max_len, ignore_text))\n\n    def parse_valid(self):\n        src_sents, labels = [], []\n\n        for sentence in open(self._valid_src):\n            words, tgts = [], []\n\n            objs = sentence.strip().split(\"\\t\")\n            if len(objs) > self.max_len:\n                objs = objs[:self.max_len]\n\n            for obj in objs:\n                word, tgt = obj.strip().split(SPLIT_TAG)\n                words += [word]\n                tgts += [tgt]\n\n            src_sents.append(words)\n            labels.append(tgts)\n\n\n        self.valid_src_sents = src_sents\n        self.valid_labels = labels\n\n    def save(self):\n        data = {\n            'trains_score': self.trains_score(),\n            'max_len': self.max_len,\n            'tag_size': len(self.tgt_dict),\n            'dict': {\n                'src': self.sent_dict.word2idx,\n                'vocab_size': len(self.sent_dict),\n                'tgt': self.tgt_dict\n            },\n            'train': {\n                'src': corpora2idx(self.src_sents, self.sent_dict.word2idx),\n                'label': corpora2idx(self.labels, self.tgt_dict),\n            },\n            'valid': {\n                'src': corpora2idx(self.valid_src_sents, self.sent_dict.word2idx),\n                'label': corpora2idx(self.valid_labels, self.tgt_dict),\n            }\n        }\n\n        torch.save(data, self._save_data)\n        print('Finish dumping the corpora data to file - [{}]'.format(self._save_data))\n        print('words length - [{}]'.format(len(self.sent_dict)))\n\n    def trains_score(self):\n        A = {\n            'oH':0,\n            'oW':0,\n            'oo':0,\n            'Hh':1.0,\n            'hh':0,\n            'ho':0,\n            'hW':0,\n            'Ww':1.0,\n            'ww':0,\n            'wo':0,\n            'wH':0,\n        }\n        for label in self.labels:\n            for t in range(len(label) - 1):\n                key = label[t] + label[t+1]\n                assert key in A\n                A[key] += 1.0\n\n        ts = dict()\n        ts['oH'] = A['oH'] / (A['oH'] + A['oW'] + A['oo'])\n        ts['oW'] = A['oW'] / (A['oH'] + A['oW'] + A['oo'])\n        ts['oo'] = A['oo'] / (A['oH'] + A['oW'] + A['oo'])\n\n        ts['hh'] = A['hh'] / (A['hh'] + A['ho'] + A['hW'])\n        ts['ho'] = A['ho'] / (A['hh'] + A['ho'] + A['hW'])\n        ts['hW'] = A['hW'] / (A['hh'] + A['ho'] + A['hW'])\n\n        ts['ww'] = A['ww'] / (A['ww'] + A['wo'] + A['wH'])\n        ts['wo'] = A['wo'] / (A['ww'] + A['wo'] + A['wH'])\n        ts['wH'] = A['wH'] / (A['ww'] + A['wo'] + A['wH'])\n\n        ts[\"Hh\"] = 1.\n        ts[\"Ww\"] = 1.\n\n        return ts\n\n    def process(self):\n        self.parse_train()\n        self.parse_valid()\n        self.save()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='bilstm crf nre')\n    parser.add_argument('--train-src', type=str, required=True,\n                        help='train file')\n    parser.add_argument('--save-data', type=str, required=True,\n                        help='path to save processed data')\n    
parser.add_argument('--valid-src', type=str, default=None,\n help='valid file')\n parser.add_argument('--max-lenth', type=int, default=32,\n help='max length left of sentence [default: 32]')\n args = parser.parse_args()\n corpus = Corpus(args.train_src, args.valid_src, args.save_data, args.max_lenth)\n corpus.process()\n", "repo_name": "ne7ermore/torch-light", "sub_path": "biLSTM-CRF/corpus.py", "file_name": "corpus.py", "file_ext": "py", "file_size_in_byte": 5547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 526, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.save", "line_number": 127, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "9544298831", "text": "# -*- coding: utf-8 -*-\nimport os\nimport patientalloc.src.Database.DatabaseError as DatabaseError\nimport csv\nimport yaml\nfrom scipy import stats\nimport math\nimport random\nfrom datetime import datetime\n\n\nclass Database:\n def __init__(self):\n self.fileName = \"\"\n self.folder = \"\"\n self.fields = []\n self.fieldTypes = []\n self.entries = []\n self.ttest = []\n self.groups = []\n self.order = []\n self.limitedValues = []\n self.rejectedEntries = []\n random.seed(datetime.now())\n\n def createCopy(self):\n database = Database()\n database.fileName = self.fileName\n database.folder = self.folder\n database.fields = self.fields.copy()\n database.fieldTypes = self.fieldTypes.copy()\n database.ttest = self.ttest.copy()\n database.groups = self.groups.copy()\n database.entries = self.entries.copy()\n database.order = self.order.copy()\n database.limitedValues = self.limitedValues.copy()\n return database\n\n def create(self):\n fullpath = self.folder + \"/\" + self.fileName\n self.createWithFullPath(fullpath)\n\n def createWithFullPath(self, fullpath):\n self.__setFileAndPathFromFullpath__(fullpath)\n if(not os.path.isdir(self.folder)):\n os.mkdir(self.folder)\n self.__checkWritingPath__(fullpath)\n if 'Group' not in self.fields:\n self.addField('Group', 0, 'Hidden')\n with open(fullpath, 'w') as dbInfoFile:\n document = {'databaseFile': self.fileName.replace('db', 'csv'),\n 'order': self.order,\n 'fields': dict(),\n 'groups': self.groups,\n 'rejectedEntries': self.rejectedEntries}\n for field in self.fields:\n document['fields'][field] = dict()\n document['fields'][field]['ttest'] = self.getTtestFromField(\n field)\n document['fields'][field]['type'] = self.getFieldTypeFromField(\n field)\n document['fields'][field]['limitedValues'] = self.getLimitedValuesFromField(\n field)\n yaml.dump(document, dbInfoFile)\n fullpath = self.folder + \"/\" + self.fileName.replace('db', 'csv')\n with open(fullpath, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=self.fields)\n writer.writeheader()\n for entry in self.entries:\n writer.writerow(entry)\n\n def __setFileAndPathFromFullpath__(self, fullpath):\n explodedPath = fullpath.split(\"/\")\n self.fileName = explodedPath[len(explodedPath) - 1]\n explodedPath[len(explodedPath) - 1] = \"\"\n self.folder = \"/\".join(explodedPath)\n\n def __checkWritingPath__(self, fullpath):\n if(self.fileName == \"\"):\n raise DatabaseError.EmptyFileNameError()\n\n def loadWithFullPath(self, fullpath):\n self.__setFileAndPathFromFullpath__(fullpath)\n self.__checkReadingPath__(fullpath)\n with open(fullpath, 'r') as dbFile:\n dbInfo = yaml.safe_load(dbFile)\n for field in dbInfo[\"fields\"]:\n self.fields.append(field)\n self.ttest.append(dbInfo[\"fields\"][field][\"ttest\"])\n 
self.fieldTypes.append(dbInfo[\"fields\"][field][\"type\"])\n                self.limitedValues.append(\n                    dbInfo[\"fields\"][field][\"limitedValues\"])\n            if \"rejectedEntries\" in dbInfo:\n                self.rejectedEntries = dbInfo[\"rejectedEntries\"]\n            self.groups = dbInfo['groups']\n            self.order = dbInfo['order']\n            fullpath = self.folder + \"/\" + dbInfo[\"databaseFile\"]\n            with open(fullpath, 'r') as csvfile:\n                reader = csv.DictReader(csvfile)\n                for row in reader:\n                    self.addEntryWithGroup(row)\n\n    def load(self):\n        fullpath = self.folder + \"/\" + self.fileName\n        self.loadWithFullPath(fullpath)\n\n    def getEntryGroup(self, index):\n        if index >= len(self.entries):\n            raise DatabaseError.EntryOutOfRange(index)\n        return self.entries[index][\"Group\"]\n\n    def getEntryId(self, index):\n        if index >= len(self.entries):\n            raise DatabaseError.EntryOutOfRange(index)\n        return self.entries[index][\"SubjectID\"]\n\n    def getTtestFromField(self, field):\n        return self.ttest[self.fields.index(field)]\n\n    def getFieldTypeFromField(self, field):\n        return self.fieldTypes[self.fields.index(field)]\n\n    def getLimitedValuesFromField(self, field):\n        return self.limitedValues[self.fields.index(field)]\n\n    def __checkReadingPath__(self, fullpath):\n        if(self.fileName == \"\"):\n            raise DatabaseError.EmptyFileNameError()\n        if(not os.path.exists(fullpath)):\n            raise DatabaseError.FileNotExistError(fullpath)\n\n    def destroy(self):\n        fullpath = self.folder + \"/\" + self.fileName\n        if(not os.path.exists(fullpath)):\n            raise DatabaseError.FileNotExistError(fullpath)\n        os.remove(fullpath)\n        fullpath = self.folder + \"/\" + self.fileName.replace('db', 'csv')\n        os.remove(fullpath)\n\n    def addField(self, field, ttest, fieldType, limitedValues=''):\n        if(field == \"\"):\n            raise DatabaseError.EmptyFieldError()\n        self.fields.append(field)\n        self.ttest.append(int(ttest))\n        self.fieldTypes.append(fieldType)\n        self.limitedValues.append(limitedValues)\n        self.order.append(field)\n\n    def addFields(self, fields, ttests, fieldTypes):\n        fieldIndex = 0\n        for field in fields:\n            self.addField(field, ttests[fieldIndex], fieldTypes[fieldIndex])\n            fieldIndex += 1\n\n    def addEntryWithGroup(self, entry):\n        for field in entry.keys():\n            if(field not in self.fields):\n                print(field)\n                raise DatabaseError.EntryWithUnknownFields\n        self.entries.append(entry)\n\n    def getPValue(self, field):\n        if(field not in self.fields):\n            print(field)\n            raise DatabaseError.EntryWithUnknownFields\n        indexField = self.fields.index(field)\n        if self.ttest[indexField] == 0:\n            raise DatabaseError.CannotComputeTTestOnField(field)\n        groups = ({self.groups[0]: [], self.groups[1]: []})\n        pvalue = 0\n        entryNumber = 0\n        for entry in self.entries:\n            if entryNumber + 1 not in self.rejectedEntries:\n                if self.getFieldTypeFromField(field) == \"List\":\n                    groups[entry[\"Group\"]].append(\n                        self.getLimitedValuesFromField(field).index(entry[field]))\n                elif self.getFieldTypeFromField(field) == \"Number\":\n                    groups[entry[\"Group\"]].append(int(float(entry[field])))\n            entryNumber = entryNumber + 1\n        if self.getFieldTypeFromField(field) == \"List\":\n            obs = [groups[self.groups[0]].count(\n                0), groups[self.groups[0]].count(1)]\n            obs2 = [groups[self.groups[1]].count(\n                0), groups[self.groups[1]].count(1)]\n            _, pvalue = stats.chisquare(obs, obs2)\n        elif self.getFieldTypeFromField(field) == \"Number\":\n            _, pvalue = stats.ttest_ind(\n                groups[self.groups[0]], groups[self.groups[1]], equal_var=False)\n        return pvalue\n\n    def getGroupsProbabilitiesFromNewEntry(self, newEntry):\n        groupCounter = {self.groups[0]: 0, self.groups[1]:
1}\n for entry in self.entries:\n for group in self.groups:\n if entry['Group'] == group:\n groupCounter[group] = groupCounter[group] + 1\n if abs(groupCounter[self.groups[0]] - groupCounter[self.groups[1]]) >= 4:\n if groupCounter[self.groups[0]] - groupCounter[self.groups[1]] >= 0:\n probas = {self.groups[0]: 0, self.groups[1]: 1}\n else:\n probas = {self.groups[0]: 1, self.groups[1]: 0}\n return probas\n pvalues = dict()\n productsPValues = dict()\n for group in self.groups:\n database = self.createCopy()\n newEntryGroup = dict(newEntry)\n newEntryGroup[\"Group\"] = group\n database.addEntryWithGroup(newEntryGroup)\n minPvalue = 1\n productPValue = 1\n for field in database.fields:\n try:\n pvalue = database.getPValue(field)\n if math.isnan(pvalue):\n pvalue = 1\n if pvalue < minPvalue:\n minPvalue = pvalue\n productPValue *= pvalue\n except DatabaseError.CannotComputeTTestOnField:\n pass\n pvalues[group] = minPvalue\n productsPValues[group] = productPValue\n probas = dict()\n if pvalues[self.groups[0]] == 0 and pvalues[self.groups[1]] == 0 and productsPValues[self.groups[0]] == 0 and productsPValues[self.groups[1]] == 0:\n probas[self.groups[0]] = 0.5\n probas[self.groups[1]] = 0.5\n elif pvalues[self.groups[0]] == pvalues[self.groups[1]]:\n probas[self.groups[0]] = productsPValues[self.groups[0]] / (productsPValues[self.groups[0]] +\n productsPValues[self.groups[1]])\n probas[self.groups[1]] = productsPValues[self.groups[1]] / (productsPValues[self.groups[0]] +\n productsPValues[self.groups[1]])\n else:\n probas[self.groups[0]] = pvalues[self.groups[0]] / (pvalues[self.groups[0]] +\n pvalues[self.groups[1]])\n probas[self.groups[1]] = pvalues[self.groups[1]] / (pvalues[self.groups[0]] +\n pvalues[self.groups[1]])\n return probas\n\n def getGroupFromNewEntry(self, newEntry):\n probas = self.getGroupsProbabilitiesFromNewEntry(newEntry)\n proba = random.random()\n if proba < probas[self.groups[0]]:\n return self.groups[0]\n return self.groups[1]\n\n def rejectEntry(self, index):\n self.rejectedEntries.append(index)\n\n def unrejectEntry(self, index):\n self.rejectedEntries.remove(index)\n", "repo_name": "Ruijan/PatientAllocationSinergia", "sub_path": "build/lib.linux-x86_64-2.7/patientalloc/src/Database/Database.py", "file_name": "Database.py", "file_ext": "py", "file_size_in_byte": 10475, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.seed", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.isdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 46, "usage_type": "call"}, {"api_name": "yaml.dump", "line_number": 64, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 67, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError.EmptyFileNameError", "line_number": 80, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 80, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 86, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 99, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError.EntryOutOfRange", "line_number": 109, "usage_type": "call"}, {"api_name": 
"patientalloc.src.Database.DatabaseError", "line_number": 109, "usage_type": "name"}, {"api_name": "patientalloc.src.Database.DatabaseError.EntryOutOfRange", "line_number": 114, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 114, "usage_type": "name"}, {"api_name": "patientalloc.src.Database.DatabaseError.EmptyFileNameError", "line_number": 128, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 128, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "patientalloc.src.Database.DatabaseError.FileNotExistError", "line_number": 130, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 130, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "patientalloc.src.Database.DatabaseError.FileNotExistError", "line_number": 135, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 135, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 136, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 138, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError.EmptyFieldError", "line_number": 142, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 142, "usage_type": "name"}, {"api_name": "patientalloc.src.Database.DatabaseError.EntryWithUnknownFields", "line_number": 159, "usage_type": "attribute"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 159, "usage_type": "name"}, {"api_name": "patientalloc.src.Database.DatabaseError.EntryWithUnknownFields", "line_number": 165, "usage_type": "attribute"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 165, "usage_type": "name"}, {"api_name": "patientalloc.src.Database.DatabaseError.CannotComputeTTestOnField", "line_number": 168, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 168, "usage_type": "name"}, {"api_name": "scipy.stats.chisquare", "line_number": 185, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 185, "usage_type": "name"}, {"api_name": "scipy.stats.ttest_ind", "line_number": 187, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 187, "usage_type": "name"}, {"api_name": "math.isnan", "line_number": 215, "usage_type": "call"}, {"api_name": "patientalloc.src.Database.DatabaseError.CannotComputeTTestOnField", "line_number": 220, "usage_type": "attribute"}, {"api_name": "patientalloc.src.Database.DatabaseError", "line_number": 220, "usage_type": "name"}, {"api_name": "random.random", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "72382573286", "text": "# -*- coding: utf-8 -*-\nimport pickle\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom gwas.compression.pipe import (\n CompressedBytesReader,\n CompressedBytesWriter,\n CompressedTextReader,\n CompressedTextWriter,\n)\n\n\n@pytest.mark.parametrize(\"compression\", [\"zst\", \"xz\", \"gz\", \"bz2\", \"lz4\"])\ndef test_compressed_text(tmp_path: Path, compression: str):\n x: str = \"test\" * 1000\n\n test_path = tmp_path / f\"test.txt.{compression}\"\n with CompressedTextWriter(test_path) as file_handle:\n file_handle.write(f\"{x}\\n\")\n with 
CompressedTextReader(test_path) as file_handle:\n assert file_handle.read().strip() == x\n\n\n@pytest.mark.parametrize(\"compression\", [\"zst\", \"xz\", \"gz\", \"bz2\", \"lz4\"])\ndef test_compressed_bytes(tmp_path: Path, compression: str):\n x = np.random.rand(1000, 1000)\n\n test_path = tmp_path / f\"test.txt.{compression}\"\n with CompressedBytesWriter(test_path) as file_handle:\n pickle.dump(x, file_handle)\n with CompressedBytesReader(test_path) as file_handle:\n assert np.allclose(pickle.load(file_handle), x)\n", "repo_name": "HippocampusGirl/GWASProtocol", "sub_path": "src/gwas/tests/compression/test_pipe.py", "file_name": "test_pipe.py", "file_ext": "py", "file_size_in_byte": 1092, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 17, "usage_type": "name"}, {"api_name": "gwas.compression.pipe.CompressedTextWriter", "line_number": 21, "usage_type": "call"}, {"api_name": "gwas.compression.pipe.CompressedTextReader", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 28, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gwas.compression.pipe.CompressedBytesWriter", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 33, "usage_type": "call"}, {"api_name": "gwas.compression.pipe.CompressedBytesReader", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 35, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 27, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 27, "usage_type": "attribute"}]} +{"seq_id": "20188209281", "text": "\nimport joblib\nfrom xgboost import XGBClassifier\nimport pandas as pd\n\nclass XGBoostPredictor():\n def __init__(self):\n self.model = joblib.load(\"XGBoost_model_20190601.m\")\n \n def get_predict(self, param_list):\n cols = [\n '01.sui_history', '02.had_sui_message',\n '03.disease', '04.sub_abuse', '05.Gender_disorder',\n '06.mtl_illness', '07.fam_sui_history', '08.emo_imbalance',\n '09.rej_service', '10.relationship issue', '11.fam_problem',\n '12.inter_problem', '13.social_support', '14.maladaptive',\n '15.wk_pressure', '16.mng_problem', '17.setbacks',\n '18.punish&jud_case', '19.eco_burden', '20.debt'\n ]\n\n df = pd.DataFrame([list(param_list)],columns = cols)\n df.astype(int)\n print(df.dtypes)\n print(df)\n result = self.model.predict(df)\n return result[0]", "repo_name": "ncturoger/psychological-conditions-prediction-system", "sub_path": "predictors/XGBoostPredictor.py", "file_name": "XGBoostPredictor.py", "file_ext": "py", "file_size_in_byte": 872, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "joblib.load", "line_number": 8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "23653703999", "text": "#!/usr/bin/env python3\n\"\"\"\nGeneral-purpose stuff is defined here.\n\"\"\"\nimport re\nimport signal\nimport sys\nimport pdb\nimport traceback\nfrom source import log\nfrom dateutil.parser 
import parse as dateutil_parse\nfrom tzlocal import get_localzone\nfrom datetime import datetime\nfrom collections import OrderedDict\n\ndef positive(x):\n if type(x) == str:\n x = x.lower()\n if x in ['yes', 'y', '+', '1', 1, 'true', 't', True]:\n return True\n return False\n\ndef negative(x):\n if type(x) == str:\n x = x.lower()\n if x in ['no', 'n', '-', '0', 0, 'false', 'f', False]:\n return True\n return False\n\ndef quit_string(x):\n if type(x) != str:\n return False\n x = x.lower()\n if x in ['quit', 'exit', 'q', 'end', ':wq']:\n return True\n return False\n\ndef exit_program(signal, frame):\n \"\"\"immediate termination due to -h, bad parameter or bind() fail\"\"\"\n if signal == -1: \n sys.exit(0)\n\n log.newline() # newline\n #log.info('Killing all the threads...')\n sys.exit(0 if signal is None else 1)\n\n# run exit program on SIGINT\nsignal.signal(signal.SIGINT, exit_program)\n\ndef chunks(data, size):\n for i in range(0, len(data), size):\n yield data[i:i+size]\n\ndef normalize_datetime(time=None, silent=False):\n if not time:\n return datetime.now()\n if type(time) == str:\n variant = 0\n while True:\n try:\n return dateutil_parse(time).replace(tzinfo=None)\n except ValueError:\n variant += 1\n if variant == 1: # apache-access format\n time = time.replace(':', ' ', 1)\n else:\n if not silent:\n log.err(' Cannot unify datetime:', time)\n return None\n elif type(time) == datetime:\n return time.replace(tzinfo=None)\n elif type(time) in (int, float):\n # probably UNIX timestamp\n return datetime.fromtimestamp(time)\n else:\n if not silent:\n log.err(' Unknown time type:', time, type(time))\n return None\n\ndef natural_sort(data, key=lambda x: x):\n return sorted(data, key=lambda x: [int(s) if s.isdigit() else s \n for s in re.split(r'(\\d+)', str(key(x)))])\n\ndef find_between(data, startbytes, endbytes, startpos=0, endpos=0, inner=False):\n \"\"\"\n This function goes through data[startpos:endpos] and locates \n substrings 'startbytes.*endbytes'.\n \n inner specifies whether startbytes and endbytes should be \n included in match_string.\n\n Returns:\n list of (absolute_position, match_string)\n \"\"\"\n if endpos == 0:\n endpos = len(data)\n result = []\n while True:\n try:\n \"\"\"set up start, find end from it\"\"\"\n offset = data.index(startbytes, startpos)\n start = offset+(len(startbytes) if inner else 0)\n end = data.index(endbytes, start)+(0 if inner else len(endbytes))\n if end>endpos:\n \"\"\"stop if outside the scope\"\"\"\n break\n result.append((offset, data[start:end]))\n \"\"\"prepare for next search\"\"\"\n startpos = end\n except ValueError: # out of bounds (no more matches)?\n break\n return result\n\n\ndef split_escaped(string, delimiter):\n if len(delimiter) != 1:\n raise ValueError('Invalid delimiter: ' + delimiter)\n ln = len(string)\n i = 0\n j = 0\n while j < ln:\n if string[j] == '\\\\':\n if j + 1 >= ln:\n yield string[i:j].replace('\\\\', '')\n return\n j += 1\n elif string[j] == delimiter:\n yield string[i:j].replace('\\\\', '')\n i = j + 1\n j += 1\n yield string[i:j].replace('\\\\', '')\n\nchunks = lambda data,size: [data[x:x+size] for x in range(0, len(data), size)]\n\ndef get_colored_printable(b):\n \"\"\"\n\n \"\"\"\n color = log.COLOR_BROWN\n if b in (0x9, 0xa, 0xd):\n color = log.COLOR_DARK_GREEN\n b = ord('.')\n elif b<0x20 or b>=0x7f:\n color = log.COLOR_NONE\n b = ord('.')\n return color+chr(b)+log.COLOR_NONE\n\ndef get_colored_printable_hex(b):\n \"\"\"\n\n \"\"\"\n color = log.COLOR_NONE\n if b>=0x20 and b<0x7f:\n color = 
log.COLOR_BROWN\n elif b in (0x9, 0xa, 0xd):\n color = log.COLOR_DARK_GREEN\n return color + '%02x' % b + log.COLOR_NONE\n\ndef hexdump(data):\n \"\"\"\n Prints data as with `hexdump -C` command.\n \"\"\"\n result = []\n line_count = 0\n for chunk in chunks(data, 16):\n hexa = ' '.join(''.join(get_colored_printable_hex(b) for b in byte) \n for byte in [chunk[start:start+2] \n for start in range(0, 16, 2)])\n \n \"\"\"add none with coloring - for layout\"\"\"\n if len(hexa)<199:\n hexa += (log.COLOR_NONE+' '+log.COLOR_NONE)*(16-len(chunk))\n\n result.append(log.COLOR_DARK_GREEN \n + '%08x' % (line_count*16) \n + log.COLOR_NONE \n + ' %-160s' % (hexa) \n + ' |' \n + ''.join(get_colored_printable(b) for b in chunk) + '|')\n line_count += 1\n return result\n\n\ndef merge_dicts(target, addition):\n # recursive merge of dicts\n #pdb.set_trace()\n for ak, av in addition.items():\n av_type = type(av)\n # create empty if new\n if av_type in (list, dict, OrderedDict):\n if ak not in target.keys():\n target[ak] = av_type()\n # append if list, recurse if dict\n if av_type == list:\n target[ak] += list(filter(None, av))\n else:\n merge_dicts(target[ak], av)\n else:\n target[ak] = av\n\n\ndef run_command(command):\n \"\"\"\n Run command in shell.\n\n Args:\n command (str) - command to execute\n\n Returns:\n return code\n standard output\n standard error output\n \"\"\"\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (out, err) = p.communicate()\n return (p.returncode, out, err)\n\ndef diff_lines(lines_1, lines_2, form='D'):\n \"\"\"\n Diffs 2 sets of lines. \n\n Args:\n lines_1 (list of str): first sample\n lines_2 (list of str): second sample\n form (str): diff form to perform\n D - full diff (default)\n 1 - only lines unique to first set\n 2 - only lines unique to second set\n c - only common lines\n d - only different lines\n \"\"\"\n lines = [line for line in difflib.Differ().compare(lines_1, lines_2)\n if not line.startswith('?')]\n \"\"\"alert with respect to form\"\"\"\n if form == '1':\n lines = [line[2:] for line in lines if line.startswith('-')]\n elif form == '2':\n lines = [line[2:] for line in lines if line.startswith('+')]\n elif form == 'c':\n lines = [line[2:] for line in lines \n if not line.startswith(('-', '+'))]\n elif form == 'd':\n lines = [line for line in lines \n if line.startswith(('-', '+'))]\n return lines\t\n\ndef rreplace(string, old, new, count=None):\n return string[::-1].replace(old, new, count)[::-1]\n\n\nseverity_colors = {\n 'critical': log.COLOR_RED,\n 'warning': log.COLOR_BROWN,\n 'notice': log.COLOR_YELLOW,\n 'info': log.COLOR_DARK_GREEN,\n 'none': log.COLOR_GREY,\n 'UNKNOWN': log.COLOR_DARK_GREY,\n}\n\ndb = None # created in main file\n# --\n\n", "repo_name": "lightfaith/warlock", "sub_path": "source/lib.py", "file_name": "lib.py", "file_ext": "py", "file_size_in_byte": 7649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.exit", "line_number": 41, "usage_type": "call"}, {"api_name": "source.log.newline", "line_number": 43, "usage_type": "call"}, {"api_name": "source.log", "line_number": 43, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 45, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 48, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 56, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 61, "usage_type": "call"}, {"api_name": "source.log.err", "line_number": 68, "usage_type": "call"}, {"api_name": "source.log", "line_number": 68, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "name"}, {"api_name": "source.log.err", "line_number": 77, "usage_type": "call"}, {"api_name": "source.log", "line_number": 77, "usage_type": "name"}, {"api_name": "re.split", "line_number": 82, "usage_type": "call"}, {"api_name": "source.log.COLOR_BROWN", "line_number": 139, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 139, "usage_type": "name"}, {"api_name": "source.log.COLOR_DARK_GREEN", "line_number": 141, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 141, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 144, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 144, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 146, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 146, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 152, "usage_type": "name"}, {"api_name": "source.log.COLOR_BROWN", "line_number": 154, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 154, "usage_type": "name"}, {"api_name": "source.log.COLOR_DARK_GREEN", "line_number": 156, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 156, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 157, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 157, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 172, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 172, "usage_type": "name"}, {"api_name": "source.log.COLOR_DARK_GREEN", "line_number": 174, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 174, "usage_type": "name"}, {"api_name": "source.log.COLOR_NONE", "line_number": 176, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 176, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 190, "usage_type": "name"}, {"api_name": "source.log.COLOR_RED", "line_number": 253, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 253, "usage_type": "name"}, {"api_name": "source.log.COLOR_BROWN", "line_number": 254, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 254, "usage_type": "name"}, {"api_name": "source.log.COLOR_YELLOW", "line_number": 255, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 255, "usage_type": "name"}, {"api_name": "source.log.COLOR_DARK_GREEN", "line_number": 256, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 256, "usage_type": "name"}, {"api_name": "source.log.COLOR_GREY", "line_number": 257, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 257, "usage_type": "name"}, {"api_name": "source.log.COLOR_DARK_GREY", "line_number": 258, "usage_type": "attribute"}, {"api_name": "source.log", "line_number": 258, "usage_type": "name"}]} +{"seq_id": "25580749883", "text": "# -*- coding: 
utf-8 -*-\n\"\"\"\nCreated on Sat Apr 23 21:41:59 2022\n\n@author: yuriy\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nimport sys\n\ndef show_img(img, bw = False):\n fig = plt.figure(figsize=(15, 15))\n ax = fig.gca()\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n if bw:\n ax.imshow(img, cmap='Greys_r')\n else:\n ax.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n plt.show()\n \n#Metoda dla wszyszukiwania matchow sposrod zdjecia i wideo\n# za pomoca Brute-Force \ndef BruteForce(img1, video):\n \n #Prszypisanie wideo do parametrow captured i img2\n captured, img2 = video.read()\n \n #Wyswietlanie zdjecia ktore bylo wziete do porownania\n show_img(img1)\n \n #orb - stworzenie metody dla wyszukiwania lokalnych cech w obrazach \n # korzystalem z orb\n orb = cv2.ORB_create()\n \n #while ktory bedzie pracowal poki sa klatki wideo\n while captured:\n #try dla tego zeby zlapac exception kiedy wideo konczy sie\n try:\n captured, img2 = video.read()\n \n #Resize klatki wideo dla łatwiejszej pracy uzytkownika\n # i dla szybszej pracy programu\n img2 = cv2.resize(img2,(600,600))\n \n #kolorowanie zdjęć na szary\n grey1 = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n grey2 = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)\n \n #Obliczanie deskryptorow dla zdjec\n kp1, des1 = orb.detectAndCompute(grey1, None)\n kp2, des2 = orb.detectAndCompute(grey2, None)\n \n #Stworzenie objektu BFMatcher\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n \n #Dopasowywanie deskryptorow\n matches = bf.match(des1,des2)\n #Sortowanie ich według odległości.\n matches = sorted(matches, key = lambda x:x.distance)\n \n #Rysujemy matchi\n img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:5], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n \n #Wyswietlanie zdjecia\n show_img(img3, True)\n except Exception:\n print(\"The End\")\n \n \n \ndef main(args): \n video = cv2.VideoCapture(args.wideo) #zapisywania wideo\n img1 = cv2.imread(args.img) #zapisywania zdjecia\n \n #sprawdzanie czy nie sa pustymi img1 i video\n #jezeli jakis z elementow jest pusty - zamyka program\n if img1 is None:\n print(\"img1 is None\")\n sys.exit()\n if video is None:\n print(\"img2 is None\")\n sys.exit()\n \n #Resize zdjecia dla łatwiejszej pracy uzytkownika\n # i dla szybszej pracy programu \n img1 = cv2.resize(img1,(600,600))\n \n #przypisywanie zdjecia i wideo do metody\n BruteForce(img1,video)\n \n#Metoda do pobrania wideo i zdjecia\n#-i --img1 zdjecie\n#-w --wideo wideo \ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i','--img',default=0)\n parser.add_argument('-w','--wideo',default=0)\n return parser.parse_args()\n\nif __name__ == '__main__':\n main(parse_arguments())", "repo_name": "YuriiKykot/Wizja-maszynowa", "sub_path": "Projekt_3/Brute-Force_withoutSquare.py", "file_name": "Brute-Force_withoutSquare.py", "file_ext": "py", "file_size_in_byte": 3196, "program_lang": "python", "lang": "pl", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.figure", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, 
{"api_name": "cv2.ORB_create", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.BFMatcher", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.NORM_HAMMING", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.drawMatches", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 77, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 90, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "36538715231", "text": "'''\r\nCreated on 10/04/2014\r\n\r\n@author: Bin Liang\r\n'''\r\nfrom scikits.talkbox.features import mfcc\r\nfrom scipy.io import wavfile\r\nfrom hmmlearn import hmm\r\nimport numpy as np\r\n\r\nclass Speech:\r\n ''' speech class '''\r\n \r\n def __init__(self, dirName, fileName):\r\n self.fileName = fileName # file name\r\n self.features = None # feature matrix\r\n self.soundSamplerate, self.sound = wavfile.read(dirName + fileName)\r\n \r\n # category assignment\r\n idx1 = self.fileName.find('_')\r\n idx2 = self.fileName.find('.')\r\n self.categoryId = fileName[idx1 + 1 : idx2] # speech category\r\n \r\n \r\n def extractFeature(self):\r\n ''' mfcc feature extraction '''\r\n self.features = mfcc(self.sound, nwin=int(self.soundSamplerate * 0.03), fs=self.soundSamplerate, nceps=13)[0]\r\n\r\nclass SpeechRecognizer:\r\n ''' class for speech recognizer '''\r\n \r\n def __init__(self, categoryId):\r\n self.categoryId = categoryId\r\n self.trainData = []\r\n self.hmmModel = None\r\n \r\n self.nComp = 5 # number of states\r\n self.nMix = 2 # number of mixtures\r\n self.covarianceType = 'diag' # covariance type\r\n self.n_iter = 10 # number of iterations\r\n self.startprobPrior = None\r\n self.transmatPrior = None\r\n self.bakisLevel = 2\r\n \r\n def initModelParam(self, nComp, nMix, covarianceType, n_iter, bakisLevel):\r\n ''' init params for hmm model '''\r\n \r\n self.nComp = nComp # number of states\r\n self.nMix = nMix # number of mixtures\r\n self.covarianceType = covarianceType # covariance type\r\n self.n_iter = n_iter # number of iterations\r\n self.bakisLevel = bakisLevel\r\n \r\n startprobPrior, transmatPrior = self.initByBakis(nComp, bakisLevel)\r\n self.startprobPrior = startprobPrior\r\n self.transmatPrior = transmatPrior\r\n \r\n def initByBakis(self, nComp, bakisLevel):\r\n ''' init start_prob and transmat_prob by Bakis model ''' \r\n startprobPrior = np.zeros(nComp)\r\n startprobPrior[0 : bakisLevel - 1] = 1./ (bakisLevel - 1)\r\n \r\n transmatPrior = self.getTransmatPrior(nComp, bakisLevel)\r\n \r\n return startprobPrior, transmatPrior\r\n \r\n def getTransmatPrior(self, nComp, bakisLevel):\r\n ''' get transmat prior '''\r\n transmatPrior = (1. 
/ bakisLevel) * np.eye(nComp)\r\n \r\n for i in range(nComp - (bakisLevel - 1)):\r\n for j in range(bakisLevel - 1):\r\n transmatPrior[i, i + j + 1] = 1. / bakisLevel\r\n \r\n for i in range(nComp - bakisLevel + 1, nComp):\r\n for j in range(nComp - i -j):\r\n transmatPrior[i, i + j] = 1. / (nComp - i)\r\n \r\n return transmatPrior\r\n \r\n def getHmmModel(self):\r\n ''' get hmm model from training data '''\r\n\r\n # GaussianHMM\r\n# model = hmm.GaussianHMM(numStates, \"diag\") # initialize hmm model\r\n\r\n # Gaussian Mixture HMM\r\n model = hmm.GMMHMM(n_components = self.nComp, n_mix = self.nMix, \\\r\n transmat_prior = self.transmatPrior, startprob_prior = self.startprobPrior, \\\r\n covariance_type = self.covarianceType, n_iter = self.n_iter)\r\n model.fit(self.trainData) # get optimal parameters\r\n\r\n self.hmmModel = model\r\n \r\n \r\n", "repo_name": "drbinliang/Speech_Recognition", "sub_path": "src/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 3454, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 59, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.io.wavfile.read", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 17, "usage_type": "name"}, {"api_name": "scikits.talkbox.features.mfcc", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 69, "usage_type": "call"}, {"api_name": "hmmlearn.hmm.GMMHMM", "line_number": 88, "usage_type": "call"}, {"api_name": "hmmlearn.hmm", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "32606029204", "text": "\"\"\"\nModule contains list of functions that should be useful for validation.\nExample of use (how things play together):\n >>> option_dict = {\"some_option\": \"A\"}\n >>> validators = [\n ... is_required(\"name\"),\n ... value_in(\"some_option\", [\"B\", \"C\"])\n ... ]\n >>> report_list = run_collection_of_option_validators(\n ... option_dict,\n ... validators\n ... )\n >>> for report in report_list:\n ... print(report)\n ...\n ...\n ERROR REQUIRED_OPTION_IS_MISSING: {\n 'option_type': 'option',\n 'option_names': ['name']\n }\n ERROR INVALID_OPTION_VALUE: {\n 'option_name': 'some_option',\n 'option_value': 'A',\n 'allowed_values': ['B', 'C']\n }\n\nSometimes we need to validate the normalized value but in report we need the\noriginal value. 
For this purposes is ValuePair and helpers like values_to_pairs\nand pairs_to_values.\n\nTODO provide parameters to provide forceable error/warning for functions that\n does not support it\n\"\"\"\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n)\n\nfrom collections import namedtuple\nimport re\n\nfrom pcs.common.tools import is_string\nfrom pcs.lib import reports\nfrom pcs.lib.pacemaker.values import (\n timeout_to_seconds,\n validate_id,\n)\n\n\n### normalization\n\nclass ValuePair(namedtuple(\"ValuePair\", \"original normalized\")):\n \"\"\"\n Storage for the original value and its normalized form\n \"\"\"\n\n @staticmethod\n def get(val):\n return val if isinstance(val, ValuePair) else ValuePair(val, val)\n\ndef values_to_pairs(option_dict, normalize):\n \"\"\"\n Return a dict derived from option_dict where every value is instance of\n ValuePair.\n\n dict option_dict contains values that should be paired with the normalized\n form\n callable normalize should take key and value and return normalized form.\n Function option_value_normalization can be good base for create such\n callable.\n \"\"\"\n option_dict_with_pairs = {}\n for key, value in option_dict.items():\n if not isinstance(value, ValuePair):\n value = ValuePair(\n original=value,\n normalized=normalize(key, value),\n )\n option_dict_with_pairs[key] = value\n return option_dict_with_pairs\n\ndef pairs_to_values(option_dict):\n \"\"\"\n Take a dict which has OptionValuePairs as its values and return dict with\n normalized forms as its values. It is reverse function to\n values_to_pairs.\n\n dict option_dict contains OptionValuePairs as its values\n \"\"\"\n raw_option_dict = {}\n for key, value in option_dict.items():\n if isinstance(value, ValuePair):\n value = value.normalized\n raw_option_dict[key] = value\n return raw_option_dict\n\ndef option_value_normalization(normalization_map):\n \"\"\"\n Return function that takes key and value and return the normalized form.\n\n dict normalization_map has on each key function that takes value and return\n its normalized form.\n \"\"\"\n def normalize(key, value):\n return(\n value if key not in normalization_map\n else normalization_map[key](value)\n )\n return normalize\n\n### keys validators\n\ndef depends_on_option(\n option_name, prerequisite_option, option_type=\"\", prerequisite_type=\"\"\n):\n \"\"\"\n Get a validator reporting REQUIRED_OPTION_IS_MISSING when the option_dict\n does not contain the prerequisite_option and contains the option_name.\n\n string option_name -- name of the option to check\n string prerequisite_option -- name of the option which is a prerequisite\n string option_type -- describes a type of the option for reporting purposes\n \"\"\"\n def validate(option_dict):\n if (\n option_name in option_dict\n and\n prerequisite_option not in option_dict\n ):\n return [reports.prerequisite_option_is_missing(\n option_name,\n prerequisite_option,\n option_type,\n prerequisite_type\n )]\n return []\n return validate\n\ndef is_required(option_name, option_type=\"\"):\n \"\"\"\n Return a the function that takes option_dict and returns report list\n (with REQUIRED_OPTION_IS_MISSING when option_dict does not contain\n option_name).\n\n string option_name is name of option of option_dict that will be tested\n string option_type describes type of option for reporting purposes\n \"\"\"\n def validate(option_dict):\n if option_name not in option_dict:\n return [reports.required_option_is_missing(\n [option_name],\n option_type,\n )]\n return 
[]\n return validate\n\ndef is_required_some_of(option_name_list, option_type=\"\"):\n \"\"\"\n Get a validator reporting REQUIRED_OPTION_IS_MISSING report when the\n option_dict does not contain at least one item from the option_name_list.\n\n iterable option_name_list -- names of options of the option_dict to test\n string option_type -- describes a type of the option for reporting purposes\n \"\"\"\n def validate(option_dict):\n found_names = set.intersection(\n set(option_dict.keys()),\n set(option_name_list)\n )\n if len(found_names) < 1:\n return [reports.required_option_of_alternatives_is_missing(\n sorted(option_name_list),\n option_type,\n )]\n return []\n return validate\n\ndef mutually_exclusive(mutually_exclusive_names, option_type=\"option\"):\n \"\"\"\n Return a list with report MUTUALLY_EXCLUSIVE_OPTIONS when in option_dict\n appears more than one of mutually_exclusive_names.\n\n list|set mutually_exclusive_names contains option names that cannot appear\n together\n string option_type describes type of option for reporting purposes\n \"\"\"\n def validate(option_dict):\n found_names = set.intersection(\n set(option_dict.keys()),\n set(mutually_exclusive_names)\n )\n if len(found_names) > 1:\n return [reports.mutually_exclusive_options(\n sorted(found_names),\n option_type,\n )]\n return []\n return validate\n\ndef names_in(\n allowed_name_list, name_list, option_type=\"option\",\n code_to_allow_extra_names=None, allow_extra_names=False\n):\n \"\"\"\n Return a list with report INVALID_OPTION when in name_list is a name that is\n not in allowed_name_list.\n\n list allowed_name_list contains names which are valid\n list name_list contains names for validation\n string option_type describes type of option for reporting purposes\n string code_to_allow_extra_names is code for forcing invalid names. If it is\n empty report INVALID_OPTION is non-forceable error. If it is not empty\n report INVALID_OPTION is forceable error or warning.\n bool allow_extra_names is flag that complements code_to_allow_extra_names\n and determines wheter is report INVALID_OPTION forceable error or\n warning.\n \"\"\"\n invalid_names = set(name_list) - set(allowed_name_list)\n if not invalid_names:\n return []\n\n create_report = reports.get_problem_creator(\n code_to_allow_extra_names,\n allow_extra_names\n )\n return [create_report(\n reports.invalid_option,\n sorted(invalid_names),\n sorted(allowed_name_list),\n option_type,\n )]\n\n### values validators\n\ndef value_cond(\n option_name, predicate, value_type_or_enum, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Return a validation function that takes option_dict and returns report list\n (with INVALID_OPTION_VALUE when option_name is not in allowed_values).\n\n string option_name is name of option of option_dict that will be tested\n function predicate takes one parameter, normalized value\n list or string value_type_or_enum list of possible values or string\n description of value type\n string option_name_for_report is substitued by option name if is None\n string code_to_allow_extra_values is code for forcing invalid names. If it\n is empty report INVALID_OPTION is non-forceable error. 
If it is not\n empty report INVALID_OPTION is forceable error or warning.\n bool allow_extra_values is flag that complements code_to_allow_extra_values\n and determines wheter is report INVALID_OPTION forceable error or\n warning.\n \"\"\"\n @_if_option_exists(option_name)\n def validate(option_dict):\n value = ValuePair.get(option_dict[option_name])\n\n if not predicate(value.normalized):\n create_report = reports.get_problem_creator(\n code_to_allow_extra_values,\n allow_extra_values\n )\n return [create_report(\n reports.invalid_option_value,\n option_name_for_report if option_name_for_report is not None\n else option_name\n ,\n value.original,\n value_type_or_enum,\n )]\n\n return []\n return validate\n\ndef value_empty_or_valid(option_name, validator):\n \"\"\"\n Get a validator running the specified validator if the value is not empty\n\n string option_name -- name of the option to check\n function validator -- validator to run when the value is not an empty string\n \"\"\"\n @_if_option_exists(option_name)\n def validate(option_dict):\n value = ValuePair.get(option_dict[option_name])\n return (\n [] if is_empty_string(value.normalized)\n else validator(option_dict)\n )\n return validate\n\ndef value_id(option_name, option_name_for_report=None, id_provider=None):\n \"\"\"\n Get a validator reporting ID errors and optionally booking IDs along the way\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n IdProvider id_provider -- used to check id uniqueness if set\n \"\"\"\n @_if_option_exists(option_name)\n def validate(option_dict):\n value = ValuePair.get(option_dict[option_name])\n report_list = []\n validate_id(value.normalized, option_name_for_report, report_list)\n if id_provider is not None and not report_list:\n report_list.extend(\n id_provider.book_ids(value.normalized)\n )\n return report_list\n return validate\n\ndef value_in(\n option_name, allowed_values, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Special case of value_cond function.returned function checks whenever value\n is included allowed_values. If not list of ReportItem will be returned.\n\n option_name -- string, name of option to check\n allowed_values -- list of strings, list of possible values\n option_name_for_report -- string, it is substitued by option name if is None\n code_to_allow_extra_values -- string, code for forcing invalid names. If it\n is empty report INVALID_OPTION is non-forceable error. 
If it is not\n empty report INVALID_OPTION is forceable error or warning.\n allow_extra_values -- bool, flag that complements code_to_allow_extra_values\n and determines wheter is report INVALID_OPTION forceable error or\n warning.\n \"\"\"\n return value_cond(\n option_name,\n lambda normalized_value: normalized_value in allowed_values,\n allowed_values,\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_nonnegative_integer(\n option_name, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Get a validator reporting INVALID_OPTION_VALUE when the value is not\n an integer greater than -1\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n string code_to_allow_extra_values -- create a report forceable by this code\n bool allow_extra_values -- create a warning instead of an error if True\n \"\"\"\n return value_cond(\n option_name,\n lambda value: is_integer(value, 0),\n \"a non-negative integer\",\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_not_empty(\n option_name, value_type_or_enum, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Get a validator reporting INVALID_OPTION_VALUE when the value is empty\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n string code_to_allow_extra_values -- create a report forceable by this code\n bool allow_extra_values -- create a warning instead of an error if True\n \"\"\"\n return value_cond(\n option_name,\n lambda value: not is_empty_string(value),\n value_type_or_enum,\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_port_number(\n option_name, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Get a validator reporting INVALID_OPTION_VALUE when the value is not a TCP\n or UDP port number\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n string code_to_allow_extra_values -- create a report forceable by this code\n bool allow_extra_values -- create a warning instead of an error if True\n \"\"\"\n return value_cond(\n option_name,\n is_port_number,\n \"a port number (1-65535)\",\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_port_range(\n option_name, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Get a validator reporting INVALID_OPTION_VALUE when the value is not a TCP\n or UDP port range\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n string code_to_allow_extra_values -- create a report forceable by this code\n bool allow_extra_values -- create a warning instead of an error if True\n \"\"\"\n return value_cond(\n option_name,\n lambda value: (\n matches_regexp(value, \"^[0-9]+-[0-9]+$\")\n and\n all([is_port_number(part) for part in value.split(\"-\", 1)])\n 
),\n \"port-port\",\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_positive_integer(\n option_name, option_name_for_report=None,\n code_to_allow_extra_values=None, allow_extra_values=False\n):\n \"\"\"\n Get a validator reporting INVALID_OPTION_VALUE when the value is not\n an integer greater than zero\n\n string option_name -- name of the option to check\n string option_name_for_report -- substitued by the option_name if not set\n string code_to_allow_extra_values -- create a report forceable by this code\n bool allow_extra_values -- create a warning instead of an error if True\n \"\"\"\n return value_cond(\n option_name,\n lambda value: is_integer(value, 1),\n \"a positive integer\",\n option_name_for_report=option_name_for_report,\n code_to_allow_extra_values=code_to_allow_extra_values,\n allow_extra_values=allow_extra_values,\n )\n\ndef value_time_interval(option_name, option_name_for_report=None):\n return value_cond(\n option_name,\n lambda normalized_value:\n timeout_to_seconds(normalized_value) is not None\n ,\n \"time interval (e.g. 1, 2s, 3m, 4h, ...)\",\n option_name_for_report=option_name_for_report,\n )\n\n### tools and predicates\n\ndef run_collection_of_option_validators(option_dict, validator_list):\n \"\"\"\n Return a list with reports (ReportItems) about problems inside items of\n option_dict.\n\n dict option_dict is source of values to validate according to specification\n list validator_list contains callables that takes option_dict and returns\n list of reports\n \"\"\"\n report_list = []\n for validate in validator_list:\n report_list.extend(validate(option_dict))\n return report_list\n\ndef is_empty_string(value):\n \"\"\"\n Check if the specified value is an empty string\n\n mixed value -- value to check\n \"\"\"\n return is_string(value) and not value\n\ndef is_integer(value, at_least=None, at_most=None):\n \"\"\"\n Check if the specified value is an integer, optionally check a range\n\n mixed value -- string, int or float, value to check\n \"\"\"\n try:\n if isinstance(value, float):\n return False\n value_int = int(value)\n if at_least is not None and value_int < at_least:\n return False\n if at_most is not None and value_int > at_most:\n return False\n except ValueError:\n return False\n return True\n\ndef is_port_number(value):\n \"\"\"\n Check if the specified value is a TCP or UDP port number\n\n mixed value -- string, int or float, value to check\n \"\"\"\n return is_integer(value, 1, 65535)\n\ndef matches_regexp(value, regexp):\n \"\"\"\n Check if the specified value matches the specified regular expression\n\n mixed value -- string, int or float, value to check\n mixed regexp -- string or RegularExpression to match the value against\n \"\"\"\n if not hasattr(regexp, \"match\"):\n regexp = re.compile(regexp)\n return regexp.match(value) is not None\n\ndef _if_option_exists(option_name):\n def params_wrapper(validate_func):\n def prepare(option_dict):\n if option_name not in option_dict:\n return []\n return validate_func(option_dict)\n return prepare\n return params_wrapper\n", "repo_name": "wangww631/pcs", "sub_path": "pcs/lib/validate.py", "file_name": "validate.py", "file_ext": "py", "file_size_in_byte": 18419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.namedtuple", "line_number": 53, "usage_type": "call"}, {"api_name": 
"pcs.lib.reports.prerequisite_option_is_missing", "line_number": 131, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 131, "usage_type": "name"}, {"api_name": "pcs.lib.reports.required_option_is_missing", "line_number": 151, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 151, "usage_type": "name"}, {"api_name": "pcs.lib.reports.required_option_of_alternatives_is_missing", "line_number": 172, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 172, "usage_type": "name"}, {"api_name": "pcs.lib.reports.mutually_exclusive_options", "line_number": 194, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 194, "usage_type": "name"}, {"api_name": "pcs.lib.reports.get_problem_creator", "line_number": 223, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 223, "usage_type": "name"}, {"api_name": "pcs.lib.reports.invalid_option", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 228, "usage_type": "name"}, {"api_name": "pcs.lib.reports.get_problem_creator", "line_number": 261, "usage_type": "call"}, {"api_name": "pcs.lib.reports", "line_number": 261, "usage_type": "name"}, {"api_name": "pcs.lib.reports.invalid_option_value", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pcs.lib.reports", "line_number": 266, "usage_type": "name"}, {"api_name": "pcs.lib.pacemaker.values.validate_id", "line_number": 305, "usage_type": "call"}, {"api_name": "pcs.lib.pacemaker.values.timeout_to_seconds", "line_number": 457, "usage_type": "call"}, {"api_name": "pcs.common.tools.is_string", "line_number": 485, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 521, "usage_type": "call"}]} +{"seq_id": "70564802724", "text": "# third party\n# third party\nimport torch as th\n\n# syft absolute\nimport syft as sy\nfrom syft.lib.python.slice import Slice\nfrom syft.proto.lib.python.slice_pb2 import Slice as Slice_PB\n\n\ndef test_slice_serde() -> None:\n syft_slice = Slice(1, 3, -1)\n serialized = syft_slice._object2proto()\n\n assert isinstance(serialized, Slice_PB)\n\n deserialized = Slice._proto2object(proto=serialized)\n\n assert isinstance(deserialized, Slice)\n assert deserialized.id == syft_slice.id\n assert deserialized.start == syft_slice.start\n assert deserialized.stop == syft_slice.stop\n assert deserialized.step == syft_slice.step\n\n\ndef test_slice_send(client: sy.VirtualMachineClient) -> None:\n syft_slice = Slice(1, 3, None)\n ptr = syft_slice.send(client)\n\n # Check pointer type\n assert ptr.__class__.__name__ == \"SlicePointer\"\n\n # Check that we can get back the object\n res = ptr.get()\n assert res.start == syft_slice.start\n assert res.stop == syft_slice.stop\n assert res.step == syft_slice.step\n\n\ndef test_slice_tensor(client) -> None:\n syft_slice = Slice(0, 1)\n slice_ptr = syft_slice.send(client)\n\n t = th.Tensor([1, 2, 3])\n t_ptr = t.send(client)\n res_ptr = t_ptr[slice_ptr]\n\n # Check that we can get back the object\n res = res_ptr.get()\n assert res == t[0:1]\n\n res_ptr2 = t_ptr[0:1]\n res2 = res_ptr2.get()\n\n assert res == res2\n\n last_ptr = t_ptr[-1]\n last = last_ptr.item().get()\n assert last == 3\n", "repo_name": "datax-io/pysyft-parcel", "sub_path": "packages/syft/tests/syft/lib/python/slice/slice_serde_test.py", "file_name": "slice_serde_test.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": 
[{"api_name": "syft.lib.python.slice.Slice", "line_number": 12, "usage_type": "call"}, {"api_name": "syft.proto.lib.python.slice_pb2.Slice", "line_number": 15, "usage_type": "argument"}, {"api_name": "syft.lib.python.slice.Slice._proto2object", "line_number": 17, "usage_type": "call"}, {"api_name": "syft.lib.python.slice.Slice", "line_number": 17, "usage_type": "name"}, {"api_name": "syft.lib.python.slice.Slice", "line_number": 19, "usage_type": "argument"}, {"api_name": "syft.VirtualMachineClient", "line_number": 26, "usage_type": "attribute"}, {"api_name": "syft.lib.python.slice.Slice", "line_number": 27, "usage_type": "call"}, {"api_name": "syft.lib.python.slice.Slice", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "12036815254", "text": "\"\"\"Recipe for evaluating a grapheme-to-phoneme system with librispeech lexicon.\n\nThe script may be use in isolation or in combination with Orion to fit\nhyperparameters that do not require model retraining (e.g. Beam Search)\n\nAuthors\n * Mirco Ravanelli 2022\n * Artem Ploujnikov 2022\n\"\"\"\n\n\nfrom hyperpyyaml import load_hyperpyyaml\nfrom speechbrain.dataio.batch import PaddedBatch\nfrom speechbrain.lobes.models.g2p.dataio import get_sequence_key\nfrom speechbrain.utils import hpopt as hp\nfrom speechbrain.wordemb.util import expand_to_chars\nfrom train import dataio_prep, load_dependencies\nfrom types import SimpleNamespace\nfrom tqdm.auto import tqdm\nimport math\nimport itertools\nimport speechbrain as sb\nimport torch\nimport sys\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass G2PEvaluator:\n \"\"\"The G2P model evaluation wrapper\n\n Arguments\n ---------\n hparams: dict\n the dictionary from a parsed hyperparameter file\n device: str\n the device identifier\n model_state: dict\n a pre-loaded model state for a \"warm start\" if applicable\n - could be useful if hyperparameters have changed, but\n the same model can be reused from one run to the next\n \"\"\"\n\n def __init__(self, hparams, device, model_state=None):\n self.hparams = SimpleNamespace(**hparams)\n self.overrides = overrides\n self.device = device\n self.modules = torch.nn.ModuleDict(self.hparams.modules).to(self.device)\n beam_searcher = (\n self.hparams.beam_searcher_lm\n if self.hparams.use_language_model\n else self.hparams.beam_searcher\n )\n self.beam_searcher = beam_searcher.to(self.device)\n if model_state:\n self.hparams.model.load_state_dict(model_state)\n else:\n self.load()\n self.grapheme_sequence_mode = getattr(\n self.hparams, \"grapheme_sequence_mode\", \"bos\"\n )\n self.grapheme_key = get_sequence_key(\n key=\"grapheme_encoded\", mode=self.grapheme_sequence_mode\n )\n self.modules[\"model\"].eval()\n self._word_separator = None\n self._bos = torch.tensor(\n self.hparams.bos_index, device=device\n ).unsqueeze(-1)\n self._eos = torch.tensor(\n self.hparams.eos_index, device=device\n ).unsqueeze(-1)\n\n # When reconstructing sentences word-wise, the process depends\n # on whether spaces are preserved or omitted, as controlled by\n # the phonemes_enable_space hyperparameter\n self._flatten_results = (\n self._flatten_results_separated\n if getattr(self.hparams, \"phonemes_enable_space\", None)\n else self._flatten_results_jumbled\n )\n self._grapheme_word_separator_idx = None\n if self.hparams.use_word_emb:\n self.modules.word_emb = self.hparams.word_emb().to(self.device)\n\n def load(self):\n \"\"\"Loads a model from a checkpoint\"\"\"\n checkpointer = 
self.hparams.checkpointer\n ckpt = checkpointer.recover_if_possible(\n device=torch.device(self.device),\n importance_key=lambda ckpt: -ckpt.meta.get(\"PER\", -100.0),\n ckpt_predicate=lambda ckpt: ckpt.meta[\"step\"]\n == self.hparams.eval_ckpt_step,\n )\n if ckpt:\n logger.info(\"Loaded checkpoint with metadata %s\", ckpt.meta)\n else:\n raise ValueError(\n f\"Checkpoint not found for training step {self.hparams.eval_train_step}\"\n )\n return ckpt\n\n def evaluate_batch(self, batch):\n \"\"\"\n Evaluates the G2P model\n\n Arguments\n ---------\n batch: PaddedBatch\n A single batch of data, same as the kind of batch used\n for G2P training\n \"\"\"\n batch = batch.to(self.device)\n grapheme_encoded = getattr(batch, self.grapheme_key)\n if self.hparams.eval_mode == \"sentence\":\n hyps, scores = self._get_phonemes(grapheme_encoded, char=batch.char)\n elif self.hparams.eval_mode == \"word\":\n hyps, scores = self._get_phonemes_wordwise(batch.grapheme_encoded)\n else:\n raise ValueError(f\"unsupported eval_mode {self.hparams.eval_mode}\")\n\n ids = batch.sample_id\n\n phns, phn_lens = batch.phn_encoded\n\n self.per_metrics.append(\n ids, hyps, phns, None, phn_lens, self.hparams.out_phoneme_decoder,\n )\n\n def _get_phonemes(self, grapheme_encoded, phn_encoded=None, char=None):\n \"\"\"Runs the model and the beam search to retrieve the phoneme sequence\n corresponding to the provided grapheme sequence\n\n Arguments\n ---------\n grapheme_encoded: speechbrain.dataio.batch.PaddedData\n An encoded grapheme sequence\n\n phn_encoded_bos: speechbrain.dataio.batch.PaddedData\n An encoded phoneme sequence (optional)\n\n char: str\n Raw character input (needed for word embeddings)\n\n Returns\n -------\n hyps: list\n the hypotheses (the beam search result)\n scores: list\n the scores corresponding to the hypotheses\n \"\"\"\n _, char_word_emb = None, None\n if self._grapheme_word_separator_idx is None:\n self._grapheme_word_separator_idx = self.hparams.grapheme_encoder.lab2ind[\n \" \"\n ]\n if not phn_encoded:\n grapheme_encoded_data, grapheme_lens = grapheme_encoded\n phn_encoded = (\n torch.ones(len(grapheme_encoded_data), 1).to(\n grapheme_encoded_data.device\n )\n * self.hparams.bos_index,\n torch.ones(len(grapheme_encoded_data)).to(\n grapheme_encoded_data.device\n ),\n )\n char_word_emb = self._apply_word_embeddings(grapheme_encoded, char)\n p_seq, char_lens, encoder_out, _ = self.modules.model(\n grapheme_encoded=grapheme_encoded,\n phn_encoded=phn_encoded,\n word_emb=char_word_emb,\n )\n return self.beam_searcher(encoder_out, char_lens)\n\n def _apply_word_embeddings(self, grapheme_encoded, char):\n char_word_emb = None\n if self.hparams.use_word_emb:\n grapheme_encoded_data, grapheme_lens = grapheme_encoded\n word_emb = self.modules.word_emb.batch_embeddings(char)\n char_word_emb = expand_to_chars(\n emb=word_emb,\n seq=grapheme_encoded_data,\n seq_len=grapheme_lens,\n word_separator=self._grapheme_word_separator_idx,\n )\n return char_word_emb\n\n def _get_phonemes_wordwise(self, grapheme_encoded):\n \"\"\"Retrieves the phoneme sequence corresponding to the provided grapheme\n sequence in a word-wise manner (running the evaluator for each word separately)\n\n Arguments\n ---------\n grapheme_encoded: speechbrain.dataio.batch.PaddedData\n An encoded grapheme sequence\n\n Returns\n -------\n hyps: list\n the hypotheses (the beam search result)\n scores: list\n the scores corresponding to the hypotheses\n \"\"\"\n if self.hparams.use_word_emb:\n raise NotImplementedError(\n \"Wordwise 
evaluation is not supported with word embeddings\"\n )\n if self._word_separator is None:\n self._word_separator = self.hparams.phoneme_encoder.lab2ind[\" \"]\n hyps, scores = [], []\n for grapheme_item, grapheme_len in zip(\n grapheme_encoded.data, grapheme_encoded.lengths\n ):\n words_batch = self._split_words_batch(grapheme_item, grapheme_len)\n item_hyps, item_scores = self._get_phonemes(\n words_batch.grapheme_encoded\n )\n hyps.append(self._flatten_results(item_hyps))\n scores.append(self._flatten_scores(item_hyps, item_scores))\n return hyps, scores\n\n def _flatten_results_jumbled(self, results):\n \"\"\"Flattens a sequence of results into a single sequence of tokens -\n used when spaces are preserved in the phoneme space\n\n Arguments\n ---------\n results: iterable\n a two-dimensional result\n\n Returns\n -------\n result: list\n the concatenated reuslt\n \"\"\"\n return [token for item_result in results for token in item_result]\n\n def _flatten_results_separated(self, results):\n \"\"\"Flattens a sequence of words, inserting word separators between them -\n used when word separators are preserved in the phoneme space\n\n Arguments\n ---------\n results: iterable\n a two-dimensional result\n\n Returns\n -------\n result: list\n the concatenated reuslt\n \"\"\"\n result = []\n for item_result in results:\n for token in item_result:\n result.append(token)\n if item_result and item_result[-1] != self._word_separator:\n result.append(self._word_separator)\n del result[-1]\n return result\n\n def _flatten_scores(self, hyps, scores):\n \"\"\"Flattens an array of scores, using a weighted average of the scores of\n individual words, by word length\n\n Arguments\n ---------\n hyps: list\n the hypotheses (the beam search result)\n scores: list\n the scores corresponding to the hypotheses\n\n Results\n -------\n scores: list\n the scores corresponding to the hypotheses,\n merged\n \"\"\"\n seq_len = sum(len(word_hyp) for word_hyp in hyps)\n return (\n sum(\n word_score * len(word_hyp)\n for word_hyp, word_score in zip(hyps, scores)\n )\n / seq_len\n )\n\n def _split_words_batch(self, graphemes, length):\n return PaddedBatch(\n [\n {\"grapheme_encoded\": word}\n for word in self._split_words_seq(graphemes, length)\n ]\n ).to(self.device)\n\n def _split_words_seq(self, graphemes, length):\n \"\"\"Splits the provided grapheme sequence into words\n\n Arguments\n ---------\n graphemes: torch.Tensor\n an encoded sequence of phonemes\n\n Returns\n -------\n graphemes: generator\n a generator representing a sequence of words\n \"\"\"\n space_index = self.hparams.graphemes.index(\" \")\n (word_boundaries,) = torch.where(graphemes == space_index)\n last_word_boundary = 0\n for word_boundary in word_boundaries:\n yield self._add_delimiters(\n graphemes[last_word_boundary + 1 : word_boundary]\n )\n last_word_boundary = word_boundary\n char_length = math.ceil(len(graphemes) * length)\n if last_word_boundary < char_length:\n yield self._add_delimiters(\n graphemes[last_word_boundary + 1 : char_length]\n )\n\n def _add_delimiters(self, word):\n \"\"\"Adds the required delimiter characters to a word\n\n Arguments\n ---------\n word: torch.Tensor\n a tensor representing a word\n \"\"\"\n if self.grapheme_sequence_mode == \"bos\":\n word = torch.cat([self._bos, word])\n elif self.grapheme_sequence_mode == \"eos\":\n word = torch.cat([word, self._eos])\n return word\n\n def evaluate_epoch(self, dataset, dataloader_opts=None):\n \"\"\"\n Evaluates a single epoch\n\n Arguments\n ---------\n dataset: 
DynamicItemDataset\n a G2P dataset (same as the ones used for training)\n\n Returns\n -------\n metrics: dict\n Raw PER metrics\n \"\"\"\n logger.info(\"Beginning evaluation\")\n with torch.no_grad():\n self.per_metrics = self.hparams.per_stats()\n dataloader = sb.dataio.dataloader.make_dataloader(\n dataset,\n **dict(\n dataloader_opts or {},\n shuffle=True,\n batch_size=self.hparams.eval_batch_size,\n ),\n )\n dataloader_it = iter(dataloader)\n if self.hparams.eval_batch_count is not None:\n dataloader_it = itertools.islice(\n dataloader_it, 0, self.hparams.eval_batch_count\n )\n batch_count = self.hparams.eval_batch_count\n else:\n batch_count = math.ceil(\n len(dataset) / self.hparams.eval_batch_size\n )\n for batch in tqdm(dataloader_it, total=batch_count):\n self.evaluate_batch(batch)\n if self.hparams.eval_output_wer_file:\n self._output_wer_file()\n return self.per_metrics.summarize()\n\n def _output_wer_file(self):\n with open(self.hparams.eval_wer_file, \"w\") as w:\n w.write(\"\\nPER stats:\\n\")\n self.per_metrics.write_stats(w)\n print(\n \"seq2seq, and PER stats written to file\",\n self.hparams.eval_wer_file,\n )\n\n\nif __name__ == \"__main__\":\n # CLI:\n\n with hp.hyperparameter_optimization(objective_key=\"error_rate\") as hp_ctx:\n # Parse the hyperparameter file\n search_hparam_file = sys.argv[0]\n hparams_file, run_opts, overrides = hp_ctx.parse_arguments(sys.argv[1:])\n device = run_opts.get(\"device\", \"cpu\")\n with open(hparams_file) as fin:\n hparams = load_hyperpyyaml(fin, overrides)\n\n # Load dependencies\n if hparams.get(\"use_language_model\"):\n load_dependencies(hparams, run_opts)\n\n # Run the evaluation\n evaluator = G2PEvaluator(hparams, device)\n\n # Some configurations involve curriculum training on\n # multiple steps. 
Load the dataset configuration for the\n # step specified in the eval_train_step hyperparameter\n # (or command-line argument)\n train_step = next(\n train_step\n for train_step in hparams[\"train_steps\"]\n if train_step[\"name\"] == hparams[\"eval_train_step\"]\n )\n train, valid, test, _ = dataio_prep(hparams, train_step)\n datasets = {\"train\": train, \"valid\": valid, \"test\": test}\n dataset = datasets[hparams[\"eval_dataset\"]]\n dataloader_opts = train_step.get(\n \"dataloader_opts\", hparams.get(\"dataloader_opts\", {})\n )\n result = evaluator.evaluate_epoch(dataset, dataloader_opts)\n hp.report_result(result)\n", "repo_name": "speechbrain/speechbrain", "sub_path": "recipes/LibriSpeech/G2P/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 14758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6855, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.ModuleDict", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "attribute"}, {"api_name": "speechbrain.lobes.models.g2p.dataio.get_sequence_key", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 165, "usage_type": "call"}, {"api_name": "speechbrain.wordemb.util.expand_to_chars", "line_number": 182, "usage_type": "call"}, {"api_name": "speechbrain.dataio.batch.PaddedBatch", "line_number": 290, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 311, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 353, "usage_type": "call"}, {"api_name": "speechbrain.dataio.dataloader.make_dataloader", "line_number": 355, "usage_type": "call"}, {"api_name": "speechbrain.dataio", "line_number": 355, "usage_type": "attribute"}, {"api_name": "itertools.islice", "line_number": 365, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 370, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 373, "usage_type": "call"}, {"api_name": "speechbrain.utils.hpopt.hyperparameter_optimization", "line_number": 392, "usage_type": "call"}, {"api_name": "speechbrain.utils.hpopt", "line_number": 392, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 394, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 395, "usage_type": "attribute"}, {"api_name": "hyperpyyaml.load_hyperpyyaml", "line_number": 398, "usage_type": "call"}, {"api_name": "train.load_dependencies", "line_number": 402, "usage_type": "call"}, {"api_name": "train.dataio_prep", "line_number": 416, "usage_type": "call"}, {"api_name": "speechbrain.utils.hpopt.report_result", "line_number": 423, "usage_type": "call"}, {"api_name": "speechbrain.utils.hpopt", "line_number": 423, "usage_type": "name"}]} +{"seq_id": "29236535379", "text": "\n# Fine tuning of a FCN-ResNet-19 model to a \"floor\" dataset\n\n# Based on the PyTorch segmentation 
model's train.py plus the PyTorch fine tuning tutorial\n# (https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html)\n\nimport torch\nimport torchvision\nimport PIL\nimport transforms as tr\nimport os\nimport utils\nimport time\nimport datetime\n\n# Training parameters\n\nnum_epochs = 300\nresume = False\nprint_freq = 100\nnum_ft_classes = 2\n\n# Weights on the loss for each category. If obstacle pixels are rare in the training data set, give them a higher weight\n\nfloor_loss_weight = 1.0\nobstacle_loss_weight = 100.0\n\n# Custom dataset class. Modeled after torchvision.datasets.voc.VOCSegmentation\n\nclass FloorSegmentationDataset(torchvision.datasets.VisionDataset):\n \"\"\"Robot Floor Segmentation Dataset.\n\n Args:\n root (string): Root directory of the dataset.\n image_set (string, optional): Select the image_set to use, ``train`` or ``val``\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n\n def __init__(self,\n root,\n image_set='train',\n transform=None,\n target_transform=None,\n transforms=None):\n super(FloorSegmentationDataset, self).__init__(root, transforms, transform, target_transform)\n self.image_set = image_set\n image_dir = os.path.join(root, 'FloorData/Images')\n mask_dir = os.path.join(root, 'FloorData/Masks')\n\n if not os.path.isdir(root):\n raise RuntimeError('Dataset not found or corrupted.')\n\n splits_dir = os.path.join(root, 'FloorData/ImageSets')\n\n split_f = os.path.join(splits_dir, image_set.rstrip('\\n') + '.txt')\n\n if not os.path.exists(split_f):\n raise ValueError('Wrong image_set entered! 
Please use image_set=\"train\" or image_set=\"val\"')\n\n with open(os.path.join(split_f), \"r\") as f:\n file_names = [x.strip() for x in f.readlines()]\n\n self.images = [os.path.join(image_dir, x + \".png\") for x in file_names]\n self.masks = [os.path.join(mask_dir, x + \".png\") for x in file_names]\n assert (len(self.images) == len(self.masks))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is the image segmentation.\n \"\"\"\n img = PIL.Image.open(self.images[index]).convert('RGB')\n target = PIL.Image.open(self.masks[index])\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.images)\n\n\ndef get_dataset(path, image_set, transform):\n ds = FloorSegmentationDataset(path, image_set=image_set, transforms=transform)\n return ds\n\n\ndef get_transforms(train):\n transforms = []\n base_size = 352\n crop_size = 320\n min_size = base_size\n max_size = int(2 * base_size)\n transforms.append(tr.RandomResize(min_size, max_size))\n # In training mode, perform random flips and crops\n if train:\n transforms.append(tr.RandomHorizontalFlip(0.5))\n transforms.append(tr.RandomCrop(crop_size))\n transforms.append(tr.ToTensor())\n transforms.append(tr.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]))\n return tr.Compose(transforms)\n\n\n# Function to turn gradient calculations on/off depending on whether we're feature\n# extracting (holding the network constant while only training the output layer)\n\ndef set_parameter_requires_grad(model, feature_extracting):\n if feature_extracting:\n for param in model.parameters():\n param.requires_grad = False\n\n\n# Create instance of model class\n\nmodel = torchvision.models.segmentation.fcn_resnet18(num_classes=21)\n\n# Load pretrained weights\n\ncheckpoint = torch.load('fcn_resnet18_voc_best.pth', map_location='cpu')\nmodel.load_state_dict(checkpoint['model'])\n\n# Turn off training for pre-existing layers\n\n#set_parameter_requires_grad(model, feature_extracting=True)\n\n# FCN has a backbone (ResNet18) and head (attribute 'classifier' type FCNHead with 5 layers: conv, bn, relu, dropout, conv)\n# model.classifier[4] is the final 21-convolution output\n\nprint('Replacing model.classifier[4] with \"fresh\" 2-class layer')\nmodel.classifier[4] = torch.nn.Conv2d(128, num_ft_classes, kernel_size=(1, 1), stride=(1, 1))\ndevice = torch.device('cuda')\nmodel.to(device)\n\n# Load data sets\n\ntrain_dataset = get_dataset('.', \"train\", get_transforms(train=True))\nval_dataset = get_dataset('.', \"val\", get_transforms(train=False))\ntrain_sampler = torch.utils.data.RandomSampler(train_dataset)\nval_sampler = torch.utils.data.SequentialSampler(val_dataset)\ntrain_data_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=4,\n sampler=train_sampler, num_workers=16,\n collate_fn=utils.collate_fn, drop_last=True)\nval_data_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=1,\n sampler=val_sampler, num_workers=16,\n collate_fn=utils.collate_fn)\n\n# Create optimizer\n\nparams_to_optimize = [\n {\"params\": [p for p in model.backbone.parameters() if p.requires_grad]},\n {\"params\": [p for p in model.classifier.parameters() if p.requires_grad]},\n]\noptimizer = torch.optim.SGD(\n params_to_optimize,\n lr=0.0001, momentum=0.9, weight_decay=1e-4)\nlr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lambda x: (1 - x / (len(train_data_loader) * num_epochs)) 
** 0.9)\n\n# Define loss function\n\ndef criterion(inputs, target, weights):\n losses = {}\n for name, x in inputs.items():\n losses[name] = torch.nn.functional.cross_entropy(x, target, weight=weights, ignore_index=255)\n\n if len(losses) == 1:\n return losses['out']\n\n return losses['out'] + 0.5 * losses['aux']\n\n# Train for one epoch over the dataset\n\ndef train_one_epoch(model, criterion, class_weights, optimizer, data_loader, lr_scheduler, device, epoch, print_freq):\n model.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))\n header = 'Epoch: [{}]'.format(epoch)\n for image, target in metric_logger.log_every(data_loader, print_freq, header):\n image, target = image.to(device), target.to(device)\n output = model(image)\n loss = criterion(output, target, class_weights)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n lr_scheduler.step()\n\n metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0][\"lr\"])\n\n\n# Evaluate model according to IoU (intersection over union)\n\ndef evaluate(model, data_loader, device, num_classes):\n model.eval()\n confmat = utils.ConfusionMatrix(num_classes)\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = 'Test:'\n with torch.no_grad():\n for image, target in metric_logger.log_every(data_loader, 100, header):\n image, target = image.to(device), target.to(device)\n output = model(image)\n output = output['out']\n\n confmat.update(target.flatten(), output.argmax(1).flatten())\n\n confmat.reduce_from_all_processes()\n\n return confmat\n\n\n# if resume:\n # checkpoint = torch.load(args.resume, map_location='cpu')\n # model.load_state_dict(checkpoint['model'])\n # model_without_ddp.load_state_dict(checkpoint['model_without_ddp'])\n # optimizer.load_state_dict(checkpoint['optimizer'])\n # print('Getting initial validation performance:')\n # confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)\n # print(confmat)\n\n# training loop\nstart_time = time.time()\nbest_IoU = 0.0\nclass_weights = torch.Tensor([floor_loss_weight, obstacle_loss_weight]).to(device)\n\nfor epoch in range(num_epochs):\n\n # Train one epoch\n\n train_one_epoch(model, criterion, class_weights, optimizer, train_data_loader, lr_scheduler, device, epoch, print_freq)\n\n # Test on the val dataset\n\n confmat = evaluate(model, val_data_loader, device=device, num_classes=num_ft_classes)\n print(confmat)\n\ntotal_time = time.time() - start_time\ntotal_time_str = str(datetime.timedelta(seconds=int(total_time)))\nprint('Training time {}'.format(total_time_str))\n\n", "repo_name": "sanjevShakya/machine-vision-class", "sub_path": "lab4-p2/lab4-segmentation-training/fine_tune.py", "file_name": "fine_tune.py", "file_ext": "py", "file_size_in_byte": 8542, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.datasets", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 78, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 79, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 79, "usage_type": "attribute"}, {"api_name": "transforms.append", "line_number": 101, "usage_type": "call"}, {"api_name": "transforms.RandomResize", "line_number": 101, "usage_type": "call"}, {"api_name": "transforms.append", "line_number": 104, "usage_type": "call"}, {"api_name": "transforms.RandomHorizontalFlip", "line_number": 104, "usage_type": "call"}, {"api_name": "transforms.append", "line_number": 105, "usage_type": "call"}, {"api_name": "transforms.RandomCrop", "line_number": 105, "usage_type": "call"}, {"api_name": "transforms.append", "line_number": 106, "usage_type": "call"}, {"api_name": "transforms.ToTensor", "line_number": 106, "usage_type": "call"}, {"api_name": "transforms.append", "line_number": 107, "usage_type": "call"}, {"api_name": "transforms.Normalize", "line_number": 107, "usage_type": "call"}, {"api_name": "transforms.Compose", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.models.segmentation.fcn_resnet18", "line_number": 123, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.utils.data.RandomSampler", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 146, "usage_type": "attribute"}, {"api_name": "torch.utils.data.SequentialSampler", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 147, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 148, "usage_type": "attribute"}, {"api_name": "utils.collate_fn", "line_number": 151, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 152, "usage_type": "attribute"}, {"api_name": "utils.collate_fn", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 163, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 166, 
"usage_type": "attribute"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "attribute"}, {"api_name": "utils.MetricLogger", "line_number": 185, "usage_type": "call"}, {"api_name": "utils.SmoothedValue", "line_number": 186, "usage_type": "call"}, {"api_name": "utils.ConfusionMatrix", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.MetricLogger", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 209, "usage_type": "call"}, {"api_name": "time.time", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 234, "usage_type": "call"}, {"api_name": "time.time", "line_number": 247, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "20526651542", "text": "\nimport re\nfrom urllib2 import urlopen\nfrom urllib import urlencode\nfrom simplejson import load\nfrom pymongo import Connection, errors\n\ndef get_database(db_name, create=True, **kwargs):\n mongo = Connection(**kwargs)\n db_name = re.sub('\\s+', '_', db_name)\n if not create and db_name not in mongo.database_names():\n raise errors.InvalidName(\"database %s does not exists\" % db_name)\n return mongo[db_name]\n\ndef get_collection(db, collection_name, create=True):\n if not create and collection_name not in db.collection_names():\n raise errors.InvalidName(\"collection %s does not exists\" % collection_name)\n return db[collection_name]\n\ndef basic_search(query):\n u = urlopen('http://search.twitter.com/search.json?%s' % urlencode({'q': query}))\n resp = load(u)\n tweets = resp['results']\n del resp['results']\n return tweets, resp\n \n", "repo_name": "lbjay/twarchive", "sub_path": "twarchive.py", "file_name": "twarchive.py", "file_ext": "py", "file_size_in_byte": 877, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymongo.Connection", "line_number": 9, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 10, "usage_type": "call"}, {"api_name": "pymongo.errors.InvalidName", "line_number": 12, "usage_type": "call"}, {"api_name": "pymongo.errors", "line_number": 12, "usage_type": "name"}, {"api_name": "pymongo.errors.InvalidName", "line_number": 17, "usage_type": "call"}, {"api_name": "pymongo.errors", "line_number": 17, "usage_type": "name"}, {"api_name": "urllib2.urlopen", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 21, "usage_type": "call"}, {"api_name": "simplejson.load", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "30632949348", "text": "import numpy as np\nimport cv2\n\n\nimport threading\nimport DobotDllType as dType\n\nCON_STR = {\n dType.DobotConnect.DobotConnect_NoError: \"DobotConnect_NoError\",\n dType.DobotConnect.DobotConnect_NotFound: \"DobotConnect_NotFound\",\n dType.DobotConnect.DobotConnect_Occupied: \"DobotConnect_Occupied\"}\n\n#Load Dll\napi = dType.load()\n\n#Connect Dobot\nstate = dType.ConnectDobot(api, \"\", 115200)[0]\nprint(\"Connect status:\",CON_STR[state])\n\n\n\n\n\n\n\nSCALE_FACTOR_GLASS = 1.5\nSCALE_FACTOR_JNT = 1.5\nSCALE_FACTOR_HAT = 1.5\nDISPLAY_BOUNDRY_BOX = True\n\nsunglasses = cv2.imread('thug.jpg') \njnt = cv2.imread('jnt.jpg')\nhat = cv2.imread('hat.jpg')\n\n# now we can try to detect faces\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = 
cv2.CascadeClassifier('haarcascade_eye.xml')\nmouth_cascade = cv2.CascadeClassifier('haarcascade_mouth.xml')\n\ncap = cv2.VideoCapture(0)\nif (state == dType.DobotConnect.DobotConnect_NoError):\n #Clean Command Queued\n dType.SetQueuedCmdClear(api)\n\n #Async Motion Params Setting\n dType.SetHOMEParams(api, 200, 200, 200, 200, isQueued = 1)\n dType.SetPTPJointParams(api, 200, 200, 200, 200, 200, 200, 200, 200, isQueued = 1)\n dType.SetPTPCommonParams(api, 100, 100, isQueued = 1)\n dType.SetPTPJumpParams(api,4,-55,isQueued=1)\n\n while(True):\n # Capture frame-by-frame\n ret, img = cap.read()\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n \n\n img = cv2.medianBlur(img,3)\n\n \n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n if len(faces) > 0:\n filter_applied = False\n\n #(x,y,w,h) = sorted(faces, key=lambda face: face[2]*face[3])[-1] #Might have more than one face -> choose the largest\n for (x,y,w,h) in faces:\n \n if DISPLAY_BOUNDRY_BOX:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n \n roi_gray = gray[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray) # Might have more than two \n ret,thresh1 = cv2.threshold(roi_gray,50,255,cv2.THRESH_BINARY)\n resized=cv2.resize(thresh1,(400,300))\n cv2.imshow('asd',resized)\n\n count=0\n cv2.imshow('frame',img)\n\n for i in range(0,299):\n j=0\n #for j in range(0,335):\n while(j<398):\n \n if(resized[i,j]==0 and resized[i,j+1]==0 and resized[i,j-1]==0 and (j+1)<398 and (j-1)>0):\n k=j;\n \n \n while(resized[i,k]==0):\n k=k+1\n if(k>399):\n break\n j=k-1\n lastIndex = dType.SetPTPCmd(api, dType.PTPMode.PTPMOVLXYZMode, 300-((float(i*50)/float(300))), 80-((float(j*50)/float(400))), -59.3, 0, isQueued = 1)[0]\n \n count=count+1\n\n elif(resized[i,j]==0):\n lastIndex = dType.SetPTPCmd(api, dType.PTPMode.PTPJUMPXYZMode, 300-((float(i*50)/float(300))), 80-((float(j*50)/float(400))), -59.3, 0, isQueued = 1)[0]\n count=count+1\n j=j+1\n else:\n j=j+1\n if(count==25):\n dType.SetQueuedCmdStartExec(api)\n\n #Wait for Executing Last Command \n while lastIndex > dType.GetQueuedCmdCurrentIndex(api)[0]:\n dType.dSleep(100)\n \n #Stop to Execute Command Queued\n dType.SetQueuedCmdStopExec(api)\n dType.SetQueuedCmdClear(api)\n count=0;\n \n \n cv2.imshow('frame',img)\n else:\n cv2.imshow('frame',img)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n dType.SetQueuedCmdStartExec(api)\n\n #Wait for Executing Last Command \n while lastIndex > dType.GetQueuedCmdCurrentIndex(api)[0]:\n dType.dSleep(100)\n\n #Stop to Execute Command Queued\n dType.SetQueuedCmdStopExec(api)\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n", "repo_name": "mohitsambrani/i2i-mixels", "sub_path": "roboface1.py", "file_name": "roboface1.py", "file_ext": "py", "file_size_in_byte": 5377, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "DobotDllType.DobotConnect", "line_number": 9, "usage_type": "attribute"}, {"api_name": "DobotDllType.DobotConnect", "line_number": 10, "usage_type": "attribute"}, {"api_name": "DobotDllType.DobotConnect", "line_number": 11, "usage_type": "attribute"}, {"api_name": "DobotDllType.load", "line_number": 14, "usage_type": "call"}, {"api_name": "DobotDllType.ConnectDobot", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 33, "usage_type": "call"}, 
{"api_name": "cv2.CascadeClassifier", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 40, "usage_type": "call"}, {"api_name": "DobotDllType.DobotConnect", "line_number": 41, "usage_type": "attribute"}, {"api_name": "DobotDllType.SetQueuedCmdClear", "line_number": 43, "usage_type": "call"}, {"api_name": "DobotDllType.SetHOMEParams", "line_number": 46, "usage_type": "call"}, {"api_name": "DobotDllType.SetPTPJointParams", "line_number": 47, "usage_type": "call"}, {"api_name": "DobotDllType.SetPTPCommonParams", "line_number": 48, "usage_type": "call"}, {"api_name": "DobotDllType.SetPTPJumpParams", "line_number": 49, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.medianBlur", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 70, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 79, "usage_type": "call"}, {"api_name": "DobotDllType.SetPTPCmd", "line_number": 95, "usage_type": "call"}, {"api_name": "DobotDllType.PTPMode", "line_number": 95, "usage_type": "attribute"}, {"api_name": "DobotDllType.SetPTPCmd", "line_number": 100, "usage_type": "call"}, {"api_name": "DobotDllType.PTPMode", "line_number": 100, "usage_type": "attribute"}, {"api_name": "DobotDllType.SetQueuedCmdStartExec", "line_number": 106, "usage_type": "call"}, {"api_name": "DobotDllType.GetQueuedCmdCurrentIndex", "line_number": 109, "usage_type": "call"}, {"api_name": "DobotDllType.dSleep", "line_number": 110, "usage_type": "call"}, {"api_name": "DobotDllType.SetQueuedCmdStopExec", "line_number": 113, "usage_type": "call"}, {"api_name": "DobotDllType.SetQueuedCmdClear", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 120, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 122, "usage_type": "call"}, {"api_name": "DobotDllType.SetQueuedCmdStartExec", "line_number": 124, "usage_type": "call"}, {"api_name": "DobotDllType.GetQueuedCmdCurrentIndex", "line_number": 127, "usage_type": "call"}, {"api_name": "DobotDllType.dSleep", "line_number": 128, "usage_type": "call"}, {"api_name": "DobotDllType.SetQueuedCmdStopExec", "line_number": 131, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "36569128191", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import trange,tqdm\nfrom scipy.sparse.csgraph import dijkstra\nfrom scipy.linalg import circulant\nimport itertools\n\n# problem 2a\ndef problem_2a(n,m,printen,plotten):\n A = np.zeros(n**2)\n A[:m] = 1\n np.random.shuffle(A)\n A = np.reshape(A,[n,n])\n A_sum = np.sum(A,axis=0)\n C = A_sum/n\n if printen:\n print(A)\n if plotten:\n plt.figure()\n plt.hist(C,bins=10)\n plt.show()\n return A\n\ndef problem_2b(n,printen,plotten):\n A = np.random.choice([0,1],size=[n,n],p=[770/870,100/870])\n A_sum = 
np.sum(A,axis=0)\n C = A_sum/n\n if printen:\n print(A)\n if plotten:\n plt.figure()\n plt.hist(C,bins=10)\n plt.show()\n return A\n\ndef problem_2c(n,k,printen):\n A_array = np.zeros(n) # the first row that forms the circulant matrix \n A_array[-int(k/2):] = 1 \n A_array[1:int(k/2)+1] = 1\n A = np.transpose(circulant(A_array)) # create a circulant matrix from the array and transpose it\n if printen:\n print(A)\n return A\n\ndef problem_2d(A,p,printen): \n indices_1 = np.array(np.where(A==1)) # indices where the matrix is 1\n n_shuffle = int(indices_1.shape[1]*p/2) # rewire a fraction of these indices, because the matrix is symmetrical there are need to be shuffled p/2 values for the upple triangle\n upper_indices_1 = np.array([indices_1[0,np.where(indices_1[1]>indices_1[0])][0],indices_1[1,np.where(indices_1[1]>indices_1[0])][0]]) # look only at upper triangle indices\n n_1 = upper_indices_1.shape[1] #look at amount of upper triangle indices\n i_1 = np.random.choice(np.arange(n_1),size=n_shuffle,replace=False) # pick n_shuffle random indices where the matrix is 1\n x_1,y_1 = upper_indices_1[0,i_1],upper_indices_1[1,i_1] # get the x,y-index of the elements to be shuffled\n\n indices_0 = np.array(np.where(A==0)) # indices where the matrix is 1\n upper_indices_0 = np.array([indices_0[0,np.where(indices_0[1]>indices_0[0])][0],indices_0[1,np.where(indices_0[1]>indices_0[0])][0]]) # look only at upper triangle indices\n n_0 = upper_indices_0.shape[1] #look at amount of upper triangle indices\n i_0 = np.random.choice(np.arange(n_0),size=n_shuffle,replace=False) # pick n_shuffle random indices where the matrix is 1\n x_0,y_0 = upper_indices_0[0,i_0],upper_indices_0[1,i_0] # get the x,y-index of the elements to be shuffled\n\n A[x_1,y_1] = 0 # replace ones by zeros\n A[y_1,x_1] = 0 # make the matrix symmetrical\n A[x_0,y_0] = 1 # replace zeros by ones\n A[y_0,x_0] = 1 # make the matrix symmetrical\n if printen:\n print(A)\n return A\n\ndef problem_2e(A): \n n = len(A)\n C_nodes = np.zeros(n)\n for i in range(n): # look at each node\n neighbours = np.nonzero(A[i])[0] # get all neighbours\n neighbours_combinations = np.array(list(itertools.combinations(neighbours,2))) # get all combinations of neighbours\n if len(neighbours_combinations)!=0: # look if there are more than 2 neighbours\n triangles = A[neighbours_combinations[:,0],neighbours_combinations[:,1]] # get the values of the combinations of neighbours where a 1 means a connection (=triangle) and a 0 not\n C_node = np.sum(triangles)/neighbours_combinations.shape[0] # get the local cluster index\n else: # this is the case if there are less than 2 neighbours\n C_node = 0 # set C_node to 0 if it has only 1 neighbours\n C_nodes[i] = C_node\n C = np.mean(C_nodes) # get the average local cluster index\n d_matrix = dijkstra(A) # make shortest path matrix via Dijkstra algorithm\n d_matrix = np.where(d_matrix==np.inf,np.nan,d_matrix)\n d = np.nanmean(d_matrix) # get the mean path length\n return C_nodes,C,d\n\ndef problem_2f(n,T,k_array):\n probs = 1/10**np.linspace(0,5,T)[::-1]\n C_nodes = np.zeros([len(k_array),T,n])\n C = np.zeros([len(k_array),T])\n d = np.zeros([len(k_array),T])\n\n fig,ax = plt.subplots(ncols=2)\n for i,k in enumerate(k_array):\n print('k={}'.format(k))\n A_f = problem_2c(n=n,k=k,printen=False)\n for j,prob in enumerate(tqdm(probs)): \n A = problem_2d(A=A_f.copy(),p=prob,printen=False)\n C_nodes[i,j],C[i,j],d[i,j] = problem_2e(A=A)\n \n ax[0].plot(probs,C[i]/C[i,0],color=plt.cm.hot(k*10))\n 
ax[0].plot(probs,d[i]/d[i,0],color=plt.cm.gray(k*10))\n ax[0].set_xscale('log')\n ax[0].set_xlabel('Rewiring probability')\n ax[0].set_ylabel('Cp/C0 and Lp/L0')\n\n ax[1].plot(probs,(C[i]/C[i,0])/(d[i]/d[i,0]),color=plt.cm.hot(k*10))\n ax[1].set_xlabel('Rewiring probability')\n ax[1].set_ylabel('(Cp/C0) / (Lp/L0)')\n ax[1].set_xscale('log')\n fig.tight_layout()\n plt.show()\n\nA_a = problem_2a(n=30,m=100,printen=False,plotten=False)\nA_b = problem_2b(n=30,printen=False,plotten=False)\nA_c = problem_2c(n=500,k=10,printen=False)\nA_d = problem_2d(A=A_c.copy(),p=0.05,printen=False)\nC_nodes_e,C_e,d_e = problem_2e(A=A_d)\nproblem_2f(n=1000,T=100,k_array=[3,5,10,12,15,20])\n", "repo_name": "ssuidman/neurophysics", "sub_path": "QBN/Week 3/Graph_theory.py", "file_name": "Graph_theory.py", "file_ext": "py", "file_size_in_byte": 5060, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.linalg.circulant", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 50, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 56, 
"usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 79, "usage_type": "call"}, {"api_name": "scipy.sparse.csgraph.dijkstra", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.hot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 99, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.gray", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 100, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.hot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 105, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "3448756136", "text": "\nimport datetime\nimport json\nimport os\nimport re\nimport fnmatch\nfrom PIL import Image\nimport numpy as np\n\nclass PyCocoCreator():\n\n def main(self, args, creator_tools):\n\n # print(args)\n\n self.DATABASE_NAME = args.database_name\n self.base_path = args.base_path\n self.images_path = args.images_path\n self.masks_path = args.masks_path\n\n self.IMAGE_DIR = os.path.join(self.base_path, self.images_path)\n self.ANNOTATION_DIR = os.path.join(self.base_path, self.masks_path)\n\n self.stage = args.stage\n self.iscrowd = args.iscrowd\n\n self.init_file()\n\n # filter for jpeg images\n for root, _, files in os.walk(self.IMAGE_DIR):\n image_files = self.filter_for_images(root, files)\n self.process_images(image_files, creator_tools)\n\n self.write_file()\n\n def init_file(self):\n self.INFO = {\n \"description\": self.DATABASE_NAME,\n \"url\": \"https://github.com/waspinator/pycococreator\",\n \"version\": \"1.0.0\",\n \"year\": datetime.date.today().year,\n \"contributor\": \"Charles Camargo\",\n \"date_created\": datetime.datetime.utcnow().isoformat(' ')\n }\n\n self.LICENSES = [\n {\n \"id\": 1,\n \"name\": 
\"Attribution-NonCommercial-Charles-License\",\n \"url\": \"http://creativecommons.org/licenses/by-nc-sa/2.0/\"\n }\n ]\n\n self.CATEGORIES = [\n {\n \"supercategory\": \"vegetation\",\n \"id\": 1,\n \"name\": \"hedychium_coronarium\"\n }\n ]\n\n self.coco_output = {\n \"info\": self.INFO,\n \"licenses\": self.LICENSES,\n \"categories\": self.CATEGORIES,\n \"images\": [],\n \"annotations\": []\n }\n\n def process_images(self, image_files, creator_tools):\n image_id = 1\n segmentation_id = 1\n\n # go through each image\n for image_filename in image_files:\n image = Image.open(image_filename)\n image_info = creator_tools.create_image_info(\n image_id, os.path.basename(image_filename), image.size)\n \n has_annotation = False\n\n # filter for associated png annotations\n for root, _, files in os.walk(self.ANNOTATION_DIR):\n annotation_files = self.filter_for_annotations(\n root, files, image_filename)\n\n if(not annotation_files or len(annotation_files) == 0):\n print(\n f'\\n-------------------- without annotations_files {image_filename}\\n')\n\n # go through each associated annotation\n for annotation_filename in annotation_files:\n\n print(f'image_id: {image_id} - {annotation_filename}')\n #[x['id'] for x in CATEGORIES if x['name'] in annotation_filename][0]\n class_id = 0\n\n category_info = {'id': class_id, 'is_crowd': self.iscrowd}\n \n binary_mask = np.asarray(Image.open(\n annotation_filename).convert('1')).astype(np.uint8)\n\n self.annotation_info = creator_tools.create_annotation_info(\n segmentation_id, image_id, category_info, binary_mask, image.size, tolerance=2)\n\n if self.annotation_info is not None:\n self.coco_output[\"annotations\"].append(\n self.annotation_info)\n has_annotation = True\n\n segmentation_id = segmentation_id + 1\n\n if (has_annotation == True):\n self.coco_output[\"images\"].append(image_info)\n else:\n print(\n f'\\n------------ The image {image_filename} has no annotations. 
------------\\n')\n\n image_id = image_id + 1\n\n def write_file(self):\n with open(f'{self.base_path}/{self.stage}.json', 'w+') as output_json_file:\n json.dump(self.coco_output, output_json_file)\n \n print(f\"\\n\\nPyCocoCreator - file saved {self.base_path}{self.stage}.json\\n\")\n\n def filter_for_images(self, root, files):\n file_types = ['*.jpeg', '*.jpg', '*.JPEG', '*.JPG', '*.png', '*.PNG']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n\n ############ \n #files = ['../images/train/images/DJI_0594.JPG'] \n ############\n\n return files\n\n def filter_for_annotations(self, root, files, image_filename):\n file_types = ['*.jpeg', '*.jpg', '*.JPEG', '*.JPG', '*.png', '*.PNG']\n file_types = r'|'.join([fnmatch.translate(x) for x in file_types])\n basename_no_extension = os.path.splitext(\n os.path.basename(image_filename))[0]\n file_name_prefix = basename_no_extension + '.*'\n files = [os.path.join(root, f) for f in files]\n files = [f for f in files if re.match(file_types, f)]\n files = [f for f in files if re.match(\n file_name_prefix, os.path.splitext(os.path.basename(f))[0])]\n\n return files\n\n def convert(self, text):\n return int(text) if text.isdigit() else text.lower()\n\n def natrual_key(self, key):\n return [self.convert(c) for c in re.split('([0-9]+)', key)]\n ", "repo_name": "charlespcamargo/pycococreator", "sub_path": "pycococreator.py", "file_name": "pycococreator.py", "file_ext": "py", "file_size_in_byte": 5533, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.walk", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 100, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 101, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 123, "usage_type": "call"}, {"api_name": "fnmatch.translate", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 130, "usage_type": "call"}, {"api_name": "os.path", "line_number": 130, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 131, "usage_type": "call"}, {"api_name": "fnmatch.translate", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 142, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 146, "usage_type": "call"}, {"api_name": "re.match", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 148, "usage_type": "call"}, {"api_name": "re.split", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "70043993765", "text": "from tree.binary_search_tree import BinarySearchTree \nimport pytest\n\ndef test_exists():\n assert BinarySearchTree\n\ndef test_instantiation():\n assert BinarySearchTree()\n\ndef test_insert():\n bst = BinarySearchTree()\n bst.insert(4)\n bst.insert(10)\n bst.insert(1)\n assert bst.root.data == 4\n assert bst.root.left.data == 1\n assert bst.root.right.data == 10\n\ndef test_insert_list():\n bst = BinarySearchTree()\n bst.insert_list([10,5,17])\n assert bst.root.data == 10\n assert bst.root.left.data == 5\n assert bst.root.right.data == 17\n\n@pytest.fixture\ndef bst():\n b = BinarySearchTree()\n b.insert_list([10,5,17,3,7,12,19,1,4,13])\n return b\n\ndef test_get_inorder_list(bst):\n actual = bst.get_inorder_list(bst.root, [])\n expected = [1, 3, 4, 5, 7, 10, 12, 13, 17, 19]\n assert actual == expected\n\ndef test_get_preorder_list(bst):\n actual = bst.get_preorder_list(bst.root, [])\n expected = [10, 5, 3, 1, 4, 7, 17, 12, 13, 19]\n assert actual == expected\n\ndef test_get_postorder_list(bst):\n actual = bst.get_postorder_list(bst.root, [])\n expected = [1, 4, 3, 7, 5, 13, 12, 19, 17, 10]\n assert actual == expected\n\ndef test_print_inorder(bst):\n actual = bst.print_inorder()\n expected = [1, 3, 4, 5, 7, 10, 12, 13, 17, 19]\n assert actual == expected\n\ndef test_print_preorder(bst):\n actual = bst.print_preorder()\n expected = [10, 5, 3, 1, 4, 7, 17, 12, 13, 19]\n assert actual == expected\n\ndef test_print_postorder(bst):\n actual = bst.print_postorder()\n expected = [1, 4, 3, 7, 5, 13, 12, 19, 17, 10]\n assert actual == expected\n\ndef test_search(bst):\n assert bst.search(4).data == 4\n assert bst.search(12).data == 12\n assert bst.search(-1) == None\n\ndef test_minimum(bst):\n assert bst.minimum().data == 1\n assert bst.minimum(bst.search(17)).data == 12\n\ndef test_maximum(bst):\n assert bst.maximum().data == 19\n assert bst.maximum(bst.search(5)).data == 7\n\ndef test_delete(bst):\n bt = bst\n bt.delete(10)\n assert bt.print_inorder() == [1, 3, 4, 5, 7, 12, 13, 17, 19]\n bt.delete(4)\n assert bt.print_inorder() == [1, 3, 5, 7, 12, 13, 17, 19]\n bt.delete(17)\n assert bt.print_inorder() == [1, 3, 5, 7, 12, 13, 19]\n bt.delete(12)\n assert bt.print_inorder() == [1, 3, 5, 7, 13, 19]\n bt.delete(1)\n bt.delete(5)\n bt.delete(3)\n assert bt.print_inorder() == [7, 13, 19]\n bst.delete(19)\n bst.delete(13)\n assert bt.print_inorder() == [7]\n bst.delete(7)\n assert bt.print_inorder() == []\n\n", "repo_name": "codermrhasan/data-structures-and-algorithms-in-python", "sub_path": "src/tree/tests/test_binary_search_tree.py", "file_name": "test_binary_search_tree.py", "file_ext": "py", "file_size_in_byte": 2531, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "tree.binary_search_tree.BinarySearchTree", "line_number": 5, "usage_type": "name"}, {"api_name": "tree.binary_search_tree.BinarySearchTree", "line_number": 8, "usage_type": "call"}, {"api_name": "tree.binary_search_tree.BinarySearchTree", "line_number": 11, "usage_type": "call"}, {"api_name": "tree.binary_search_tree.BinarySearchTree", "line_number": 20, "usage_type": "call"}, {"api_name": "tree.binary_search_tree.BinarySearchTree", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "16073730365", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nimport requests\nfrom configparser import ConfigParser as Ini\nimport configparser\nimport subprocess\nimport distro\nimport readline\n\n\nclass ChatGPT:\n\n def __init__(self, api_key, system_prompt=\"\", url=\"https://chatgpt.example.com\", max_tokens=100, proxy_url=None, proxy_username=None, proxy_password=None):\n \"\"\"初始化ChatGPT Shell\n :param api_key: API Key\n :param system_prompt: 系统提示\n :param url: API URL,搭建教程详见:https://chatgpt-api.pro/index.php/api/chatgpt-flask-api.html,如果不想使用自己搭建的API,可以将URL设置为openai来使用OpenAI的API(可能需要配置代理)\n :param max_tokens: 每次回复的最大Token数\n :param proxy_url: 代理URL,如果不想使用代理,可以将其设置为null\n :param proxy_username: 代理用户名\n :param proxy_password: 代理密码\n \"\"\"\n self.api_key = api_key\n self.system_prompt = system_prompt\n self.url = url\n self.max_tokens = max_tokens\n self.messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt\n }\n ]\n self.proxy_url = proxy_url\n self.proxy_username = proxy_username\n self.proxy_password = proxy_password\n\n def send_message(self, message):\n self.messages.append({\n \"role\": \"user\",\n \"content\": message\n })\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + self.api_key\n }\n if self.url == \"openai\" or self.url.startswith(\"https://api.openai.com\") or self.url.startswith(\"https://openai.com\"):\n data = {\n \"messages\": self.messages,\n \"max_tokens\": self.max_tokens,\n \"model\": \"gpt-3.5-turbo\"\n }\n else:\n data = {\n \"api_key\": self.api_key,\n \"system_content\": self.system_prompt,\n \"user_content\": message,\n \"continuous\": self.messages,\n \"max_tokens\": self.max_tokens,\n \"model\": \"gpt-3.5-turbo\"\n }\n \n if self.proxy_url is not None:\n proxies = {\n \"http\": self.proxy_url,\n \"https\": self.proxy_url\n }\n if self.proxy_username is not None and self.proxy_password is not None:\n proxies[\"http\"] = self.proxy_username + \":\" + self.proxy_password + \"@\" + self.proxy_url\n proxies[\"https\"] = self.proxy_username + \":\" + self.proxy_password + \"@\" + self.proxy_url\n result = requests.post(self.url, json=data, headers=headers, proxies=proxies)\n else:\n result = requests.post(self.url, json=data, headers=headers)\n if result.status_code == 200:\n response = result.json()\n chatgpt_reply = response['current_response']\n self.messages.append({\n \"role\": \"assistant\",\n \"content\": chatgpt_reply\n })\n return chatgpt_reply\n else:\n print(\"Error: \", result.status_code)\n print(result.text)\n return None\n \n def commit_message(self, user_content, chatgpt_reply):\n self.messages.append({\n \"role\": \"user\",\n \"content\": user_content\n })\n self.messages.append({\n \"role\": \"assistant\",\n \"content\": chatgpt_reply\n })\n\n\nif __name__ == \"__main__\":\n # 从配置文件读取 API Key、系统提示、URL、最大Token数\n conf = Ini()\n 
conf.read(\"/etc/chatgpt.conf\")\n api_key = conf.get(\"common\", \"API_KEY\")\n username = os.popen(\"whoami\").read().strip()\n system_prompt = conf.get(\"common\", \"SYSTEM_PROMPT\").replace(\"${UserName}\", username).replace(\"${Distro}\", distro.name())\n url = conf.get(\"common\", \"API_URI\")\n max_tokens = conf.getint(\"common\", \"MAX_TOKENS\")\n try:\n proxy_url = conf.get(\"proxy\", \"PROXY_URL\")\n proxy_username = conf.get(\"proxy\", \"PROXY_USERNAME\")\n proxy_password = conf.get(\"proxy\", \"PROXY_PASSWORD\")\n except (configparser.NoSectionError, configparser.NoOptionError):\n # print(\"No Proxy\")\n proxy_url = None\n proxy_username = None\n proxy_password = None\n # chatgpt = ChatGPT(api_key, system_prompt, url, max_tokens)\n finally:\n # 初始化ChatGPT Shell(设置代理)\n # print(\"Proxy\")\n chatgpt = ChatGPT(api_key, system_prompt, url, max_tokens, proxy_url, proxy_username, proxy_password)\n distribution = distro.name()\n kernel_version = os.uname().release\n hostname = os.uname().nodename\n print(f\"\"\"\n ██████╗██╗ ██╗ █████╗ ████████╗ ██████╗ ██████╗ ████████╗\n██╔════╝██║ ██║██╔══██╗╚══██╔══╝██╔════╝ ██╔══██╗╚══██╔══╝\n██║ ███████║███████║ ██║ ██║ ███╗██████╔╝ ██║ \n██║ ██╔══██║██╔══██║ ██║ ██║ ██║██╔═══╝ ██║ \n╚██████╗██║ ██║██║ ██║ ██║ ╚██████╔╝██║ ██║ \n ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ \n \n 您好,{username}!欢迎使用ChatGPT Shell,您的计算机信息:\n 操作系统:{distribution}\n 内核版本:{kernel_version}\n 用户名:{username}\n 主机名:{hostname}\n\n 如需帮助,请输入help\n \"\"\")\n failed = False\n # 设置历史记录长度\n readline.set_history_length(1000)\n try:\n readline.read_history_file(os.path.expanduser('~/.chatgpt_history'))\n except FileNotFoundError:\n pass\n while True:\n try:\n # 获取用户的输入\n print(f\"{'(x)' if failed else ''} ChatGPT {'#' if os.getuid() == 0 else '$'}\", end=\"\\n> \")\n # readline.set_pre_input_hook(pre_input_hook)\n user_input = input()\n readline.write_history_file(os.path.expanduser('~/.chatgpt_history'))\n # 判断用户输入\n if user_input.startswith(\"chat \"):\n message = user_input[5:]\n chatgpt_reply = chatgpt.send_message(message)\n if chatgpt_reply is not None:\n print(chatgpt_reply)\n elif user_input.startswith(\"search \"):\n command = user_input[7:]\n chatgpt_reply = chatgpt.send_message(f\"{command}命令的作用是什么?如何使用和安装这个命令?\")\n if chatgpt_reply is not None:\n print(chatgpt_reply)\n elif user_input == \"exit\":\n print(\"程序已退出\")\n sys.exit(0)\n elif user_input == \"help\":\n print(f\"\"\"您好,{username},这里是ChatGPT Shell的专用命令\n [command] 直接执行命令\n chat [message] 与ChatGPT进行对话(支持连续对话)\n exit 退出ChatGPT Shell\n help 查看帮助信息\n exec [message] 使用自然语言的方式执行命令:例如:exec 列出当前目录下的文件,包括隐藏文件,此时ChatGPT将会执行ls -a命令。\n history 查看历史命令\n search [command] 在ChatGPT Shell中搜索某个命令,例如:search command 将会搜索到command命令的使用/安装方法。\n whatswrong [filename] 读取源代码文件,检查源代码的语法错误,并提供修复建议。\n \"\"\")\n elif user_input == \"history\":\n for i in range(1, readline.get_current_history_length() + 1):\n print(f\"{i} {readline.get_history_item(i)}\")\n elif user_input.startswith(\"cd \"):\n path = user_input[3:]\n if path.startswith(\"~\"):\n path = os.path.expanduser(path)\n if path.startswith(\"./\"):\n path = os.path.abspath(path)\n if path.startswith(\"../\"):\n path = os.path.abspath(path)\n if os.path.exists(path):\n os.chdir(path)\n else:\n print(f\"cd: {path}: 没有那个文件或目录\")\n elif user_input.startswith(\"whatswrong \"):\n # 判断文件是否存在\n filename = user_input[11:]\n if not os.path.exists(filename):\n print(f\"whatswrong: {filename}: 没有那个文件或目录\")\n continue\n else:\n # 读取文件\n with open(filename, \"r\") as f:\n code = f.read()\n # 检查文件\n 
reply = chatgpt.send_message(f\"我编写了一段源代码,但是不知道是否有语法错误,请你帮我检查一下。源代码内容:\\n\\n {code}\")\n if reply is not None:\n print(reply)\n elif user_input.startswith(\"exec \"):\n # 判断文件是否存在\n message = user_input[5:]\n reply = chatgpt.send_message(f\"我想要{message},请你直接给出命令,不要对内容进行复述,不要回复任何其他内容,由于我要将你的回答传递给计算机进行执行,如果你回答了其他内容,将会执行失败。注意:1. 如果命令必须以root身份执行,请在命令前加上sudo(例如某些发行版本使用包管理器安装应用)。\\n2. 如果命令有危险性,请拒绝执行,并使用echo命令输出该命令存在的风险。\\n3. 以下为示例:\\n列出当前目录下的文件,包括隐藏文件\\tls -a\\n查看Linux内核版本\\tuname -r\\n切换为root用户\\tsudo su\")\n if reply is not None:\n # 执行命令\n print(f\"根据你的回复,我将执行命令:{reply}\")\n try:\n result = subprocess.run(reply, shell=True)\n if result.returncode == 0:\n failed = False\n else:\n failed = True\n except subprocess.TimeoutExpired:\n print(\"命令执行超时\")\n failed = True\n except FileNotFoundError:\n print(\"命令不存在\")\n failed = True\n except Exception as e:\n print(e)\n failed = True\n else:\n # 执行命令\n try:\n cmd = f\"{user_input}\"\n result = subprocess.run(cmd, shell=True)\n if result.returncode == 0:\n failed = False\n else:\n failed = True\n except subprocess.TimeoutExpired:\n print(\"命令执行超时\")\n failed = True\n except FileNotFoundError:\n print(\"命令不存在\")\n failed = True\n except Exception as e:\n print(e)\n failed = True\n except KeyboardInterrupt:\n failed = True\n continue\n except EOFError:\n print(\"程序已退出\")\n sys.exit(0)\n\n", "repo_name": "senge-dev/chatgpt-shell", "sub_path": "chatgpt.py", "file_name": "chatgpt.py", "file_ext": "py", "file_size_in_byte": 11838, "program_lang": "python", "lang": "zh", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 74, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 101, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 104, "usage_type": "call"}, {"api_name": "distro.name", "line_number": 105, "usage_type": "call"}, {"api_name": "configparser.NoSectionError", "line_number": 112, "usage_type": "attribute"}, {"api_name": "configparser.NoOptionError", "line_number": 112, "usage_type": "attribute"}, {"api_name": "distro.name", "line_number": 122, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 123, "usage_type": "call"}, {"api_name": "os.uname", "line_number": 124, "usage_type": "call"}, {"api_name": "readline.set_history_length", "line_number": 143, "usage_type": "call"}, {"api_name": "readline.read_history_file", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.getuid", "line_number": 151, "usage_type": "call"}, {"api_name": "readline.write_history_file", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 154, "usage_type": "call"}, {"api_name": "os.path", "line_number": 154, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 168, "usage_type": "call"}, {"api_name": "readline.get_current_history_length", "line_number": 181, "usage_type": "call"}, {"api_name": "readline.get_history_item", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 186, "usage_type": "call"}, {"api_name": "os.path", "line_number": 186, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": 
"os.path.abspath", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 192, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 198, "usage_type": "call"}, {"api_name": "os.path", "line_number": 198, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 217, "usage_type": "call"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 222, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 235, "usage_type": "call"}, {"api_name": "subprocess.TimeoutExpired", "line_number": 240, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "73798966564", "text": "from binance.client import Client\nimport datetime\nimport collections\nfrom binance.enums import *\nfrom binance.exceptions import *\n\n\napi_key = \"\"\napi_secret = \"\"\nclient = Client(api_key, api_secret)\n\n#取得配列の内訳\n#[OpenTime,Open,High,Low,Close,Volume,CloseTime,QuoteAssetVolume,NumberOfTrades,TakerBuyBaseAssetVolume,TakerBuyQuoteAssetVolume,Ignore]\n\ndef date_cul(servertime): #サーバータイムを日付に変換する\n time = float(servertime) / 1000\n dt = datetime.datetime.fromtimestamp(time)\n return dt\n\ndef time_cul(date): #日付をサーバータイムに変換する\n time = date.timestamp() * 1000\n return time\n\ndef maxmin_all():\n max,min = maxmin_month()\n j = 0\n hourA = []\n hourB = []\n hourX = []\n hourN = []\n #print(max)\n for i in max:\n #最大最小値の毎時だけ取得\n hourA.append(min[j][0].hour)\n hourB.append(max[j][0].hour)\n j = j + 1\n day = max[j-1][0] - max[0][0] + datetime.timedelta(days=2) #何日間統計したか\n print(\"統計日数:\",day.days,\"日\")\n hourN = collections.Counter(hourA).most_common()\n hourX = collections.Counter(hourB).most_common()\n print(\"最多最安時間:\",hourN[0][0],\"時(\",hourN[0][1],\"回)\")\n print(\"最多最高時間:\", hourX[0][0], \"時(\", hourX[0][1], \"回)\")\n #print(\"HOUR:\", hourA)\n\ndef maxmin_month():\n #klines = client.get_historical_klines(\"XEMUSDT\", Client.KLINE_INTERVAL_1HOUR, \"31 Jan, 2021\", \"1 Mar, 2021\")\n klines = client.get_historical_klines(\"XEMUSDT\", KLINE_INTERVAL_1HOUR, \"28 Feb, 2020\")\n # 配列変数初期化\n j = 0\n dt1 = datetime.datetime(2020, 12, 1, 0, 0, 0, 0)\n dt2 = datetime.datetime(2021, 3, 1, 0, 0, 0, 0)\n su1 = int(time_cul(dt1))\n su2 = int(time_cul(dt2))\n allmax = []\n allmin = []\n for i in klines:\n openP = []\n openT = []\n if len(klines)> j: kl = int(klines[j][0])\n if (kl >= su1 and kl < su2):\n for k in range(24):\n # 取得ごとの最大最小価格を取得\n if len(klines) > j:\n openP.append(klines[j][1])\n openT.append(klines[j][0])\n j = j + 1\n # 配列内の最大最小値を取得\n maxC = []\n minC = []\n maxA = max(openP)\n minA = min(openP)\n # 配列内の最大最小値の位置を取得\n maxS = openP.index(maxA)\n minS = openP.index(minA)\n\n maxTime = date_cul(openT[maxS])\n minTime = date_cul(openT[minS])\n\n maxC.append(maxTime)\n maxC.append(maxA)\n minC.append(minTime)\n minC.append(minA)\n\n allmax.append(maxC)\n allmin.append(minC)\n\n print(\"MAXTIME:\",maxTime , \"Price:\", maxA, \"MINTIME:\",minTime ,\"Price:\", minA)\n else:\n j = j + 1\n return allmax,allmin\n\n\n\n\n\ndef max_min():\n #ローソク足取得\n klines = client.get_historical_klines(\"XEMUSDT\", Client.KLINE_INTERVAL_1MINUTE, \"1 day ago UTC\")\n\n\n #現在のサーバータイムを取得\n time_res = client.get_server_time()\n print(date_cul(float(time_res[\"serverTime\"])))\n\n #配列変数初期化\n maxP = 
[]\n openT = []\n minP = []\n j = 0\n\n for i in klines:\n # 取得ごとの最大最小価格を取得\n maxP.append(klines[j][2])\n minP.append(klines[j][3])\n openT.append(klines[j][0])\n j = j + 1\n\n #配列内の最大最小値を取得\n maxA = max(maxP)\n minA = min(minP)\n #配列内の最大最小値の位置を取得\n maxS = maxP.index(maxA)\n minS = minP.index(minA)\n\n print(\"MAXTIME:\",date_cul(klines[maxS][0]),\"Price:\",maxA)\n print(\"MINTIME:\",date_cul(klines[minS][0]),\"Price:\",minA)\n\n\nif __name__ == '__main__':\n maxmin_all()\n", "repo_name": "jankendo/buggy1234", "sub_path": "coin.py", "file_name": "coin.py", "file_ext": "py", "file_size_in_byte": 3867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "binance.client.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 37, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 51, "usage_type": "call"}, {"api_name": "binance.client.Client.KLINE_INTERVAL_1MINUTE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "binance.client.Client", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "13264157261", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nfrom tensorflow import keras\nimport tensorflow_addons as tfa\nimport seaborn as sns\nimport tensorflow_hub as hub\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\n\n\n# In[2]:\n\n\ntest_data, info = tfds.load(\"imdb_reviews\",split='test', as_supervised=True, with_info=True)\ntrain_full, info = tfds.load(\"imdb_reviews\",split='train', as_supervised=True, with_info=True)\n\n\n# In[3]:\n\n\ndef preprocess(X_batch, y_batch):\n X_batch = tf.strings.substr(X_batch, 0, 300)\n X_batch = tf.strings.regex_replace(X_batch, rb\"\", b\" \")\n X_batch = tf.strings.regex_replace(X_batch, b\"[^a-zA-Z']\", b\" \")\n X_batch = tf.strings.split(X_batch)\n return X_batch.to_tensor(default_value=b\"\"), y_batch\n\ndef encode_words(X_batch, y_batch):\n return table.lookup(X_batch), y_batch\n\n\n# In[4]:\n\n\nvocabulary = Counter()\nfor X_batch, y_batch in train_full.batch(32).map(preprocess):\n for review in X_batch:\n vocabulary.update(list(review.numpy()))\n \nvocab_size = 10000\ntruncated_vocabulary = [\n word for word, count in vocabulary.most_common()[24:vocab_size+24]]\n\nwords = tf.constant(truncated_vocabulary)\nword_ids = tf.range(len(truncated_vocabulary), dtype=tf.int64)\nvocab_init = tf.lookup.KeyValueTensorInitializer(words, word_ids)\nnum_oov_buckets = 1000\ntable = tf.lookup.StaticVocabularyTable(vocab_init, num_oov_buckets)\n\n\n# In[5]:\n\n\nmodel = keras.models.load_model('IMDB_sentiment_pred_best.h5')\n\n\n# In[52]:\n\n\ndef pred(reviews):\n inputs = reviews.batch(1).map(preprocess).map(encode_words).prefetch(1)\n for X_batch, y_batch in inputs:\n for review, label in zip(X_batch, y_batch.numpy()):\n y_pred = model.predict(review)\n print('Prediction: Positive Probability - ', y_pred, 'Negative Probability - ',1-y_pred)\n print(\"Label: \", label, \"= Positive\" if label else \"= 
Negative\")\n\n\n# In[53]:\n\n\ntest = test_data.batch(1).take(5)\npred(test)\n\n", "repo_name": "emilsnyman/Applied_ML", "sub_path": "Assignment_5/runModel.py", "file_name": "runModel.py", "file_ext": "py", "file_size_in_byte": 2042, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tensorflow_datasets.load", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow_datasets.load", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.strings.substr", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.strings", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 29, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.strings.regex_replace", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.strings", "line_number": 30, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 30, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.strings.regex_replace", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.strings", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 31, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.strings.split", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.strings", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 32, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.constant", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2", "line_number": 51, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.range", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2", "line_number": 52, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.int64", "line_number": 52, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2.lookup.KeyValueTensorInitializer", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.lookup", "line_number": 53, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 53, "usage_type": "name"}, {"api_name": "tensorflow.compat.v2.lookup.StaticVocabularyTable", "line_number": 55, "usage_type": "call"}, {"api_name": "tensorflow.compat.v2.lookup", "line_number": 55, "usage_type": "attribute"}, {"api_name": "tensorflow.compat.v2", "line_number": 55, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.models", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "29402699188", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 13 10:38:22 2017\n\n@author: Home\n\nGoogle Sheets Project\n\nVersion 1\nTo be included:\n1. Read from a specified google worksheet within a sheet\n2. 
write a full table to a worksheet within a sheet\n\n\n\"\"\"\n\n#file path is to the location of the json file from google\ndef auth(file_path):\n from oauth2client.service_account import ServiceAccountCredentials\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name(file_path, scope)\n return creds\n\n\ndef read(sheet_name,worksheet_name,creds):\n import gspread\n import pandas as pd\n\n #Authorizations and sheet selection\n client = gspread.authorize(creds)\n sheet = client.open(sheet_name)\n worksheet = sheet.worksheet(worksheet_name)\n\n #gets all data from the worksheet in the form of a dict\n data = worksheet.get_all_records()\n data = pd.DataFrame.from_dict(data, dtype = float)\n \n #Pandas automatically re-orders columns alphabetically\n #In-order to bring in data in the same format one needs to bring in the row by itself and use it to arrange the df\n rowcol_list = worksheet.row_values(1)\n\n col_list = []\n for i in range(0, len(data.columns)):\n col_list.append(rowcol_list[i])\n \n return data[col_list] \n\n\ndef write(data, sheet_name,worksheet_name,creds):\n import gspread\n import pandas as pd\n \n #Authorizations and sheet selection\n client = gspread.authorize(creds)\n sheet = client.open(sheet_name)\n worksheet = sheet.worksheet(worksheet_name)\n \n #Gspread doesn't account for column names\n #This creates a new df to add to the top of the original df\n cols = data.columns \n col_names = {}\n for i in cols:\n col_names[i] = i \n insert_names = pd.DataFrame(col_names, index = [0])\n data = pd.concat([insert_names, data]).reset_index(drop = True)\n data = data[cols]\n \n\n #Worksheets need to be able to accomodate all cells that are part of an update or it will fail.\n #This re-szes the worksheet ahead of the update.\n worksheet.resize(len(data), len(data.columns))\n \n #creates a list of all the necessary cells based on the size of the df \n cell_range = '{col_i}{row_i}:{col_f}{row_f}'.format(\n col_i=chr((0) + ord('A')), # converts number to letter\n col_f=chr((len(cols)-1) + ord('A')), # subtract 1 because of 0-indexing\n row_i=1,\n row_f=len(data))\n cell_list = worksheet.range(cell_range)\n\n #Transforms data into one list to work with cell_range\n values = []\n for q in range(0, len(data)):\n for i in range(0, len(cols)):\n values.append(data[cols[i]][q])\n \n for i, val in enumerate(values): \n cell_list[i].value = val \n\n\n #I don't think gspread can't send more than 50,000 cells at once during a multi cell sheet update.\n #This creates bins of 45,000 cells to send off, and when a == len(cell_list) the remaining bin that didn't reach 45,000 cells will be sent\n chunk = []\n a = 0\n for i in range(0, len(cell_list)):\n if len(chunk) < 45000:\n chunk.append(cell_list[i])\n a += 1\n if a == len(cell_list):\n worksheet.update_cells(chunk) \n elif len(chunk) == 45000:\n worksheet.update_cells(chunk)\n chunk = []\n a += 1\n \n return print('Upload to Worksheet:%r within Sheet:%r Complete' %(worksheet_name, sheet_name))\n\n \n\n\n\n", "repo_name": "rindlerr1/sheets", "sub_path": "sheets.py", "file_name": "sheets.py", "file_ext": "py", "file_size_in_byte": 3577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name", "line_number": 22, "usage_type": "call"}, {"api_name": "oauth2client.service_account.ServiceAccountCredentials", "line_number": 22, "usage_type": 
"name"}, {"api_name": "gspread.authorize", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "attribute"}, {"api_name": "gspread.authorize", "line_number": 55, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "4760924777", "text": "from twython import Twython\nimport requests, json\nfrom bs4 import BeautifulSoup\nimport re, random\nimport pandas as pd\n\n\nwith open('../Twitter_Keys/APP_KEY') as f:\n APP_KEY = f.readlines()\nAPP_KEY = APP_KEY[0].strip()\n\nwith open('../Twitter_Keys/APP_SECRET') as f:\n APP_SECRET = f.readlines()\nAPP_SECRET = APP_SECRET[0].strip()\n\nwith open('../Twitter_Keys/OAUTH_TOKEN') as f:\n OAUTH_TOKEN = f.readlines()\nOAUTH_TOKEN = OAUTH_TOKEN[0].strip()\n\nwith open('../Twitter_Keys/OAUTH_SECRET') as f:\n OAUTH_TOKEN_SECRET = f.readlines()\nOAUTH_TOKEN_SECRET = OAUTH_TOKEN_SECRET[0].strip()\n\n\ntwitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n\ndef get_stat_cast_winner():\n statcast_table = pd.read_csv('./Todays_statcast.csv')\n hardest_hit_row = statcast_table['launch_speed'].idxmax()\n player = statcast_table.loc[hardest_hit_row,'player_name']\n comma = player.find(',')\n player = player[comma+1:] + ' ' + player[:comma]\n velo = statcast_table.loc[hardest_hit_row,'launch_speed']\n date = statcast_table.loc[hardest_hit_row,'game_date']\n \n return player, velo, date\n \n\ndef postOnTwitter():\n player, velo, date = get_stat_cast_winner()\n tweet_string = player + \" had the hardest hit ball on \" + date + \" in MLB with an exit velo of \" + str(velo) + \" mph. I am the at bat bot. 
This action was performed automatically.\"\n print(tweet_string)\n twitter.update_status(status=tweet_string)\n \npostOnTwitter()\n", "repo_name": "bwheel12/At_Bat_Twit_Bot", "sub_path": "at_bat_bot.py", "file_name": "at_bat_bot.py", "file_ext": "py", "file_size_in_byte": 1470, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "twython.Twython", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "39935488337", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 21 19:01:17 2019\n\n@author: User\n\"\"\"\n\nimport shapefile #Mengambil data dari shapefile\nw=shapefile.Writer('soal10', shapeType=5) #membuat file dengan nama soal 10 dan untuk membua polygon menggunakan shapefile=5\nw.field(\"k1\",\"C\") #Membuat tabel dengan kolom pertama\nw.field(\"k2\",\"C\") #Membuat tabel dengan kolom kedua\nw.record(\"Utara\",\"Sepuluh\") #isi dari tabel Utara adalah isi dari kolom1 dan Sepuluh kolom2\nw.record(\"Selatan\",\"Sebelas\") #isi dari tabel Selatan adalah isi dari kolom1 dan Sebelas kolom2\nw.record(\"Kamu\",\"Selingkuh\") #isi dari tabel Kamu adalah isi dari kolom1 dan Selingkuh kolom2\nw.record(\"Raimu\",\"TakAmplas\") #isi dari tabel Raimu adalah isi dari kolom1 dan TakAmplas kolom2\nw.record(\"By\",\"RadhycaLz\") #isi dari tabel By adalah isi dari kolom1 dan RadhycaLz kolom2\nw.poly([[[2,2],[8,2],[8,8],[2,8],[2,2]]]) #membuat garis dengan menghubungkan titik titik yang dibuat dan memberi warna di dalam garis yg di hubungkan\nw.poly([[[9,2],[15,2],[15,8],[9,8],[9,2]]]) #membuat garis dengan menghubungkan titik titik yang dibuat dan memberi warna di dalam garis yg di hubungkan\nw.poly([[[2,0],[8,0],[8,-6],[2,-6],[2,0]]]) #membuat garis dengan menghubungkan titik titik yang dibuat dan memberi warna di dalam garis yg di hubungkan\nw.poly([[[9,0],[15,0],[15,-6],[9,-6],[9,0]]]) #membuat garis dengan menghubungkan titik titik yang dibuat dan memberi warna di dalam garis yg di hubungkan\nw.poly([[[20,0],[26,0],[26,-6],[20,-6],[20,0]]]) #membuat garis dengan menghubungkan titik titik yang dibuat dan memberi warna di dalam garis yg di hubungkan\nw.close() #penutup\n", "repo_name": "Sistem-Informasi-Geografi-2017/SIG-3B", "sub_path": "src/1/1174050/10.py", "file_name": "10.py", "file_ext": "py", "file_size_in_byte": 1620, "program_lang": "python", "lang": "id", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "shapefile.Writer", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "22133159803", "text": "import torch\nfrom torch import optim\n\nfrom minimagen.Imagen import Imagen\nfrom minimagen.Unet import Unet\nfrom minimagen.t5 import get_encoded_dim\n\n# Constants\nBATCH_SIZE = 4 # Batch size training data\nMAX_NUM_WORDS = 64 # Max number of words allowed in a caption\nIMG_SIDE_LEN = 128 # Side length of the training images/final output image from Imagen\nEPOCHS = 5 # Number of epochs to train from\nT5_NAME = \"t5_small\" # Name of the T5 encoder to use\n\n# Captions to generate samples for\nCAPTIONS = [\n 'a happy dog',\n 'a big red house',\n 'a woman standing on a beach',\n 'a man on a bike'\n]\n\n# Get device\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# Get encoding dimension of the text encoder\ntext_embed_dim = get_encoded_dim(T5_NAME)\n\n# Create Unets\nbase_unet = Unet(\n dim=32,\n text_embed_dim=text_embed_dim,\n cond_dim=64,\n dim_mults=(1, 2, 
4),\n num_resnet_blocks=2,\n layer_attns=(False, False, True),\n layer_cross_attns=(False, False, True),\n attend_at_middle=True\n)\n\nsuper_res_unet = Unet(\n dim=32,\n text_embed_dim=text_embed_dim,\n cond_dim=512,\n dim_mults=(1, 2, 4),\n num_resnet_blocks=(2, 4, 8),\n layer_attns=(False, False, True),\n layer_cross_attns=(False, False, True),\n attend_at_middle=False\n)\nprint(\"Created Unets\")\n\n# Create Imagen from Unets\nimagen = Imagen(\n unets=(base_unet, super_res_unet),\n image_sizes=(32, 128),\n timesteps=10,\n cond_drop_prob=0.1\n).to(device)\nprint(\"Created Imagen\")\n\n# Create example data\ntext_embeds = torch.randn(\n BATCH_SIZE,\n MAX_NUM_WORDS,\n text_embed_dim).to(device)\n\ntext_masks = torch.ones(\n BATCH_SIZE,\n MAX_NUM_WORDS).bool().to(device)\n\nimages = torch.randn(\n BATCH_SIZE,\n 3,\n IMG_SIDE_LEN,\n IMG_SIDE_LEN).to(device)\nprint(\"Created example data\")\n\n# Create optimizer\noptimizer = optim.Adam(imagen.parameters())\nprint(\"Created optimzer\")\n\n# Train on example data\nprint(\"Training Imagen...\")\nfor j in range(EPOCHS):\n for i in (1, 2):\n optimizer.zero_grad()\n loss = imagen(images, text_embeds=text_embeds, text_masks=text_masks, unet_number=i)\n loss.backward()\n optimizer.step()\n\n# Generate images with \"trained\" model\nprint(\"Sampling from Imagen...\")\nimages = imagen.sample(texts=CAPTIONS, cond_scale=3., return_pil_images=True)\n\n# Save output PIL images\nprint(\"Saving Images\")\nfor idx, img in enumerate(images):\n img.save(f'Generated_Image_{idx}.png')\n", "repo_name": "adilanka/VisuaLink", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2461, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 24, "usage_type": "attribute"}, {"api_name": "minimagen.t5.get_encoded_dim", "line_number": 27, "usage_type": "call"}, {"api_name": "minimagen.Unet.Unet", "line_number": 30, "usage_type": "call"}, {"api_name": "minimagen.Unet.Unet", "line_number": 41, "usage_type": "call"}, {"api_name": "minimagen.Imagen.Imagen", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 80, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "37330753447", "text": "from collections import OrderedDict\nfrom datetime import datetime\n\nimport pytest\n\nfrom statcert import Certificate\nfrom . 
import TEST_FILES\n\n\n@pytest.fixture(\n params=[\n {\n \"subject\": OrderedDict(commonName=\"www.google.com\"),\n \"issuer\": OrderedDict(\n commonName=\"GTS CA 1C3\",\n organizationName=\"Google Trust Services LLC\",\n countryName=\"US\",\n ),\n \"not_before\": datetime(2022, 1, 10, 3, 35, 32),\n \"not_after\": datetime(2022, 4, 4, 3, 35, 31),\n \"san\": [\"www.google.com\"],\n \"serial\": \"afff8ea23c08f71f0a000000012e077a\",\n \"key\": (\"EC\", 256),\n \"type\": \"DV\",\n \"cert\": \"google.der\"\n },\n {\n \"subject\": OrderedDict(\n commonName=\"twitter.com\",\n organizationName=\"Twitter, Inc.\",\n localityName=\"San Francisco\",\n stateOrProvinceName=\"California\",\n countryName=\"US\",\n ),\n \"issuer\": OrderedDict(\n commonName=\"DigiCert TLS RSA SHA256 2020 CA1\",\n organizationName=\"DigiCert Inc\",\n countryName=\"US\",\n ),\n \"not_before\": datetime(2022, 1, 9, 0, 0, 0),\n \"not_after\": datetime(2023, 1, 8, 23, 59, 59),\n \"san\": [\"twitter.com\", \"www.twitter.com\"],\n \"serial\": \"6788dcc8560c6793cb5921d644412a1\",\n \"key\": (\"RSA\", 2048),\n \"type\": \"OV\",\n \"cert\": \"twitter.der\"\n },\n {\n \"subject\": OrderedDict(\n commonName=\"www.bb.com.br\",\n organizationalUnit=\"DITEC\",\n organizationName=\"Banco do Brasil S.A.\",\n stateOrProvinceName=\"Distrito Federal\",\n countryName=\"BR\",\n businessCategory=\"Private Organization\",\n jurisdictionOfIncorporationCountryName=\"BR\",\n serialNumber=\"00.000.000/0001-91\",\n ),\n \"issuer\": OrderedDict(\n commonName=\"Sectigo RSA Extended Validation Secure Server CA\",\n organizationName=\"Sectigo Limited\",\n localityName=\"Salford\",\n stateOrProvinceName=\"Greater Manchester\",\n countryName=\"GB\",\n ),\n \"not_before\": datetime(2021, 12, 6, 0, 0, 0),\n \"not_after\": datetime(2022, 12, 6, 23, 59, 59),\n \"san\": [\n \"www.bb.com.br\",\n \"bb.com.br\",\n \"www.bancobrasil.com.br\",\n \"www.bancodobrasil.com.br\"\n ],\n \"serial\": \"941ab95fcbcc437166569969fdf8cef2\",\n \"key\": (\"RSA\", 2048),\n \"type\": \"EV\",\n \"cert\": \"bb.der\"\n },\n ],\n ids=[\"dv-google\", \"ov-twitter\", \"ev-bb\"]\n)\ndef cert_info(request):\n param = {**request.param}\n with open(TEST_FILES/\"certs\"/param[\"cert\"], \"rb\") as f:\n param[\"cert\"] = Certificate(f.read())\n return param\n\n\ndef test_model_attributes(cert_info):\n\n cert = cert_info[\"cert\"]\n\n assert cert\n assert cert_info[\"subject\"] == cert.subject\n assert cert_info[\"issuer\"] == cert.issuer\n assert cert_info[\"not_before\"] == cert.not_before\n assert cert_info[\"not_after\"] == cert.not_after\n assert cert_info[\"san\"] == cert.subject_alt_names\n assert cert_info[\"serial\"] == cert.serial_number\n assert cert_info[\"key\"] == cert.key_type\n assert cert_info[\"type\"] == cert.policy_type\n", "repo_name": "SamerW/statcert", "sub_path": "tests/test_model.py", "file_name": "test_model.py", "file_ext": "py", "file_size_in_byte": 3519, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "statcert.Certificate", "line_number": 85, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 10, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "call"}, {"api_name": 
"collections.OrderedDict", "line_number": 28, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 49, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "40771494783", "text": "#!/usr/bin/env python\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This code was initially a refactoring of gl3w retrieved from the url below. It was gradually\n# rewritten until little if any remained from the upstream code.\n# https://github.com/skaslev/gl3w/blob/5f8d7fd191ba22ff2b60c1106d7135bb9a335533/gl3w_gen.py\n\n# Allow Python 2.6+ to use the print() function\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport re\n\nparser = argparse.ArgumentParser(description='gla generator script')\nparser.add_argument('--quiet', action='store_true', help='quiet output')\nparser.add_argument('--verbose', action='store_true', help='verbose output')\nparser.add_argument('--header_only',\n action='store_true',\n help=\"generate a header-only library guarded by GLA_IMPLEMENTATION\")\nparser.add_argument('--input_dir',\n metavar='D',\n type=str,\n default=os.path.dirname(os.path.realpath(__file__)),\n help='the input directory')\nparser.add_argument('--output_directory', metavar='D', type=str, default='', help='the output directory')\nparser.add_argument('--minimum_profile',\n type=str,\n metavar='V',\n default='1.0',\n help='the lowest OpenGL profile that the generated code will support')\nparser.add_argument('--maximum_profile',\n type=str,\n metavar='V',\n default='99.99',\n help='the highest OpenGL profile that the generated code will support')\nparser.add_argument('--extension',\n action='append',\n metavar='E',\n type=str,\n dest='extensions',\n default=[],\n help='an extension that the generated code will support')\nargs = parser.parse_args()\n\nextensions = args.extensions\n# noinspection PyStatementEffect\nextensions.sort\n\nquiet = args.quiet is not None and args.quiet\nverbose = not quiet and args.verbose\n\nheader_only = args.header_only is not None and args.header_only\n\nif not quiet:\n print('Configuration:')\n print(' Minimum OpenGL Profile: ' + args.minimum_profile)\n print(' Maximum OpenGL Profile: ' + (args.maximum_profile if '99.99' != args.maximum_profile else '-'))\n print(' Header Only: ' + ('true' if header_only else 'false'))\n print(' Supported Extensions:')\n for extension in extensions:\n print(' * ' + extension)\n\nif verbose:\n print('Loading API Headers to scan')\n\n# Maps name of header filename => group\nheader_groups = {}\n# Maps name of header filename => [group] that have been 
skipped\nheader_suppressed_groups = {}\n# Maps name of group => list of functions\ngroups = {}\n# list of versions that are above minimum but below or equal to maximum.\n# Used to generate guards in code.\noptional_versions = []\nfunctions = []\n# list of versions that are in groups that are at or below minimum version.\nrequired_functions = []\nvoid_functions = []\ngroup_pattern = re.compile(r'#ifndef (GL_\\w+)')\nversion_group_pattern = re.compile(r'GL_VERSION_(\\d)_(\\d)')\nfunction_pattern = re.compile(r'GLAPI(.*)APIENTRY\\s+(\\w+)')\n\ngroup = None\nskip_current_group = True\nrequired_group = False\n\nheader_files = ['GL/glcorearb.h', 'GL/glext.h']\n\nfor filename in header_files:\n with open(os.path.join(args.input_dir, 'include/' + filename), 'r') as f:\n header_groups[filename] = []\n header_suppressed_groups[filename] = []\n for line in f:\n m = group_pattern.match(line)\n if m:\n group = m.group(1)\n required_group = False\n v = version_group_pattern.match(group)\n if v:\n v = v.group(1) + '.' + v.group(2)\n required_group = args.minimum_profile >= group\n if args.minimum_profile < v <= args.maximum_profile and group not in optional_versions:\n optional_versions.append(group)\n if v > args.maximum_profile:\n if verbose:\n print('Skipping group ' + group + ' as it exceeds maximum supported profile version')\n skip_current_group = True\n else:\n skip_current_group = False\n else:\n skip_current_group = not (group in extensions)\n if skip_current_group and verbose:\n print('Skipping group ' + group + ' as this is not a supported extension')\n if skip_current_group:\n header_suppressed_groups[filename].append(group)\n else:\n header_groups[filename].append(group)\n if not skip_current_group:\n m = function_pattern.match(line)\n if not m:\n continue\n function = m.group(2)\n if function in functions:\n continue\n if not groups.get(group):\n groups[group] = []\n groups[group].append(function)\n if required_group and function not in required_functions:\n required_functions.append(function)\n functions.append(function)\n if ' void ' == m.group(1):\n void_functions.append(function)\n\nrequired_functions.sort()\noptional_versions.sort()\nfunctions.sort()\n\nif verbose:\n print('Wrapper methods by version:')\n for group in groups.keys():\n print(' ' + group + ': ' + str(len(groups[group])))\n print('Group Count by Header:')\n for header in header_groups.keys():\n print(' ' + header + ': ' +\n str(len(header_groups[header])) + ' included, ' +\n str(len(header_suppressed_groups[header])) + ' excluded')\n\nif verbose:\n print('Loading templates')\nheader_template = open(os.path.join(args.input_dir, 'templates/include/GLA/gla.h'), 'r')\nimplementation_template = open(os.path.join(args.input_dir, 'templates/src/GLA/gla.c'), 'r')\n\ngroups_present = []\nincludes_lines = []\nfor filename in header_files:\n # We define all the groups we do not want so that they do not get defined and nor do their constants\n for group in header_suppressed_groups[filename]:\n includes_lines.append(\"#define \" + group + \"\\n\")\n for group in header_groups[filename]:\n if group in groups_present:\n # We have to undef guard that was defined in previous header as this header includes a similar section\n includes_lines.append(\"#undef \" + group + \"\\n\")\n includes_lines.append(\"#include \\\"\" + filename + \"\\\"\\n\")\n # We add any group that was defined in header to a list\n for group in header_groups[filename]:\n if not group in groups_present:\n groups_present.append(group)\n # We undefine the groups we do 
not want so not to leave incorrect defines present in context\n for group in header_suppressed_groups[filename]:\n includes_lines.append(\"#undef \" + group + \"\\n\")\n\ninterface_lines = []\n\nif optional_versions:\n interface_lines.append('union GLAVersions {\\n')\n interface_lines.append(' bool versions[{0}];\\n'.format(len(optional_versions)))\n interface_lines.append(' struct {\\n')\n for version in optional_versions:\n interface_lines.append(' bool {0};\\n'.format(version[3:]))\n interface_lines.append(r''' } version;\n};\n\nGLA_API extern union GLAVersions glaVersions;\n\n''')\n for version in optional_versions:\n interface_lines.append(\n '#define {0: <48} glaVersions.version.{1}\\n'.format('GLA_' + version[3:], version[3:]))\n interface_lines.append('\\n')\n\nif extensions:\n interface_lines.append('union GLAExtensions {\\n')\n interface_lines.append(' bool extensions[{0}];\\n'.format(len(extensions)))\n interface_lines.append(' struct {\\n')\n for extension in extensions:\n interface_lines.append(' bool {0};\\n'.format(extension[3:]))\n interface_lines.append(r''' } extension;\n};\n\nGLA_API extern union GLAExtensions glaExtensions;\n\n''')\n for extension in extensions:\n interface_lines.append(\n '#define {0: <48} glaExtensions.extension.{1}\\n'.format('GLA_' + extension[3:], extension[3:]))\n interface_lines.append('\\n')\n\ninterface_lines.append('union GLAFunctions {\\n')\ninterface_lines.append(' GLAglFunction functions[{0}];\\n'.format(len(functions)))\ninterface_lines.append(' struct {\\n')\nfor function in functions:\n interface_lines.append(' {0: <55} {1};\\n'.format('PFN{0}PROC'.format(function.upper()), function[2:]))\ninterface_lines.append(r''' } function;\n};\n\nGLA_API extern union GLAFunctions glaFunctions;\n\n''')\nfor function in functions:\n interface_lines.append('#define {0: <48} {1}(glaFunctions.function.{2}(__VA_ARGS__))\\n'.\n format(function + '(...)', 'GLA_CHECK' if function in void_functions else '', function[2:]))\n\nimpl_lines = []\nimpl_lines.append(r'#define GLA_MIN_MAJOR_VERSION ' + args.minimum_profile.split('.')[0] + \"\\n\")\nimpl_lines.append(r'#define GLA_MIN_MINOR_VERSION ' + args.minimum_profile.split('.')[1] + \"\\n\")\n\nif optional_versions:\n impl_lines.append(r'''\n#define GLFW_SUPPORT_OPTIONAL_VERSIONS\n\ntypedef struct gla_version_s {\n int major;\n int minor;\n} gla_version_t;\n\nstatic const gla_version_t gla_versions[] = {\n''')\n for version in optional_versions:\n impl_lines.append(' { ' + version[11:12] + ', ' + version[13:14] + ' },\\n')\n impl_lines.append('};\\n')\n\nif extensions:\n impl_lines.append(r'''\n#define GLFW_SUPPORT_EXTENSIONS\n\nstatic const char* gla_extension_names[] = {\n''')\n for extension in extensions:\n impl_lines.append(' \"{0}\",\\n'.format(extension))\n impl_lines.append('};\\n')\n\nimpl_lines.append(r'''\n\ntypedef struct gla_function_s {\n const char* name;\n bool required;\n} gla_function_t;\n\nstatic const gla_function_t gla_functions[] = {\n''')\nfor function in functions:\n impl_lines.append(\n ' { \\\"' + function + '\\\", ' + ('true' if function in required_functions else 'false') + ' },\\n')\nimpl_lines.append('};\\n')\n\nincludes_content = ''.join(includes_lines)\ninterface_content = ''.join(interface_lines)\nimpl_content = ''.join(impl_lines)\n\nif header_only:\n interface_content = interface_content + \"\\n#ifdef GLA_IMPLEMENTATION\\n\"\n for line in implementation_template:\n interface_content = interface_content + line.replace('GLA_IMPL_CONTENT;\\n', impl_content).replace(\"#include 
\\\"GLA/gla.h\\\"\\n\", \"\")\n interface_content = interface_content + \"\\n#endif // GLA_IMPLEMENTATION\\n\"\n\ninclude_dir = os.path.join(args.output_directory, 'include/GLA/')\nif not os.path.exists(include_dir):\n os.makedirs(include_dir)\ninclude_output_filename = os.path.join(include_dir, 'gla.h')\n\nif not quiet:\n print('Generating {0}...'.format(include_output_filename))\nwith open(include_output_filename, 'wb') as f:\n for line in header_template:\n f.write(line.\n replace('GLA_INCLUDES_CONTENT;\\n', includes_content).\n replace('GLA_INTERFACE_CONTENT;\\n', interface_content).\n encode('utf-8'))\nif not header_only:\n src_dir = os.path.join(args.output_directory, 'src/GLA/')\n if not os.path.exists(src_dir):\n os.makedirs(src_dir)\n\n impl_output_filename = os.path.join(src_dir, 'gla.c')\n\n if not quiet:\n print('Generating {0}...'.format(impl_output_filename))\n with open(impl_output_filename, 'wb') as f:\n for line in implementation_template:\n f.write(line.replace('GLA_IMPL_CONTENT;\\n', impl_content).encode('utf-8'))\n", "repo_name": "realityforge/gla", "sub_path": "gla_generator.py", "file_name": "gla_generator.py", "file_ext": "py", "file_size_in_byte": 12259, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 35, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 91, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 92, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 162, "usage_type": "call"}, {"api_name": "os.path", "line_number": 162, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path", "line_number": 163, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 289, "usage_type": "call"}, {"api_name": "os.path", "line_number": 289, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 291, "usage_type": "call"}, {"api_name": "os.path", "line_number": 291, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 303, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 304, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 306, "usage_type": "call"}, {"api_name": "os.path", "line_number": 306, "usage_type": "attribute"}]} +{"seq_id": "14867332699", "text": "LOGGER_NAME = 'JapaneseTokenizer'\n\nimport logging\nimport sys\nfrom logging import getLogger, Formatter, Logger, StreamHandler\n\n# Formatter\ncustmoFormatter = Formatter(\n fmt='[%(asctime)s]%(levelname)s - %(filename)s#%(funcName)s:%(lineno)d: 
%(message)s',\n datefmt='Y/%m/%d %H:%M:%S'\n)\n\n# StreamHandler\nSTREAM_LEVEL = logging.DEBUG\nSTREAM_FORMATTER = custmoFormatter\nSTREAM = sys.stderr\n\nst_handler = StreamHandler(stream=STREAM)\nst_handler.setLevel(STREAM_LEVEL)\nst_handler.setFormatter(STREAM_FORMATTER)\n\n\ndef init_logger(logger):\n # type: (logging.Logger) -> logging.Logger\n logger.addHandler(st_handler)\n logger.propagate = False\n\n return logger\n", "repo_name": "Kensuke-Mitsuzawa/JapaneseTokenizers", "sub_path": "JapaneseTokenizer/init_logger.py", "file_name": "init_logger.py", "file_ext": "py", "file_size_in_byte": 666, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 135, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.Formatter", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "9855730980", "text": "import sys\nimport time\nimport random\n\nimport os\nimport shutil\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n# from_dir = \"DIGITE O CAMINHO DA PASTA DE DOWNLOAD (USE \" / \") no VSC\"\n# to_dir = \"DIGITE A PASTA DE CAMINHO DE DESTINO (USE \" / \") no VSC\"\n\nfrom_dir = \"C:/Users/USUARIO/Downloads\"\nto_dir = \"C:/Users/USUARIO/Desktop/Luciano Filho Arquivos/Byjus/Aula 103 - Python Movendo Arquivos\"\n\ndir_tree = {\n \"Image_Files\": ['.jpg', '.jpeg', '.png', '.gif', '.jfif'],\n \"Video_Files\": ['.mpg', '.mp2', '.mpeg', '.mpe', '.mpv', '.mp4', '.m4p', '.m4v', '.avi', '.mov'],\n \"Document_Files\": ['.ppt', '.xls', '.csv', '.pdf', '.txt'],\n \"Setup_Files\": ['.exe', '.bin', '.cmd', '.msi', '.dmg']\n}\n\n\n# Classe Gerenciadora de Eventos\n\nclass FileMovementHandler(FileSystemEventHandler):\n\n def on_created(self, event):\n raiz, ext = os.path.splitext(event.src_path)\n time.sleep(1)\n print(event)\n for chave, value in dir_tree.items(): \n time.sleep(1)\n if ext in value:\n file = os.path.basename(event.src_path)\n shutil.move(from_dir + '/' + file, to_dir + '/' + chave + '/' + file)\n time.sleep(1)\n\n\n\n# Inicialize a Classe Gerenciadora de Eventos\nevent_handler = FileMovementHandler()\n\n# Inicialize o Observer\nobserver = Observer()\n\n# Agende o Observer\nobserver.schedule(event_handler, from_dir, recursive=True)\n\n# Inicie o Observer\nobserver.start()\n\n\nwhile True:\n time.sleep(5)\n print(\"executando...\")\n", "repo_name": "LucianoFilho13/ByjusAula103", "sub_path": "DownloadAndMove.py", "file_name": "DownloadAndMove.py", "file_ext": "py", "file_size_in_byte": 1544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "watchdog.events.FileSystemEventHandler", "line_number": 27, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": 
"watchdog.observers.Observer", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "19155381091", "text": "#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 20 15:27:14 2018\r\n\r\n@author: user\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n#from torch.autograd import Variable\r\n\r\n# from ext import warp\r\nimport warp\r\n#from utils import train_utils as tn_utils\r\n\r\nclass conv_block(nn.Module):\r\n def __init__(self, inChan, outChan, stride=1):\r\n super(conv_block, self).__init__()\r\n self.conv = nn.Sequential(\r\n nn.Conv3d(inChan, outChan, kernel_size=3, stride=stride, padding=1, bias=True),\r\n nn.BatchNorm3d(outChan),\r\n nn.LeakyReLU(0.2, inplace=True)\r\n# nn.ReLU(inplace=True)\r\n )\r\n self._init_weights()\r\n\r\n def _init_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv3d):\r\n nn.init.kaiming_normal_(m.weight, a=0.2)\r\n# nn.init.kaiming_normal_(m.weight, nonlinearity='relu')\r\n# nn.init.xavier_uniform_(m.weight)\r\n #default: mode='fan_in', nonlinearity='leaky_relu'\r\n if m.bias is not None:\r\n m.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n\r\n return x\r\n\r\nclass Unet(nn.Module):\r\n def __init__(self, enc_nf=[2,16,32,32,32,32], dec_nf=[32,32,32,32,16,16,3]):\r\n super(Unet, self).__init__()\r\n \"\"\"\r\n unet architecture(voxelmorph-2). \r\n :param enc_nf: list of encoder filters. right now it needs to be 1x6. \r\n e.g. [2,16,32,32,32,32]\r\n :param dec_nf: list of decoder filters. right now it must be 1x7\r\n e.g. [32,32,32,32,16,16,3]\r\n \"\"\"\r\n self.inconv = conv_block(enc_nf[0], enc_nf[1])\r\n self.down1 = conv_block(enc_nf[1], enc_nf[2], 2)\r\n self.down2 = conv_block(enc_nf[2], enc_nf[3], 2)\r\n self.down3 = conv_block(enc_nf[3], enc_nf[4], 2)\r\n self.down4 = conv_block(enc_nf[4], enc_nf[5], 2)\r\n self.up1 = conv_block(enc_nf[-1], dec_nf[0])\r\n self.up2 = conv_block(dec_nf[0]+enc_nf[4], dec_nf[1])\r\n self.up3 = conv_block(dec_nf[1]+enc_nf[3], dec_nf[2])\r\n self.same_conv1 = conv_block(dec_nf[2]+enc_nf[2], dec_nf[3])\r\n self.up4 = conv_block(dec_nf[3], dec_nf[4])\r\n self.same_conv2 = conv_block(dec_nf[4]+enc_nf[1], dec_nf[5])\r\n self.outconv = nn.Conv3d(\r\n dec_nf[5], dec_nf[6], kernel_size=3, stride=1, padding=1, bias=True)\r\n# self.tanh = nn.Tanh()\r\n #init last_conv\r\n self.outconv.weight.data.normal_(mean=0, std=1e-5)\r\n if self.outconv.bias is not None:\r\n self.outconv.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n # down-sample path (encoder)\r\n skip1 = self.inconv(x)\r\n skip2 = self.down1(skip1)\r\n skip3 = self.down2(skip2)\r\n skip4 = self.down3(skip3)\r\n x = self.down4(skip4)\r\n # up-sample path (decoder)\r\n x = self.up1(x)\r\n x = F.interpolate(x, scale_factor=2, mode='nearest')\r\n x = torch.cat((x, skip4), 1)\r\n x = self.up2(x)\r\n x = F.interpolate(x, scale_factor=2, mode='nearest')\r\n x = torch.cat((x, skip3), 1)\r\n x = self.up3(x)\r\n x = F.interpolate(x, scale_factor=2, mode='nearest')\r\n x = torch.cat((x, skip2), 1)\r\n x = self.same_conv1(x)\r\n x = self.up4(x)\r\n x = F.interpolate(x, scale_factor=2, mode='nearest')\r\n x = torch.cat((x, skip1), 1)\r\n x = self.same_conv2(x)\r\n x = self.outconv(x)\r\n\r\n return x\r\n\r\nclass dirnet(nn.Module):\r\n def __init__(self, img_size=[192,256,112], enc_nf=[2,16,32,32,32,32], dec_nf=[32,32,32,32,16,16,3]):\r\n super(dirnet, self).__init__()\r\n self.unet = 
Unet(enc_nf, dec_nf)\r\n self.warper = warp.Warper3d(img_size)\r\n\r\n def forward(self, mov, ref):\r\n input0 = torch.cat((mov, ref), 1)\r\n flow = self.unet(input0)\r\n warped = self.warper(mov, flow)\r\n\r\n return warped, flow\r\n \r\nclass conv_down(nn.Module):\r\n \"\"\"\r\n Conv3d:三维卷积层, 输入的尺度是(N, C_in,D,H,W),输出尺度(N,C_out,D_out,H_out,W_out)\r\n BatchNorm3d:在每一个小批量(mini-batch)数据中,计算输入各个维度的均值和标准差。gamma与beta是可学习的大小为C的参数向量(C为输入大小)\r\n 在训练时,该层计算每次输入的均值与方差,并进行移动平均。移动平均默认的动量值为0.1。\r\n 在验证时,训练求得的均值/方差将用于标准化验证数据。\r\n \"\"\"\r\n\r\n def __init__(self, inChan, outChan, down=True, pool_kernel=2):\r\n super(conv_down, self).__init__()\r\n self.down = down\r\n self.conv = nn.Sequential(\r\n nn.Conv3d(inChan, outChan, kernel_size=3, stride=1, padding=1, bias=True),\r\n nn.BatchNorm3d(outChan),\r\n nn.ReLU(inplace=True)\r\n )\r\n self.pool = nn.AvgPool3d(pool_kernel)\r\n# self.pool = nn.MaxPool3d(pool_kernel)\r\n self._init_weights()\r\n \r\n def _init_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv3d):\r\n nn.init.kaiming_normal_(m.weight, nonlinearity='relu')\r\n# nn.init.xavier_uniform_(m.weight)\r\n #default: mode='fan_in', nonlinearity='leaky_relu'\r\n if m.bias is not None:\r\n m.bias.data.zero_()\r\n \r\n def forward(self, x):\r\n x = self.conv(x)\r\n if self.down:\r\n x = self.pool(x)\r\n return x\r\n\r\n\r\nclass Self_Attn(nn.Module):\r\n \"\"\" Self attention Layer\"\"\"\r\n\r\n def __init__(self, in_dim, activation):\r\n super(Self_Attn, self).__init__()\r\n self.chanel_in = in_dim\r\n self.activation = activation\r\n\r\n self.query_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\r\n self.key_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\r\n self.value_conv = nn.Conv3d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\r\n self.gamma = nn.Parameter(torch.zeros(1))\r\n\r\n self.softmax = nn.Softmax(dim=-1) #\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n inputs :\r\n x : input feature maps( B X C X W X H)\r\n returns :\r\n out : self attention value + input feature\r\n attention: B X N X N (N is Width*Height)\r\n \"\"\"\r\n print(x.shape)\r\n m_batchsize, C, W, H, D = x.size()\r\n\r\n proj_query = self.query_conv(x).view(m_batchsize, -1, W * H * D).permute(0, 2, 1) # B X CX(N)\r\n proj_key = self.key_conv(x).view(m_batchsize, -1, W * H * D) # B X C x (*W*H)\r\n energy = torch.bmm(proj_query, proj_key) # transpose check\r\n attention = self.softmax(energy) # BX (N) X (N)\r\n proj_value = self.value_conv(x).view(m_batchsize, -1, W * H * D) # B X C X N\r\n\r\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\r\n out = out.view(m_batchsize, C, W, H, D)\r\n\r\n out = self.gamma * out + x\r\n return out, attention\r\n\r\n\r\nclass Net(nn.Module):\r\n def __init__(self, ndown=3, nfea=[2,16,32,64,64,64,128,64,32,3]):\r\n super(Net, self).__init__()\r\n \"\"\"\r\n net architecture. \r\n :param nfea: list of conv filters. 
right now it needs to be 1x8.\r\n :param ndown: num of downsampling, 3 or 4.\r\n \"\"\"\r\n self.ndown = ndown\r\n assert ndown in [3, 344, 4]\r\n if ndown == 344:\r\n self.down1 = conv_down(nfea[0], nfea[1], pool_kernel=(1,2,2)) \r\n else:\r\n self.down1 = conv_down(nfea[0], nfea[1])\r\n self.down2 = conv_down(nfea[1], nfea[2])\r\n self.down3 = conv_down(nfea[2], nfea[3])\r\n if ndown in [344, 4]:\r\n self.down4 = conv_down(nfea[3], nfea[3])\r\n self.same0 = conv_down(nfea[3], nfea[3], down=False)\r\n self.same1 = conv_down(nfea[3], nfea[4], down=False)\r\n self.same2 = conv_down(nfea[4], nfea[5], down=False)\r\n self.same3 = conv_down(nfea[5], nfea[6], down=False)\r\n self.same4 = conv_down(nfea[6], nfea[7], down=False)\r\n self.same5 = conv_down(nfea[7], nfea[8], down=False)\r\n self.outconv = nn.Conv3d(\r\n nfea[8], nfea[9], kernel_size=1, stride=1, padding=0, bias=True)\r\n #init last_conv\r\n self.outconv.weight.data.normal_(mean=0, std=1e-5)\r\n if self.outconv.bias is not None:\r\n self.outconv.bias.data.zero_()\r\n\r\n def forward(self, x):\r\n scale=8\r\n # print('x',x.shape)\r\n x = F.instance_norm(self.down1(x))\r\n # print('x1',x.shape)\r\n x = self.down2(x)\r\n # print('x2', x.shape)\r\n x = self.down3(x)\r\n # print('x3', x.shape)\r\n if self.ndown in [344, 4]:\r\n scale=16 if self.ndown==4 else (8,16,16)\r\n x = self.down4(x)\r\n x = self.same0(x)\r\n x = self.same1(x)\r\n # print('x_s1', x.shape)\r\n x = self.same2(x)\r\n # print('x_s2', x.shape)\r\n x = self.same3(x)\r\n # print('x_s3', x.shape)\r\n x = self.same4(x)\r\n # print('x_s4', x.shape)\r\n x = self.same5(x)\r\n # print('x_s5', x.shape)\r\n x = self.outconv(x)\r\n # print('x', x.shape)\r\n # x = F.interpolate(x, scale_factor=[8,8,8.56], mode='trilinear', align_corners=True) # False\r\n x = F.interpolate(x, scale_factor=scale, mode='trilinear', align_corners=True)\r\n # print('x', x.shape)\r\n\r\n\r\n return x\r\n\r\nclass snet(nn.Module):\r\n def __init__(self, ndown=3, img_size=[256,256,96]):\r\n # def __init__(self, ndown=3, img_size=[128, 144, 128]):\r\n super(snet, self).__init__()\r\n self.net = Net(ndown)\r\n self.warper = warp.Warper3d(img_size)\r\n\r\n\r\n def forward(self, mov, ref):\r\n input0 = torch.cat((mov, ref), 1)\r\n input0 = input0.to(torch.float)\r\n # print(input0.shape)\r\n flow = self.net(input0)\r\n\r\n warped = self.warper(mov, flow)\r\n\r\n return warped, flow\r\n\r\n#a=snet(ndown=344, img_size=[32,32,32])\r\n#in1 = torch.rand((2,1,32,32,32))\r\n#in2 = torch.rand((2,1,32,32,32))\r\n#b,c=a(in1, in2)", "repo_name": "iiiiiinhao/DIR-lin", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 10262, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.LeakyReLU", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", 
"line_number": 31, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 85, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 92, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 99, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "warp.Warper3d", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool3d", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 157, "usage_type": "name"}, 
{"api_name": "torch.nn.Conv3d", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.nn.Parameter", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 187, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "name"}, {"api_name": "torch.nn.functional.instance_norm", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 221, "usage_type": "name"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 244, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 244, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 250, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 250, "usage_type": "name"}, {"api_name": "warp.Warper3d", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 260, "usage_type": "attribute"}]} +{"seq_id": "12001791367", "text": "from tensorflow import lite\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.models import load_model\nimport datetime\n\nclass tfLiteConverter:\n\n\tdef __init__(self, save_path=\"saved/\"):\n\t\tself.save_path = save_path\n\t\tself.num_strings = 6\n\t\tself.model_filename = \"model.h5\"\n\t\tself.tflite_filename = \"model.tflite\"\n\n\n\tdef log(self, text):\n\t\ttext = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" | \" + text + \"\\n\"\n\t\twith open(\"log/data.log\", \"a\") as myfile:\n\t\t\tmyfile.write(text)\n\t\t\tprint(text)\n\t\twith open(\"log/tfLiteConverter.log\", \"a\") as myfile2:\n\t\t\tmyfile2.write(text)\n\n\tdef softmax_by_string(self, t):\n\t\tsh = K.shape(t)\n\t\tstring_sm = []\n\t\tfor i in range(self.num_strings):\n\t\t\tstring_sm.append(K.expand_dims(K.softmax(t[:,i,:]), axis=1))\n\t\treturn K.concatenate(string_sm, axis=1)\n\n\tdef catcross_by_string(self, target, output):\n\t\tloss = 0\n\t\tfor i in range(self.num_strings):\n\t\t\tloss += K.categorical_crossentropy(target[:,i,:], output[:,i,:])\n\t\treturn loss\n\n\tdef avg_acc(self, y_true, y_pred):\n\t\treturn K.mean(K.equal(K.argmax(y_true, axis=-1), K.argmax(y_pred, axis=-1)))\n\n\tdef convert_tflite(self):\n\t\tmodel = load_model(self.save_path + self.model_filename, custom_objects={'softmax_by_string': self.softmax_by_string, 'avg_acc': self.avg_acc, 'catcross_by_string': self.catcross_by_string})\n\t\tconverter = lite.TFLiteConverter.from_keras_model(model)\n\t\ttflite_model = converter.convert()\n\t\topen(self.save_path + self.tflite_filename, \"wb\").write(tflite_model)\n\n\ndef main():\n\ttfliteconverter = tfLiteConverter()\n\ttfliteconverter.log(\"Start tfLiteConverter\")\n\n\ttfliteconverter.log(\"convert tflite...\")\n\ttfliteconverter.convert_tflite()\n\n\ttfliteconverter.log(\"End tfLiteConverter\")\n\nif __name__ == 
'__main__':\n\tmain()", "repo_name": "mattpoggi/SistemiDigitaliM20-21", "sub_path": "DeNardi-Tornatore/tfLiteConverter.py", "file_name": "tfLiteConverter.py", "file_ext": "py", "file_size_in_byte": 1743, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.shape", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 24, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.expand_dims", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 27, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.softmax", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.concatenate", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.categorical_crossentropy", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 33, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.mean", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 37, "usage_type": "name"}, {"api_name": "tensorflow.keras.backend.equal", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.argmax", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.lite.TFLiteConverter.from_keras_model", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.lite.TFLiteConverter", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.lite", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "31069676224", "text": "import os\nfrom math import cos, sin\n\nimport numpy as np\nimport cupy as cp\nimport cv2 as cv\nfrom numpy import radians\nfrom tqdm import tqdm\n\nfrom src.core.card import Card\n\n\nclass ORB:\n ROOT = \"res/card_database\"\n BS = 64\n N_FEATURES = 100\n\n def __init__(self, path=ROOT):\n self.orb_algorithm = cv.ORB_create(nfeatures=self.N_FEATURES)\n try:\n npzfile = np.load(f\"{path}/orb.npz\")\n self.keypoints = npzfile['orb_clouds']\n self.idx2id = npzfile['card_id']\n self.id2idx = {}\n for ii, id in enumerate(self.idx2id):\n try:\n self.id2idx[id].append(ii)\n except KeyError:\n self.id2idx[id] = [ii]\n\n except (FileNotFoundError, TypeError):\n self.keypoints, self.idx2id = self.__init_orb_clouds()\n\n def compare(self, card1, card2):\n artwork1 = cv.cvtColor(card1.artwork, cv.COLOR_BGR2GRAY)\n artwork2 = cv.cvtColor(card2.artwork, cv.COLOR_BGR2GRAY)\n\n h, w = artwork1.shape[:2]\n kp1 = self.orb_algorithm.detect(artwork1, None)\n kp2 = self.orb_algorithm.detect(artwork2, None)\n pc1 = np.stack([self.kp2vec(kp, w, h) for kp in kp1])\n pc2 = np.stack([self.kp2vec(kp, w, h) for kp in kp2])\n\n M = ((pc1[None, :, :2] - pc2[:, None, :2]) ** 2).sum(-1) ** .5\n D = (pc1[None, :, 2:4] * pc2[:, None, 2:4]).sum(-1)\n S = (1 - M) * D\n\n score = np.mean(np.max(S, axis=1), axis=0)\n\n return score\n\n def rank(self, card, ids=None):\n img = cv.cvtColor(card.artwork, cv.COLOR_BGR2GRAY)\n w, h = img.shape[:2]\n kp = self.orb_algorithm.detect(img, None)\n kp = np.stack([self.kp2vec(p, 
w, h) for p in kp])\n kp = cp.asarray(kp)\n\n if ids is None:\n ids = self.idx2id\n\n idxs = []\n for id in ids:\n idxs.extend(self.id2idx[id])\n idxs = np.array(list(set(idxs)))\n\n similarity = np.zeros(idxs.shape[0])\n\n for ii_frm in range(0, ids.shape[0] + self.BS, self.BS):\n ii_to = min(ii_frm + self.BS, similarity.shape[0])\n\n anc_idxs = idxs[ii_frm:ii_to]\n\n anc = cp.asarray(self.keypoints[anc_idxs])\n\n # Euclidean distance between points\n M = ((kp[None, :, None, :2] - anc[:, None, :, :2]) ** 2).sum(-1) ** .5\n\n # Dot product between orientational unit vector\n D = (kp[None, :, None, 2:4] * anc[:, None, :, 2:4]).sum(-1)\n\n sim = (1 - M) * D * kp[None, :, None, 4]\n similarity[ii_frm:ii_to] = cp.asnumpy(cp.mean(cp.max(sim, axis=2), axis=1))\n\n ranked_idxs = idxs[np.argsort(similarity)[::-1]]\n ranked_ids = self.idx2id[ranked_idxs]\n\n return ranked_ids, np.sort(similarity)[::-1]\n\n def __init_orb_clouds(self):\n\n imgfiles = sorted(os.listdir(f\"{self.ROOT}/images\"))\n\n clouds = np.zeros((len(imgfiles), self.N_FEATURES, 5))\n crd_id = np.zeros((len(imgfiles),), dtype=np.long)\n # n_kp = np.zeros((len(imgfiles),), dtype=np.int)\n\n for ii, imgfile in enumerate(tqdm(imgfiles)):\n imgpath = f\"{self.ROOT}/images/{imgfile}\"\n\n card = Card(path=imgpath)\n img = cv.cvtColor(card.artwork, cv.COLOR_BGR2GRAY)\n\n h, w = img.shape[:2]\n\n for jj, kp in enumerate(self.orb_algorithm.detect(img, None)):\n clouds[ii, jj] = self.kp2vec(kp, w, h)\n crd_id[ii] = int(imgfile.split('_')[0])\n\n np.savez(f\"{self.ROOT}/orb.npz\", orb_clouds=clouds, card_id=crd_id)\n\n return clouds, crd_id\n\n def kp2vec(self, kp, w, h):\n return np.array([kp.pt[0] / w, kp.pt[1] / h,\n cos(radians(kp.angle)), sin(radians(kp.angle)), 1])\n\n# orb = cv.ORB_create()\n#\n# root = \"res/card_database/images\"\n# for imgpth in os.listdir(root):\n# imgpth = f\"{root}/{imgpth}\"\n#\n# img = cv.imread(imgpth, 0)\n#\n# img = img[108:458, 26:395]\n#\n# kp = orb.detect(img, None)\n# kp, des = orb.compute(img, kp)\n#\n# img2 = cv.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)\n#\n# show(img2)\n", "repo_name": "MatthijsBiondina/YuGiOh", "sub_path": "src/ml/orb.py", "file_name": "orb.py", "file_ext": "py", "file_size_in_byte": 4141, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.ORB_create", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 35, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 48, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 56, "usage_type": "call"}, {"api_name": "cupy.asarray", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 67, "usage_type": "call"}, {"api_name": 
"cupy.asarray", "line_number": 74, "usage_type": "call"}, {"api_name": "cupy.asnumpy", "line_number": 83, "usage_type": "call"}, {"api_name": "cupy.mean", "line_number": 83, "usage_type": "call"}, {"api_name": "cupy.max", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 88, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.long", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 98, "usage_type": "call"}, {"api_name": "src.core.card.Card", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.savez", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 116, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "1083767322", "text": "import config\nfrom tests import BaseTestCase\nfrom lib.alipay import AliWapPay\n\nclass BaseTestALiPay(BaseTestCase):\n def __init__(self, *args, **kwargs):\n super(BaseTestALiPay, self).__init__(*args, **kwargs)\n self.alipay = AliWapPay(notify_url=\"http://27.42.109.37:8000\",\n app_id=config.ali_app_id,\n private_key_path=config.private_key_path,\n sign_type= config.ali_sign_type,\n seller_id=config.ali_seller_id\n )\n\n\nclass TestALiPay(BaseTestALiPay):\n def test_wappay_create_trade(self):\n trade_id = self.gen_uid()\n title = \"test title\"\n fee = \"0.01\"\n timeout = \"1m\"\n callback_url = \"http://27.42.109.37:8000\"\n product_info = {\n \"title\": \"test title\",\n \"descr\": \"test descr\",\n \"type\": \"1\"\n }\n query_string = self.alipay.create_trade(trade_id,\n fee,\n timeout,\n callback_url,\n product_info\n )\n self.assertIsNotNone(query_string)\n self.assertIn(\"sign\", query_string)\n\n def test_refund(self):\n trade_id = self.gen_uid()\n fee = \"0.01\"\n query_string = self.alipay.refund(trade_id, fee)\n self.assertIsNotNone(query_string)\n self.assertIn(\"sign\", query_string)", "repo_name": "zivsu/alipay", "sub_path": "alipay/tests/test_alipay.py", "file_name": "test_alipay.py", "file_ext": "py", "file_size_in_byte": 1599, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tests.BaseTestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "lib.alipay.AliWapPay", "line_number": 8, "usage_type": "call"}, {"api_name": "config.ali_app_id", "line_number": 9, "usage_type": "attribute"}, {"api_name": "config.private_key_path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "config.ali_sign_type", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.ali_seller_id", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "21900205884", "text": "import pytest\n\nfrom spsdk.dk6.commands import *\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (CommandTag.GET_CHIPID, b\"\\x21\\x4a\\x04\\x94\"),\n (\n CommandTag.UNLOCK_ISP,\n 
b\"\\x01\\x11\\x22\\x33\\x44\\x55\\x66\\x77\\x88\\x11\\x22\\x33\\x44\\x55\\x66\\x77\\x88\",\n ),\n ],\n)\ndef test_cmd_response(type, raw_data):\n cmd_response = CmdResponse(type, raw_data)\n assert cmd_response.status == raw_data[0]\n assert \"Status\" and \"Type\" in cmd_response.info()\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (CommandTag.GET_CHIPID, b\"\\x21\\x4a\\x04\\x94\"),\n (\n CommandTag.UNLOCK_ISP,\n b\"\\x01\\x11\\x22\\x33\\x44\\x55\\x66\\x77\\x88\\x11\\x22\\x33\\x44\\x55\\x66\\x77\\x88\",\n ),\n ],\n)\ndef test_generic_response(type, raw_data):\n generic_response = GenericResponse(type, raw_data)\n assert generic_response.status == raw_data[0]\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.UNLOCK_ISP, b\"\\x00\"),\n ],\n)\ndef test_isp_unlock_response(type, raw_data):\n isp_unlock = IspUnlockResponse(type, raw_data)\n assert isp_unlock.authenticated == True\n assert isp_unlock.status == StatusCode.OK\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.GET_CHIPID, b\"\\x00\\x88\\x88\\x88\\x88\\xcc\\x00\\x00\\x14\"),\n ],\n)\ndef test_get_chip_id_response(type, raw_data):\n chip_id = GetChipIdResponse(type, raw_data)\n assert chip_id.status == StatusCode.OK\n assert chip_id.chip_id == 0x88888888\n assert chip_id.chip_version == 0x140000CC\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (\n ResponseTag.MEM_GET_INFO,\n b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xde\\x09\\x00\\x00\\x02\\x00\\x00\\x01\\x0f\\x46\\x4c\\x41\\x53\\x48\",\n )\n ],\n)\ndef test_mem_get_info_response(type, raw_data):\n get_info = MemGetInfoResponse(type, raw_data)\n assert get_info.status == StatusCode.OK\n assert get_info.access == 15\n assert get_info.base_addr == 0x0\n assert get_info.length == 0x9DE00\n assert \"FLASH\" in get_info.mem_name\n assert get_info.mem_type == 0x1\n assert get_info.memory_id == 0x0\n assert get_info.sector_size == 0x200\n assert get_info.mem_name == get_info.raw_data[15:].decode(\"ascii\")\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_OPEN, b\"\\x00\\x00\"),\n ],\n)\ndef test_mem_open_response(type, raw_data):\n mem_open = MemOpenResponse(type, raw_data)\n assert mem_open.status == StatusCode.OK\n assert mem_open.handle[0] == 0\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_READ, b\"\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\"),\n ],\n)\ndef test_mem_read_response(type, raw_data):\n mem_read = MemReadResponse(type, raw_data)\n assert mem_read.status == StatusCode.OK\n assert mem_read.data == raw_data[1:]\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_WRITE, b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"),\n ],\n)\ndef test_mem_write_response(type, raw_data):\n mem_write = MemWriteResponse(type, raw_data)\n assert mem_write.status == StatusCode.OK\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_ERASE, b\"\\x00\\x00\\x09\\x43\\x00\\x12\\xa7\\xd0\\x54\"),\n ],\n)\ndef test_mem_erase_response(type, raw_data):\n mem_erase = MemEraseResponse(type, raw_data)\n assert mem_erase.status == StatusCode.OK\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_BLANK_CHECK, b\"\\x00\\x00\\x09\\x45\\x00\\x44\\xfd\\x77\\xd2\"),\n ],\n)\ndef test_mem_check_response(type, raw_data):\n mem_check = MemBlankCheckResponse(type, raw_data)\n assert mem_check.status == StatusCode.OK\n\n\n@pytest.mark.parametrize(\n \"type,raw_data\",\n [\n (ResponseTag.MEM_CLOSE, b\"\\x00\"),\n ],\n)\ndef 
test_mem_close_response(type, raw_data):\n mem_close = MemCloseResponse(type, raw_data)\n assert mem_close.status == StatusCode.OK\n", "repo_name": "nxp-mcuxpresso/spsdk", "sub_path": "tests/dk6/test_commands.py", "file_name": "test_commands.py", "file_ext": "py", "file_size_in_byte": 3935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 37, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pytest.mark.parametrize", "line_number": 6, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 84, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 96, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 96, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 108, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 119, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 130, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 141, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 141, "usage_type": "attribute"}]} +{"seq_id": "22489547315", "text": "import sys\r\nfrom super_gradients.training import Trainer\r\nfrom super_gradients.training import dataloaders\r\nfrom super_gradients.training.dataloaders.dataloaders import coco_detection_yolo_format_train, coco_detection_yolo_format_val\r\nfrom IPython.display import clear_output\r\nfrom super_gradients.training import models\r\nfrom super_gradients.training.losses import PPYoloELoss\r\nfrom super_gradients.training.metrics import DetectionMetrics_050\r\nfrom super_gradients.training.models.detection_models.pp_yolo_e import PPYoloEPostPredictionCallback\r\n\r\n\r\nCHECKPOINT_DIR = 'checkpoints'\r\ntrainer = Trainer(experiment_name='custom', ckpt_root_dir=CHECKPOINT_DIR)\r\n\r\ndataset_params = {\r\n 'data_dir':'dataset', #dataset directory\r\n 'train_images_dir':'train/images',\r\n 'train_labels_dir':'train/labels',\r\n 'val_images_dir':'valid/images',\r\n 'val_labels_dir':'valid/labels',\r\n # 'test_images_dir':'test/images',\r\n # 'test_labels_dir':'test/labels',\r\n\r\n 'classes': ['car'], #Fill in your classess\r\n \r\n 'transforms':[{'DetectionRandomAffine': {'degrees': 0,'translate':0.25,'scales':(0.5,1.5), 'shear': 0.0, 'target_size':'','filter_box_candidate': True,'wh_thr':2, 'area_thr':0.1, 'ar_thr':20}}, \r\n {'DetectionHSV': {'prob': 0.5, 'hgain': 18, 'sgain':30, 'vgain':30}}, \r\n {'DetectionHorizontalFlip': {'prob': 0.5}},\r\n 
{'DetectionMixup':{'input_dim':(640,640),'mixup_scale': (0.5,1.5),'prob':0.5, 'flip_prob':0.5 }},\r\n {'DetectionPadToSize':{'output_size': (640,640),'pad_value':114}},\r\n {'DetectionStandardize': {'max_value':255.}},\r\n 'DetectionImagePermute',\r\n {'DetectionTargetsFormatTransform':{'input_dim':(640,640), 'output_format':'LABEL_CXCYWH'}}\r\n ]\r\n}\r\n\r\ntrain_data = coco_detection_yolo_format_train(\r\n dataset_params={\r\n 'data_dir': dataset_params['data_dir'],\r\n 'images_dir': dataset_params['train_images_dir'],\r\n 'labels_dir': dataset_params['train_labels_dir'],\r\n 'classes': dataset_params['classes'],\r\n 'transforms': [\r\n {'DetectionRandomAffine': {'degrees': 0,'translate':0.25,'scales':(0.5,1.5), 'shear': 0.0, 'target_size':(640,640),'filter_box_candidates': True,'wh_thr':2, 'area_thr':0.1, 'ar_thr':20}}, \r\n {'DetectionHSV': {'prob': 0.5, 'hgain': 18, 'sgain':30, 'vgain':30}}, \r\n {'DetectionHorizontalFlip': {'prob': 0.5}},\r\n {'DetectionMixup':{'input_dim':(640,640),'mixup_scale': (0.5,1.5), 'prob':0.5, 'flip_prob':0.5 }},\r\n {'DetectionPadToSize':{'output_size': (640,640),'pad_value':114}},\r\n {'DetectionStandardize': {'max_value':255.}},\r\n 'DetectionImagePermute',\r\n {'DetectionTargetsFormatTransform':{'input_dim':(640,640), 'output_format':'LABEL_CXCYWH'}}]\r\n },\r\n dataloader_params={\r\n 'batch_size':8,\r\n 'num_workers':2\r\n }\r\n)\r\n\r\nval_data = coco_detection_yolo_format_val(\r\n dataset_params={\r\n 'data_dir': dataset_params['data_dir'],\r\n 'images_dir': dataset_params['val_images_dir'],\r\n 'labels_dir': dataset_params['val_labels_dir'],\r\n 'classes': dataset_params['classes'],\r\n 'transforms': [{'DetectionPadToSize':{'output_size': (640,640),'pad_value':114}},{'DetectionStandardize': {'max_value':255.}},\r\n 'DetectionImagePermute',{'DetectionTargetsFormatTransform':{'input_dim':(640,640), 'output_format':'LABEL_CXCYWH'}}]\r\n },\r\n dataloader_params={\r\n 'batch_size':8,\r\n 'num_workers':2\r\n }\r\n)\r\n\r\n# test_data = coco_detection_yolo_format_val(\r\n# dataset_params={\r\n# 'data_dir': dataset_params['data_dir'],\r\n# 'images_dir': dataset_params['test_images_dir'],\r\n# 'labels_dir': dataset_params['test_labels_dir'],\r\n# 'classes': dataset_params['classes']\r\n# },\r\n# dataloader_params={\r\n# 'batch_size':2,\r\n# 'num_workers':2\r\n# }\r\n# )\r\n\r\nclear_output()\r\n\r\ntrain_data.dataset.transforms\r\n\r\ntrain_data.dataset.plot()\r\n\r\nmodel = models.get('yolo_nas_m',\r\n num_classes=len(dataset_params['classes']),\r\n pretrained_weights=\"coco\"\r\n )\r\n\r\n\r\n\r\ntrain_params = {\r\n # ENABLING SILENT MODE\r\n 'silent_mode': False,\r\n \"average_best_models\":True,\r\n \"warmup_mode\": \"linear_epoch_step\",\r\n \"warmup_initial_lr\": 1e-6,\r\n \"lr_warmup_epochs\": 3,\r\n \"initial_lr\": 5e-4,\r\n \"lr_mode\": \"cosine\",\r\n \"cosine_final_lr_ratio\": 0.1,\r\n \"optimizer\": \"Adam\",\r\n \"optimizer_params\": {\"weight_decay\": 0.0001},\r\n \"zero_weight_decay_on_bias_and_bn\": True,\r\n \"ema\": True,\r\n \"ema_params\": {\"decay\": 0.9, \"decay_type\": \"threshold\"},\r\n # launch_tensorboard: False # Whether to launch a TensorBoard process.\r\n # tensorboard_port: # port for tensorboard process\r\n # tb_files_user_prompt: False # Asks User for Tensorboard Deletion Prompt\r\n # save_tensorboard_to_s3: False # whether to save tb to s3\r\n # ONLY TRAINING FOR 10 EPOCHS FOR THIS EXAMPLE NOTEBOOK\r\n # save_model: True # Whether to save the model checkpoints\r\n # ckpt_best_name: ckpt_best.pth\r\n \"max_epochs\": 
100,\r\n \"mixed_precision\": True,\r\n \"loss\": PPYoloELoss(\r\n use_static_assigner=False,\r\n # NOTE: num_classes needs to be defined here\r\n num_classes=len(dataset_params['classes']),\r\n reg_max=16\r\n ),\r\n \"valid_metrics_list\": [\r\n DetectionMetrics_050(\r\n score_thres=0.1,\r\n top_k_predictions=300,\r\n # NOTE: num_classes needs to be defined here\r\n num_cls=len(dataset_params['classes']),\r\n normalize_targets=True,\r\n post_prediction_callback=PPYoloEPostPredictionCallback(\r\n score_threshold=0.01,\r\n nms_top_k=1000,\r\n max_predictions=300,\r\n nms_threshold=0.7\r\n )\r\n )\r\n ],\r\n \"metric_to_watch\": 'mAP@0.50'\r\n}\r\n\r\ntrainer.train(model=model,\r\n training_params=train_params,\r\n train_loader=train_data,\r\n valid_loader=val_data)\r\n\r\n", "repo_name": "nqt228/YOLO-NAS_TensorRT", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5970, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "super_gradients.training.Trainer", "line_number": 13, "usage_type": "call"}, {"api_name": "super_gradients.training.dataloaders.dataloaders.coco_detection_yolo_format_train", "line_number": 37, "usage_type": "call"}, {"api_name": "super_gradients.training.dataloaders.dataloaders.coco_detection_yolo_format_val", "line_number": 59, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 87, "usage_type": "call"}, {"api_name": "super_gradients.training.models.get", "line_number": 93, "usage_type": "call"}, {"api_name": "super_gradients.training.models", "line_number": 93, "usage_type": "name"}, {"api_name": "super_gradients.training.losses.PPYoloELoss", "line_number": 124, "usage_type": "call"}, {"api_name": "super_gradients.training.metrics.DetectionMetrics_050", "line_number": 131, "usage_type": "call"}, {"api_name": "super_gradients.training.models.detection_models.pp_yolo_e.PPYoloEPostPredictionCallback", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "25347276884", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef total_pesquisa():\n Excel_file = pd.read_excel('Banco_de_dados_praticas.xlsx')\n print(Excel_file)\n\n Contagem1 = Excel_file.groupby(['DireitoVida']).size()\n Contagem2 = Excel_file.groupby(['Violência']).size()\n Contagem3 = Excel_file.groupby(['Escravidão']).size()\n Contagem4 = Excel_file.groupby(['MausTratos']).size()\n Contagem5 = Excel_file.groupby(['Liberdade']).size()\n Contagem6 = Excel_file.groupby(['Repressão']).size()\n Contagem7 = Excel_file.groupby(['LiberdadeExpressão']).size()\n print(Contagem1)\n print(Contagem2)\n print(Contagem3)\n print(Contagem4)\n print(Contagem5)\n print(Contagem6)\n print(Contagem7)\n Total = Contagem1 + Contagem2 + Contagem3 + Contagem4 + Contagem5 + Contagem6 +Contagem7\n print(Total)\n\n plt.subplot(3,3,1)\n plt.hist(Total[0], label='False', color='red')\n plt.xlabel('Total de falsos')\n plt.legend()\n\n plt.subplot(3,3,2)\n plt.hist(Total[1], label='True')\n plt.xlabel('Total de verdadeiros')\n plt.legend()\n\n plt.subplot(3,3,3)\n plt.hist(Contagem1[1], label='True')\n plt.hist(Contagem1[0], label='False')\n plt.xlabel('Direito a vida')\n plt.legend()\n\n plt.subplot(3,3,4)\n plt.hist(Contagem2[1], label='True')\n plt.hist(Contagem2[0], label='False')\n plt.xlabel('violência')\n plt.legend()\n\n plt.subplot(3,3,5)\n plt.hist(Contagem3[1], label='True')\n plt.hist(Contagem3[0], label='False')\n plt.xlabel('Escravidão')\n 
plt.legend()\n\n plt.subplot(3,3,6)\n plt.hist(Contagem4[1], label='True')\n plt.hist(Contagem4[0], label='False')\n plt.xlabel('MausTratos')\n plt.legend()\n\n plt.subplot(3,3,7)\n plt.hist(Contagem5[1], label='True')\n plt.hist(Contagem5[0], label='False')\n plt.xlabel('Liberdade')\n plt.legend()\n\n plt.subplot(3,3,8)\n plt.hist(Contagem6[1], label='True')\n plt.hist(Contagem6[0], label='False')\n plt.xlabel('Repressão')\n plt.legend()\n\n plt.subplot(3,3,9)\n plt.hist(Contagem7[1], label='True')\n plt.hist(Contagem7[0], label='False')\n plt.xlabel('Liberdade de Expressão')\n plt.legend()\n\n plt.show()\n", "repo_name": "Dec0XD/Trabalho_praticas_2", "sub_path": "Grafico_Total_Violaçoes.py", "file_name": "Grafico_Total_Violaçoes.py", "file_ext": "py", "file_size_in_byte": 2201, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_excel", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 73, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "9263449468", "text": "import os\nimport bisect\nimport struct\nfrom enum import IntFlag\nfrom typing import Tuple, Optional, Iterator\nfrom collections.abc import MutableSequence\n\nfrom .util import quickSort as _quickSort\n\n\nclass SOBError(Exception):\n pass\n\n\nclass SOBFlags(IntFlag):\n SORTED = 1\n\n\nclass SOBFile(MutableSequence):\n MAGIC = b\"SOB\\x00\\x00\\x00\\x00\\x00\"\n MIN_HEADER_SIZE = 32\n # HEADER FIELDS OFFSETS\n OFFSET_SIZEOF_HEADER = len(MAGIC)\n OFFSET_FLAGS = OFFSET_SIZEOF_HEADER + 4\n OFFSET_SIZEOF_ITEM = OFFSET_FLAGS + 4\n\n def __init__(self, filepath: str, mode: str = \"r\", cachesize: int = 10240):\n self._path = None\n self._sorted = False\n self._cachesize = cachesize\n self._cache = dict()\n self._closed = True\n self._headersize = self.MIN_HEADER_SIZE\n self._flags = SOBFlags(0)\n if filepath:\n self.open(filepath, mode)\n\n def parse_header(self) -> Tuple[int, SOBFlags, int, int, bytes]:\n \"\"\"Parse the file header.\n Returns header size, flags, item size, #of items,\n and a raw copy of the header.\n \"\"\"\n self._fh.seek(0)\n buf = self._fh.read(self.MIN_HEADER_SIZE)\n if buf[: len(self.MAGIC)] != self.MAGIC:\n raise TypeError(\"Invalid header (MAGIC value)\")\n h_size = struct.unpack_from(\" self.MIN_HEADER_SIZE:\n buf += self._fh.read(h_size - self.MIN_HEADER_SIZE)\n # check for truncation\n fsize = self._fh.seek(0, os.SEEK_END)\n if (fsize - h_size) % i_size != 0:\n raise SOBError(\"Truncated file\".format(self._path))\n num_items = (fsize - h_size) / i_size\n # header size, flags, item size, #of items, raw header\n return h_size, SOBFlags(flags), i_size, int(num_items), buf\n\n ##################################################################\n ## Getters and setters\n\n @property\n def closed(self) -> bool:\n return self._closed\n\n @property\n def itemsize(self) -> int:\n return self._itemsize\n\n @itemsize.setter\n def itemsize(self, value: int):\n if not isinstance(value, int):\n raise TypeError(type(value))\n if len(self) != 0:\n raise SOBError(\"Item size cannot be changed\")\n self._itemsize = value\n self._fh.seek(self.OFFSET_SIZEOF_ITEM)\n self._fh.write(struct.pack(\" int:\n return self._headersize\n\n @headersize.setter\n def headersize(self, value: int):\n if not isinstance(value, int):\n raise TypeError(type(value))\n if len(self) != 0:\n raise SOBError(\"Header size cannot be changed\")\n if value < self.MIN_HEADER_SIZE:\n raise ValueError(\"value must be >= {}\".format(self.MIN_HEADER_SIZE))\n if not self.closed:\n # grow or shrink file\n self._fh.truncate(value)\n self._headersize = value\n self._fh.seek(self.OFFSET_SIZEOF_HEADER)\n 
self._fh.write(struct.pack(\" SOBFlags:\n return self._flags\n\n def set_flags(self, value):\n if not isinstance(value, SOBFlags):\n raise TypeError(\"expected {}, got {}\".format(SOBFlags, type(value)))\n if value in self._flags:\n return\n self._flags |= value\n self._fh.seek(self.OFFSET_FLAGS)\n self._fh.write(struct.pack(\" bool:\n return SOBFlags.SORTED in self.flags\n\n ##################################################################\n ## Mutable Sequence\n\n ####################\n ### Abstract methods\n\n def __getitem__(self, key) -> bytes:\n if isinstance(key, slice):\n # TODO\n raise NotImplementedError(\"slicing not (yet) implemented\")\n # inefficient but simple\n step = 1 if key.step is None else key.step\n return [self[index] for index in range(key.start, key.stop, step)]\n elif isinstance(key, int):\n if key in self._cache:\n item = self._cache[key]\n return item\n item_offset = key * self.itemsize\n if key >= 0:\n if key >= self._len:\n raise IndexError(key)\n self._fh.seek(self.headersize + item_offset)\n buf = self._fh.read(self.itemsize)\n else:\n if key * -1 >= self._len:\n raise IndexError(key)\n # offset from end\n self._fh.seek(item_offset, os.SEEK_END)\n buf = self._fh.read(self.itemsize)\n return buf\n else:\n raise TypeError()\n\n def __setitem__(self, key, buf: bytes):\n if isinstance(key, slice):\n # TODO\n raise NotImplementedError(\"slicing not (yet) implemented\")\n if not isinstance(key, int):\n raise TypeError(\"key is not int\")\n if not isinstance(buf, bytes):\n raise TypeError(\"buf is not bytes\")\n if len(buf) != self.itemsize:\n raise TypeError(\n \"len(buf)={} but itemsize={}\".format(len(buf), self.itemsize)\n )\n # pack item\n item_offset = key * self.itemsize\n if key >= 0:\n if key >= self._len:\n raise IndexError()\n self._fh.seek(self.headersize + item_offset)\n else:\n if key * -1 >= self._len:\n raise IndexError()\n # offset from end\n self._fh.seek(item_offset, os.SEEK_END)\n # write data\n self._fh.write(buf)\n if self.sorted:\n # TODO: check\n self.unset_flags(SOBFlags.SORTED)\n\n def __len__(self) -> int:\n return self._len\n\n def __delitem__(self, index: int):\n raise NotImplementedError()\n\n def insert(self, index: int, value: bytes):\n raise NotImplementedError()\n\n ###################\n ### Other Overidden\n\n def append(self, buf: bytes):\n if not isinstance(buf, bytes):\n raise TypeError(\"buf is not bytes\")\n if len(buf) != self.itemsize:\n raise TypeError(\n \"len(buf)={} but itemsize={}\".format(len(buf), self.itemsize)\n )\n self._fh.seek(0, os.SEEK_END)\n self._fh.write(buf)\n self._len += 1\n if self.sorted:\n # TODO check\n self.unset_flags(SOBFlags.SORTED)\n\n def clear(self):\n self._fh.truncate(self.headersize)\n self._len = 0\n self._cache.clear()\n\n def index(self, buf: bytes, start=0, end=None) -> Optional[int]:\n \"\"\"Return index of item or None.\"\"\"\n if end is None or end < start:\n end = len(self)\n if not self.sorted:\n # linear search :(\n for i in range(start, end):\n item = self[i]\n if item == buf:\n return i\n raise ValueError()\n # sorted, binary search\n x = self._sorted_find(buf, start, end)\n if x is None:\n raise ValueError()\n return x\n\n ## Mutable Sequence\n ##################################################################\n\n def open(self, filepath: str, mode: str):\n self._path = filepath\n if mode == \"r\":\n self._fh = open(filepath, \"rb\")\n (\n self._headersize,\n self._flags,\n self._itemsize,\n self._len,\n _,\n ) = self.parse_header()\n elif mode == \"w\":\n self._fh = 
open(filepath, \"wb\")\n # write header\n self._fh.write(self.MAGIC)\n self._fh.write(b\"\\x00\" * (self.headersize - len(self.MAGIC)))\n # number of entries = 0\n self._len = 0\n # this causes headersize to be written to header on disk\n self.headersize = self.headersize\n elif mode == \"a\":\n self._fh = open(filepath, \"rb+\")\n (\n self._headersize,\n self._flags,\n self._itemsize,\n self._len,\n _,\n ) = self.parse_header()\n else:\n raise SOBError(\"Unknown mode '{}'\".format(mode))\n self._closed = False\n\n def close(self):\n self._fh.close()\n self._closed = True\n\n def fill_cache(self):\n self._fill_cache()\n\n def sort(self, key=None):\n _quickSort(self, 0, len(self) - 1, key)\n self.set_flags(SOBFlags.SORTED)\n\n def __enter__(self):\n return self\n\n def __exit__(self, typ, value, traceback):\n self.close()\n\n def _bisect_indexes(\n self, lo: int = 0, hi: Optional[int] = None, iterations: int = 1024\n ) -> Iterator[int]:\n if int(iterations) == 0 or hi == lo:\n return\n # This algorithm is copied from the CPython src for bisect\n if lo < 0:\n raise ValueError(\"lo must not be negative\")\n if hi is None:\n hi = len(self)\n if hi < lo:\n raise ValueError(f\"{hi} < {lo}\")\n mid = (lo + hi) // 2\n yield mid\n iterations -= 1\n odd = iterations % 2\n # down\n yield from self._bisect_indexes(lo, mid, (iterations - odd) / 2)\n # up\n yield from self._bisect_indexes(mid + 1, hi, (iterations + odd) / 2)\n\n def _fill_cache(self):\n if self._cachesize and self._cachesize > 0:\n if self.sorted:\n # cache values for binary search\n for i in self._bisect_indexes(iterations=self._cachesize):\n self._cache[i] = self[i]\n else:\n # cache values for linear search\n for i in range(self._cachesize):\n self._cache[i] = self[i]\n\n def _sorted_find(self, value: bytes, start: int, end: int):\n \"\"\"Locate the first (leftmost) entry\"\"\"\n i = bisect.bisect_left(self, value, start, end)\n item = self[i]\n if i != len(self) and item == value:\n return i\n return None\n", "repo_name": "malwarefrank/sobod", "sub_path": "sobod/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 10739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "enum.IntFlag", "line_number": 15, "usage_type": "name"}, {"api_name": "collections.abc.MutableSequence", "line_number": 19, "usage_type": "name"}, {"api_name": "struct.unpack_from", "line_number": 47, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 48, "usage_type": "call"}, {"api_name": "struct.unpack_from", "line_number": 49, "usage_type": "call"}, {"api_name": "os.SEEK_END", "line_number": 54, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 38, "usage_type": "name"}, {"api_name": "struct.pack", "line_number": 80, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 99, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 112, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 121, "usage_type": "call"}, {"api_name": "os.SEEK_END", "line_number": 154, "usage_type": "attribute"}, {"api_name": "os.SEEK_END", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.SEEK_END", "line_number": 208, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 220, "usage_type": "name"}, {"api_name": "util.quickSort", "line_number": 281, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 291, "usage_type": "name"}, {"api_name": 
"typing.Iterator", "line_number": 292, "usage_type": "name"}, {"api_name": "bisect.bisect_left", "line_number": 324, "usage_type": "call"}]} +{"seq_id": "20897965690", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom rasa_sdk import Action\nfrom rasa_sdk.events import SlotSet\nimport pandas as pd\nimport json\nimport re\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nZomatoData = pd.read_csv('zomato.csv')\nZomatoData = ZomatoData.drop_duplicates().reset_index(drop=True)\nWeOperate = list(ZomatoData[\"City\"].str.lower().unique())\nCuisines = [\"north indian\", \"south indian\", \"american\", \"chinese\", \"italian\", \"mexican\"]\n\ndef RestaurantSearch(City,Cuisine,Price):\n\tprint(City, Cuisine, Price)\n\tzomato_data = ZomatoData[(ZomatoData['Cuisines'].apply(lambda x: Cuisine.lower() in x.lower())) & (ZomatoData['City'].apply(lambda x: City.lower() in x.lower()))]\n\tif(Price == \"low\"):\n\t\tTEMP = zomato_data[ZomatoData[\"Average Cost for two\"] <= 300]\n\telif(Price == \"mid\"):\n\t\tTEMP = zomato_data[(ZomatoData[\"Average Cost for two\"] > 300) & (ZomatoData[\"Average Cost for two\"] <= 700)]\n\telif(Price == \"high\"):\n\t\tTEMP = zomato_data[(ZomatoData[\"Average Cost for two\"] > 700)]\n\telse:\n\t\treturn pd.DataFrame()\n\treturn TEMP[['Restaurant Name','Address','Average Cost for two','Aggregate rating']].sort_values('Aggregate rating', ascending = False)[:10]\n\ndef get_price_range(price):\n\tif(price == \"low\"):\n\t\treturn \"less than Rs. 300\"\n\telif(price==\"mid\"):\n\t\treturn \"between Rs 300 to 700\"\n\telif(price==\"high\"):\n\t\treturn \"greater than 700\"\n\ndef CitySearch(City):\n\tif(City.lower() in WeOperate):\n\t\treturn '1'\n\telse:\n\t\treturn '0'\n\nclass ActionValEmail(Action):\t\n\tdef name(self):\n\t\treturn 'action_val_email'\n\n\tdef run(self, dispatcher, tracker, domain):\n\t\tto_user = tracker.get_slot('email')\n\t\tprint(\"validating email\")\n\t\tprint(to_user)\n\t\tif re.search(r'\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b', to_user, re.I):\n\t\t\treturn [SlotSet('invalid_email', 'no')]\n\t\telse:\n\t\t\tdispatcher.utter_message(\"The email ID is invalid.\")\n\t\t\treturn [SlotSet('invalid_email', 'yes')]\n\nclass ActionSearchRestaurants(Action):\t\n\tdef name(self):\n\t\treturn 'action_search_restaurants'\n\n\tdef run(self, dispatcher, tracker, domain):\n\t\tloc = tracker.get_slot('location')\n\t\tcuisine = tracker.get_slot('cuisine')\n\t\tprice = tracker.get_slot('price')\n\t\tprice_range = get_price_range(price)\n\t\tdispatcher.utter_message(\"Searching restaurants in {0} restaurants at {1}, in price range {2}.......\\n\".format(cuisine, loc, price_range));\n\t\tresults = RestaurantSearch(City=loc,Cuisine=cuisine,Price=price)\n\t\tresponse=\"\"\n\t\tcount = 0\n\t\tbot_response_header = \"Showing top 5 restaurants \\n===========================================\\n\"\n\t\temail_response_header = \"Top 10 restaurants \\n===========================================\\n\"\n\t\tif results.shape[0] == 0 or (cuisine.lower() not in Cuisines):\n\t\t\t\tdispatcher.utter_message(\"Sorry, no restaurant(s) found for your criteria\")\n\t\t\t\treturn [SlotSet('no_restaurant_found', 'yes')]\n\t\telse:\n\t\t\tfor restaurant in results.iterrows():\n\t\t\t\tcount = count + 1\n\t\t\t\trestaurant = restaurant[1]\n\t\t\t\tresponse = response + F\"Found {restaurant['Restaurant Name']} in {restaurant['Address']} has been rated 
{restaurant['Aggregate rating']}, average cost for two is {restaurant['Average Cost for two']}\\n\\n\"\n\t\t\t\tif (count == 5):\n\t\t\t\t\tdispatcher.utter_message(bot_response_header + response) # dispatch the message to caller\n\t\t\tif(count<5 and count >0):\n\t\t\t\t\tdispatcher.utter_message(bot_response_header + response)\n\t\t\tprint(response)\n\t\t\tSlotSet('no_restaurant_found', 'no')\n\t\t\treturn [SlotSet('email_body', email_response_header + response)]\n\n\nclass ActionSearchCity(Action):\t\n\tdef name(self):\n\t\treturn 'action_search_city'\n\n\tdef run(self, dispatcher, tracker, domain):\n\t\tloc = tracker.get_slot('location')\n\t\tresult = CitySearch(City=loc)\n\t\tprint(result)\n\t\tif result == '0':\n\t\t\tdispatcher.utter_message(\"Sorry, we don’t operate in this city. Can you please specify some other location?\")\n\t\t\treturn [SlotSet('no_restaurant_found', 'yes')]\n\t\telse:\n\t\t\treturn [SlotSet('no_restaurant_found', 'no')]\n\nclass ActionSendEmail(Action):\n\t\n\tdef name(self):\n\t\treturn 'action_send_email'\n\n\tdef run(self, dispatcher, tracker, domain):\n\t\t\ttry:\n\t\t\t\tfrom_user = 'upgrad.sriks@gmail.com' # replace email with your own\n\t\t\t\tto_user = tracker.get_slot('email')\n\t\t\t\tprint('sending email to ' + to_user)\n\t\t\t\tpassword = 'password' # replace password with your own\n\t\t\t\tserver = smtplib.SMTP('smtp.gmail.com',587)\n\t\t\t\tserver.starttls()\n\t\t\t\tserver.login(from_user, password)\n\t\t\t\tsubject = 'Your list of restaurants from Foodie'\n\t\t\t\tmsg = MIMEMultipart()\n\t\t\t\tmsg['From'] = from_user\n\t\t\t\tmsg['TO'] = to_user\n\t\t\t\tmsg['Subject'] = subject\n\t\t\t\tbody = tracker.get_slot('email_body')\n\t\t\t\tmsg.attach(MIMEText(body,'plain'))\n\t\t\t\ttext = msg.as_string()\n\t\t\t\tserver.sendmail(from_user,to_user,text)\n\t\t\t\tserver.close()\n\t\t\texcept: \n\t\t\t\tdispatcher.utter_message(\"Something went wrong, we could not send you the email. 
Please try again later.\")", "repo_name": "sriksmachi/rasa-chat-bot", "sub_path": "actions.py", "file_name": "actions.py", "file_ext": "py", "file_size_in_byte": 4915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "rasa_sdk.Action", "line_number": 46, "usage_type": "name"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "re.I", "line_number": 54, "usage_type": "attribute"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 55, "usage_type": "call"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 58, "usage_type": "call"}, {"api_name": "rasa_sdk.Action", "line_number": 60, "usage_type": "name"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 77, "usage_type": "call"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 88, "usage_type": "call"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 89, "usage_type": "call"}, {"api_name": "rasa_sdk.Action", "line_number": 92, "usage_type": "name"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 102, "usage_type": "call"}, {"api_name": "rasa_sdk.events.SlotSet", "line_number": 104, "usage_type": "call"}, {"api_name": "rasa_sdk.Action", "line_number": 106, "usage_type": "name"}, {"api_name": "smtplib.SMTP", "line_number": 117, "usage_type": "call"}, {"api_name": "email.mime.multipart.MIMEMultipart", "line_number": 121, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "70234052965", "text": "from http import HTTPStatus\nfrom django.test import TestCase, Client\n\nfrom ..models import Group, Post, User\n\n\nclass PostURLTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create(username='NoName')\n cls.post = Post.objects.create(\n text='Example text',\n author=cls.user\n )\n cls.group = Group.objects.create(\n title='Example group',\n slug='test-slug',\n description='Text of group'\n )\n cls.templates_url_names = {\n '/': 'posts/index.html',\n f'/group/{cls.group.slug}/': 'posts/group_list.html',\n f'/profile/{cls.user.username}/': 'posts/profile.html',\n f'/posts/{cls.post.id}/': 'posts/post_detail.html',\n f'/posts/{cls.post.id}/edit/': 'posts/create_post.html',\n '/create/': 'posts/create_post.html',\n '/follow/': 'posts/follow.html'}\n\n def setUp(self):\n self.guest_client = Client()\n self.authorized_client = Client()\n self.authorized_client.force_login(self.user)\n\n def test_urls_uses_correct_template(self):\n \"\"\"Проверка шаблонов и связанных адресов\"\"\"\n for address, template in self.templates_url_names.items():\n with self.subTest(template=template):\n response = self.authorized_client.get(address)\n self.assertTemplateUsed(response, template)\n\n def test_create_page_for_redirect_if_anonymous(self):\n \"\"\"Проверка редиректа, если неавторизованный пользователь\n по приватным URl лазит\"\"\"\n response = self.guest_client.get('/create/', follow=True)\n self.assertRedirects(response, '/auth/login/?next=/create/')\n\n def test_unexisting_page(self):\n \"\"\"Получить ошибку при запросе несуществущего URL\"\"\"\n response = self.authorized_client.get('/unexisting_page/')\n self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)\n", "repo_name": "DDanielleinaDD/project_site_gjango", "sub_path": 
"yatube/posts/tests/test_urls.py", "file_name": "test_urls.py", "file_ext": "py", "file_size_in_byte": 2135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 7, "usage_type": "name"}, {"api_name": "models.User.objects.create", "line_number": 11, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Post.objects.create", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Post.objects", "line_number": 12, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 12, "usage_type": "name"}, {"api_name": "models.Group.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Group.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Group", "line_number": 16, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 31, "usage_type": "call"}, {"api_name": "django.test.Client", "line_number": 32, "usage_type": "call"}, {"api_name": "http.HTTPStatus.NOT_FOUND", "line_number": 51, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "12547202405", "text": "import datetime\nimport os\nimport re\nimport shutil\nimport StringIO\nimport sys\nimport tempfile\nimport unittest\n\nDEPOT_TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.insert(0, DEPOT_TOOLS_ROOT)\n\nfrom testing_support import coverage_utils\nfrom testing_support import git_test_utils\n\nimport git_common\n\nGitRepo = git_test_utils.GitRepo\n\n\nclass GitHyperBlameTestBase(git_test_utils.GitRepoReadOnlyTestBase):\n @classmethod\n def setUpClass(cls):\n super(GitHyperBlameTestBase, cls).setUpClass()\n import git_hyper_blame\n cls.git_hyper_blame = git_hyper_blame\n\n def run_hyperblame(self, ignored, filename, revision):\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n ignored = [self.repo[c] for c in ignored]\n retval = self.repo.run(self.git_hyper_blame.hyper_blame, ignored, filename,\n revision=revision, out=stdout, err=stderr)\n return retval, stdout.getvalue().rstrip().split('\\n')\n\n def blame_line(self, commit_name, rest, author=None, filename=None):\n \"\"\"Generate a blame line from a commit.\n\n Args:\n commit_name: The commit's schema name.\n rest: The blame line after the timestamp. e.g., '2) file2 - merged'.\n author: The author's name. If omitted, reads the name out of the commit.\n filename: The filename. 
If omitted, not shown in the blame line.\n \"\"\"\n short = self.repo[commit_name][:8]\n start = '%s %s' % (short, filename) if filename else short\n if author is None:\n author = self.repo.show_commit(commit_name, format_string='%an %ai')\n else:\n author += self.repo.show_commit(commit_name, format_string=' %ai')\n return '%s (%s %s' % (start, author, rest)\n\nclass GitHyperBlameMainTest(GitHyperBlameTestBase):\n \"\"\"End-to-end tests on a very simple repo.\"\"\"\n REPO_SCHEMA = \"A B C D\"\n\n COMMIT_A = {\n 'some/files/file': {'data': 'line 1\\nline 2\\n'},\n }\n\n COMMIT_B = {\n 'some/files/file': {'data': 'line 1\\nline 2.1\\n'},\n }\n\n COMMIT_C = {\n 'some/files/file': {'data': 'line 1.1\\nline 2.1\\n'},\n }\n\n COMMIT_D = {\n # This file should be automatically considered for ignore.\n '.git-blame-ignore-revs': {'data': 'tag_C'},\n # This file should not be considered.\n 'some/files/.git-blame-ignore-revs': {'data': 'tag_B'},\n }\n\n def setUp(self):\n super(GitHyperBlameMainTest, self).setUp()\n # Most tests want to check out C (so the .git-blame-ignore-revs is not\n # used).\n self.repo.git('checkout', '-f', 'tag_C')\n\n def testBasicBlame(self):\n \"\"\"Tests the main function (simple end-to-end test with no ignores).\"\"\"\n expected_output = [self.blame_line('C', '1) line 1.1'),\n self.blame_line('B', '2) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['tag_C', 'some/files/file'], stdout=stdout,\n stderr=stderr)\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('', stderr.getvalue())\n\n def testIgnoreSimple(self):\n \"\"\"Tests the main function (simple end-to-end test with ignores).\"\"\"\n expected_output = [self.blame_line('C', ' 1) line 1.1'),\n self.blame_line('A', '2*) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['-i', 'tag_B', 'tag_C', 'some/files/file'],\n stdout=stdout, stderr=stderr)\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('', stderr.getvalue())\n\n def testBadRepo(self):\n \"\"\"Tests the main function (not in a repo).\"\"\"\n # Make a temp dir that has no .git directory.\n curdir = os.getcwd()\n tempdir = tempfile.mkdtemp(suffix='_nogit', prefix='git_repo')\n try:\n os.chdir(tempdir)\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.git_hyper_blame.main(\n args=['-i', 'tag_B', 'tag_C', 'some/files/file'], stdout=stdout,\n stderr=stderr)\n finally:\n shutil.rmtree(tempdir)\n os.chdir(curdir)\n\n self.assertNotEqual(0, retval)\n self.assertEqual('', stdout.getvalue())\n r = re.compile('^fatal: Not a git repository', re.I)\n self.assertRegexpMatches(stderr.getvalue(), r)\n\n def testBadFilename(self):\n \"\"\"Tests the main function (bad filename).\"\"\"\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['-i', 'tag_B', 'tag_C', 'some/files/xxxx'],\n stdout=stdout, stderr=stderr)\n self.assertNotEqual(0, retval)\n self.assertEqual('', stdout.getvalue())\n # TODO(mgiuca): This test used to test the exact string, but it broke due to\n # an upstream bug in git-blame. 
For now, just check the start of the string.\n # A patch has been sent upstream; when it rolls out we can revert back to\n # the original test logic.\n self.assertTrue(\n stderr.getvalue().startswith('fatal: no such path some/files/xxxx in '))\n\n def testBadRevision(self):\n \"\"\"Tests the main function (bad revision to blame from).\"\"\"\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['-i', 'tag_B', 'xxxx', 'some/files/file'],\n stdout=stdout, stderr=stderr)\n self.assertNotEqual(0, retval)\n self.assertEqual('', stdout.getvalue())\n self.assertRegexpMatches(stderr.getvalue(),\n '^fatal: ambiguous argument \\'xxxx\\': unknown '\n 'revision or path not in the working tree.')\n\n def testBadIgnore(self):\n \"\"\"Tests the main function (bad revision passed to -i).\"\"\"\n expected_output = [self.blame_line('C', '1) line 1.1'),\n self.blame_line('B', '2) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['-i', 'xxxx', 'tag_C', 'some/files/file'],\n stdout=stdout, stderr=stderr)\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('warning: unknown revision \\'xxxx\\'.\\n', stderr.getvalue())\n\n def testIgnoreFile(self):\n \"\"\"Tests passing the ignore list in a file.\"\"\"\n expected_output = [self.blame_line('C', ' 1) line 1.1'),\n self.blame_line('A', '2*) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n\n with tempfile.NamedTemporaryFile(mode='w+', prefix='ignore') as ignore_file:\n ignore_file.write('# Line comments are allowed.\\n'.format(self.repo['B']))\n ignore_file.write('\\n')\n ignore_file.write('{}\\n'.format(self.repo['B']))\n # A revision that is not in the repo (should be ignored).\n ignore_file.write('xxxx\\n')\n ignore_file.flush()\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['--ignore-file', ignore_file.name, 'tag_C',\n 'some/files/file'],\n stdout=stdout, stderr=stderr)\n\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('warning: unknown revision \\'xxxx\\'.\\n', stderr.getvalue())\n\n def testDefaultIgnoreFile(self):\n \"\"\"Tests automatically using a default ignore list.\"\"\"\n # Check out revision D. We expect the script to use the default ignore list\n # that is checked out, *not* the one committed at the given revision.\n self.repo.git('checkout', '-f', 'tag_D')\n\n expected_output = [self.blame_line('A', '1*) line 1.1'),\n self.blame_line('B', ' 2) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['tag_D', 'some/files/file'],\n stdout=stdout, stderr=stderr)\n\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('', stderr.getvalue())\n\n # Test blame from a different revision. 
Despite the default ignore file\n # *not* being committed at that revision, it should still be picked up\n # because D is currently checked out.\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n\n retval = self.repo.run(self.git_hyper_blame.main,\n args=['tag_C', 'some/files/file'],\n stdout=stdout, stderr=stderr)\n\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('', stderr.getvalue())\n\n def testNoDefaultIgnores(self):\n \"\"\"Tests the --no-default-ignores switch.\"\"\"\n # Check out revision D. This has a .git-blame-ignore-revs file, which we\n # expect to be ignored due to --no-default-ignores.\n self.repo.git('checkout', '-f', 'tag_D')\n\n expected_output = [self.blame_line('C', '1) line 1.1'),\n self.blame_line('B', '2) line 2.1')]\n stdout = StringIO.StringIO()\n stderr = StringIO.StringIO()\n\n retval = self.repo.run(\n self.git_hyper_blame.main,\n args=['tag_D', 'some/files/file', '--no-default-ignores'],\n stdout=stdout, stderr=stderr)\n\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, stdout.getvalue().rstrip().split('\\n'))\n self.assertEqual('', stderr.getvalue())\n\nclass GitHyperBlameSimpleTest(GitHyperBlameTestBase):\n REPO_SCHEMA = \"\"\"\n A B D E F G H\n A C D\n \"\"\"\n\n COMMIT_A = {\n 'some/files/file1': {'data': 'file1'},\n 'some/files/file2': {'data': 'file2'},\n 'some/files/empty': {'data': ''},\n 'some/other/file': {'data': 'otherfile'},\n }\n\n COMMIT_B = {\n 'some/files/file2': {\n 'mode': 0o755,\n 'data': 'file2 - vanilla\\n'},\n 'some/files/empty': {'data': 'not anymore'},\n 'some/files/file3': {'data': 'file3'},\n }\n\n COMMIT_C = {\n 'some/files/file2': {'data': 'file2 - merged\\n'},\n }\n\n COMMIT_D = {\n 'some/files/file2': {'data': 'file2 - vanilla\\nfile2 - merged\\n'},\n }\n\n COMMIT_E = {\n 'some/files/file2': {'data': 'file2 - vanilla\\nfile_x - merged\\n'},\n }\n\n COMMIT_F = {\n 'some/files/file2': {'data': 'file2 - vanilla\\nfile_y - merged\\n'},\n }\n\n # Move file2 from files to other.\n COMMIT_G = {\n 'some/files/file2': {'data': None},\n 'some/other/file2': {'data': 'file2 - vanilla\\nfile_y - merged\\n'},\n }\n\n COMMIT_H = {\n 'some/other/file2': {'data': 'file2 - vanilla\\nfile_z - merged\\n'},\n }\n\n def testBlameError(self):\n \"\"\"Tests a blame on a non-existent file.\"\"\"\n expected_output = ['']\n retval, output = self.run_hyperblame([], 'some/other/file2', 'tag_D')\n self.assertNotEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testBlameEmpty(self):\n \"\"\"Tests a blame of an empty file with no ignores.\"\"\"\n expected_output = ['']\n retval, output = self.run_hyperblame([], 'some/files/empty', 'tag_A')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testBasicBlame(self):\n \"\"\"Tests a basic blame with no ignores.\"\"\"\n # Expect to blame line 1 on B, line 2 on C.\n expected_output = [self.blame_line('B', '1) file2 - vanilla'),\n self.blame_line('C', '2) file2 - merged')]\n retval, output = self.run_hyperblame([], 'some/files/file2', 'tag_D')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testBlameRenamed(self):\n \"\"\"Tests a blame with no ignores on a renamed file.\"\"\"\n # Expect to blame line 1 on B, line 2 on H.\n # Because the file has a different name than it had when (some of) these\n # lines were changed, expect the filenames to be displayed.\n expected_output = [self.blame_line('B', '1) file2 - vanilla',\n 
filename='some/files/file2'),\n self.blame_line('H', '2) file_z - merged',\n filename='some/other/file2')]\n retval, output = self.run_hyperblame([], 'some/other/file2', 'tag_H')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testIgnoreSimpleEdits(self):\n \"\"\"Tests a blame with simple (line-level changes) commits ignored.\"\"\"\n # Expect to blame line 1 on B, line 2 on E.\n expected_output = [self.blame_line('B', '1) file2 - vanilla'),\n self.blame_line('E', '2) file_x - merged')]\n retval, output = self.run_hyperblame([], 'some/files/file2', 'tag_E')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n # Ignore E; blame line 1 on B, line 2 on C.\n expected_output = [self.blame_line('B', ' 1) file2 - vanilla'),\n self.blame_line('C', '2*) file_x - merged')]\n retval, output = self.run_hyperblame(['E'], 'some/files/file2', 'tag_E')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n # Ignore E and F; blame line 1 on B, line 2 on C.\n expected_output = [self.blame_line('B', ' 1) file2 - vanilla'),\n self.blame_line('C', '2*) file_y - merged')]\n retval, output = self.run_hyperblame(['E', 'F'], 'some/files/file2',\n 'tag_F')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testIgnoreInitialCommit(self):\n \"\"\"Tests a blame with the initial commit ignored.\"\"\"\n # Ignore A. Expect A to get blamed anyway.\n expected_output = [self.blame_line('A', '1) file1')]\n retval, output = self.run_hyperblame(['A'], 'some/files/file1', 'tag_A')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testIgnoreFileAdd(self):\n \"\"\"Tests a blame ignoring the commit that added this file.\"\"\"\n # Ignore A. Expect A to get blamed anyway.\n expected_output = [self.blame_line('B', '1) file3')]\n retval, output = self.run_hyperblame(['B'], 'some/files/file3', 'tag_B')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testIgnoreFilePopulate(self):\n \"\"\"Tests a blame ignoring the commit that added data to an empty file.\"\"\"\n # Ignore A. 
Expect A to get blamed anyway.\n expected_output = [self.blame_line('B', '1) not anymore')]\n retval, output = self.run_hyperblame(['B'], 'some/files/empty', 'tag_B')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\nclass GitHyperBlameLineMotionTest(GitHyperBlameTestBase):\n REPO_SCHEMA = \"\"\"\n A B C D E F\n \"\"\"\n\n COMMIT_A = {\n 'file': {'data': 'A\\ngreen\\nblue\\n'},\n }\n\n # Change \"green\" to \"yellow\".\n COMMIT_B = {\n 'file': {'data': 'A\\nyellow\\nblue\\n'},\n }\n\n # Insert 2 lines at the top,\n # Change \"yellow\" to \"red\".\n # Insert 1 line at the bottom.\n COMMIT_C = {\n 'file': {'data': 'X\\nY\\nA\\nred\\nblue\\nZ\\n'},\n }\n\n # Insert 2 more lines at the top.\n COMMIT_D = {\n 'file': {'data': 'earth\\nfire\\nX\\nY\\nA\\nred\\nblue\\nZ\\n'},\n }\n\n # Insert a line before \"red\", and indent \"red\" and \"blue\".\n COMMIT_E = {\n 'file': {'data': 'earth\\nfire\\nX\\nY\\nA\\ncolors:\\n red\\n blue\\nZ\\n'},\n }\n\n # Insert a line between \"A\" and \"colors\".\n COMMIT_F = {\n 'file': {'data': 'earth\\nfire\\nX\\nY\\nA\\nB\\ncolors:\\n red\\n blue\\nZ\\n'},\n }\n\n def testCacheDiffHunks(self):\n \"\"\"Tests the cache_diff_hunks internal function.\"\"\"\n expected_hunks = [((0, 0), (1, 2)),\n ((2, 1), (4, 1)),\n ((3, 0), (6, 1)),\n ]\n hunks = self.repo.run(self.git_hyper_blame.cache_diff_hunks, 'tag_B',\n 'tag_C')\n self.assertEqual(expected_hunks, hunks)\n\n def testApproxLinenoAcrossRevs(self):\n \"\"\"Tests the approx_lineno_across_revs internal function.\"\"\"\n # Note: For all of these tests, the \"old revision\" and \"new revision\" are\n # reversed, which matches the usage by hyper_blame.\n\n # Test an unchanged line before any hunks in the diff. Should be unchanged.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_B', 'tag_A', 1)\n self.assertEqual(1, lineno)\n\n # Test an unchanged line after all hunks in the diff. Should be matched to\n # the line's previous position in the file.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_D', 'tag_C', 6)\n self.assertEqual(4, lineno)\n\n # Test a line added in a new hunk. Should be matched to the line *before*\n # where the hunk was inserted in the old version of the file.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_F', 'tag_E', 6)\n self.assertEqual(5, lineno)\n\n # Test lines added in a new hunk at the very start of the file. This tests\n # an edge case: normally it would be matched to the line *before* where the\n # hunk was inserted (Line 0), but since the hunk is at the start of the\n # file, we match to Line 1.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_C', 'tag_B', 1)\n self.assertEqual(1, lineno)\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_C', 'tag_B', 2)\n self.assertEqual(1, lineno)\n\n # Test an unchanged line in between hunks in the diff. Should be matched to\n # the line's previous position in the file.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_C', 'tag_B', 3)\n self.assertEqual(1, lineno)\n\n # Test a changed line. 
Should be matched to the hunk's previous position in\n # the file.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_C', 'tag_B', 4)\n self.assertEqual(2, lineno)\n\n # Test a line added in a new hunk at the very end of the file. Should be\n # matched to the line *before* where the hunk was inserted (the last line of\n # the file). Technically same as the case above but good to boundary test.\n lineno = self.repo.run(self.git_hyper_blame.approx_lineno_across_revs,\n 'file', 'file', 'tag_C', 'tag_B', 6)\n self.assertEqual(3, lineno)\n\n def testInterHunkLineMotion(self):\n \"\"\"Tests a blame with line motion in another hunk in the ignored commit.\"\"\"\n # Blame from D, ignoring C.\n\n # Lines 1, 2 were added by D.\n # Lines 3, 4 were added by C (but ignored, so blame A).\n # Line 5 was added by A.\n # Line 6 was modified by C (but ignored, so blame B). (Note: This requires\n # the algorithm to figure out that Line 6 in D == Line 4 in C ~= Line 2 in\n # B, so it blames B. Otherwise, it would blame A.)\n # Line 7 was added by A.\n # Line 8 was added by C (but ignored, so blame A).\n expected_output = [self.blame_line('D', ' 1) earth'),\n self.blame_line('D', ' 2) fire'),\n self.blame_line('A', '3*) X'),\n self.blame_line('A', '4*) Y'),\n self.blame_line('A', ' 5) A'),\n self.blame_line('B', '6*) red'),\n self.blame_line('A', ' 7) blue'),\n self.blame_line('A', '8*) Z'),\n ]\n retval, output = self.run_hyperblame(['C'], 'file', 'tag_D')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testIntraHunkLineMotion(self):\n \"\"\"Tests a blame with line motion in the same hunk in the ignored commit.\"\"\"\n # This test was mostly written as a demonstration of the limitations of the\n # current algorithm (it exhibits non-ideal behaviour).\n\n # Blame from E, ignoring E.\n # Line 6 was added by E (but ignored, so blame C).\n # Lines 7, 8 were modified by E (but ignored, so blame A).\n # TODO(mgiuca): Ideally, this would blame Line 7 on C, because the line\n # \"red\" was added by C, and this is just a small change to that line. 
But\n # the current algorithm can't deal with line motion within a hunk, so it\n # just assumes Line 7 in E ~= Line 7 in D == Line 3 in A (which was \"blue\").\n expected_output = [self.blame_line('D', ' 1) earth'),\n self.blame_line('D', ' 2) fire'),\n self.blame_line('C', ' 3) X'),\n self.blame_line('C', ' 4) Y'),\n self.blame_line('A', ' 5) A'),\n self.blame_line('C', '6*) colors:'),\n self.blame_line('A', '7*) red'),\n self.blame_line('A', '8*) blue'),\n self.blame_line('C', ' 9) Z'),\n ]\n retval, output = self.run_hyperblame(['E'], 'file', 'tag_E')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n\nclass GitHyperBlameLineNumberTest(GitHyperBlameTestBase):\n REPO_SCHEMA = \"\"\"\n A B C D\n \"\"\"\n\n COMMIT_A = {\n 'file': {'data': 'red\\nblue\\n'},\n }\n\n # Change \"blue\" to \"green\".\n COMMIT_B = {\n 'file': {'data': 'red\\ngreen\\n'},\n }\n\n # Insert 2 lines at the top,\n COMMIT_C = {\n 'file': {'data': '\\n\\nred\\ngreen\\n'},\n }\n\n # Change \"green\" to \"yellow\".\n COMMIT_D = {\n 'file': {'data': '\\n\\nred\\nyellow\\n'},\n }\n\n def testTwoChangesWithAddedLines(self):\n \"\"\"Regression test for https://crbug.com/709831.\n\n Tests a line with multiple ignored edits, and a line number change in\n between (such that the line number in the current revision is bigger than\n the file's line count at the older ignored revision).\n \"\"\"\n expected_output = [self.blame_line('C', ' 1) '),\n self.blame_line('C', ' 2) '),\n self.blame_line('A', ' 3) red'),\n self.blame_line('A', '4*) yellow'),\n ]\n # Due to https://crbug.com/709831, ignoring both B and D would crash,\n # because of C (in between those revisions) which moves Line 2 to Line 4.\n # The algorithm would incorrectly think that Line 4 was still on Line 4 in\n # Commit B, even though it was Line 2 at that time. 
Its index is out of\n # range in the number of lines in Commit B.\n retval, output = self.run_hyperblame(['B', 'D'], 'file', 'tag_D')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n\nclass GitHyperBlameUnicodeTest(GitHyperBlameTestBase):\n REPO_SCHEMA = \"\"\"\n A B C\n \"\"\"\n\n COMMIT_A = {\n GitRepo.AUTHOR_NAME: 'ASCII Author',\n 'file': {'data': 'red\\nblue\\n'},\n }\n\n # Add a line.\n COMMIT_B = {\n GitRepo.AUTHOR_NAME: u'\\u4e2d\\u56fd\\u4f5c\\u8005'.encode('utf-8'),\n 'file': {'data': 'red\\ngreen\\nblue\\n'},\n }\n\n # Modify a line with non-UTF-8 author and file text.\n COMMIT_C = {\n GitRepo.AUTHOR_NAME: u'Lat\\u00edn-1 Author'.encode('latin-1'),\n 'file': {'data': u'red\\ngre\\u00e9n\\nblue\\n'.encode('latin-1')},\n }\n\n def testNonASCIIAuthorName(self):\n \"\"\"Ensures correct tabulation.\n\n Tests the case where there are non-ASCII (UTF-8) characters in the author\n name.\n\n Regression test for https://crbug.com/808905.\n \"\"\"\n expected_output = [\n self.blame_line('A', '1) red', author='ASCII Author'),\n # Expect 8 spaces, to line up with the other name.\n self.blame_line('B', '2) green',\n author=u'\\u4e2d\\u56fd\\u4f5c\\u8005 '.encode('utf-8')),\n self.blame_line('A', '3) blue', author='ASCII Author'),\n ]\n retval, output = self.run_hyperblame([], 'file', 'tag_B')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n def testNonUTF8Data(self):\n \"\"\"Ensures correct behaviour even if author or file data is not UTF-8.\n\n There is no guarantee that a file will be UTF-8-encoded, so this is\n realistic.\n \"\"\"\n expected_output = [\n self.blame_line('A', '1) red', author='ASCII Author '),\n # The Author has been re-encoded as UTF-8. The file data is preserved as\n # raw byte data.\n self.blame_line('C', '2) gre\\xe9n', author='Lat\\xc3\\xadn-1 Author'),\n self.blame_line('A', '3) blue', author='ASCII Author '),\n ]\n retval, output = self.run_hyperblame([], 'file', 'tag_C')\n self.assertEqual(0, retval)\n self.assertEqual(expected_output, output)\n\n\nif __name__ == '__main__':\n sys.exit(coverage_utils.covered_main(\n os.path.join(DEPOT_TOOLS_ROOT, 'git_hyper_blame.py')))\n", "repo_name": "kiwibrowser/src", "sub_path": "third_party/depot_tools/tests/git_hyper_blame_test.py", "file_name": "git_hyper_blame_test.py", "file_ext": "py", "file_size_in_byte": 24823, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "testing_support.git_test_utils.GitRepo", "line_number": 18, "usage_type": "attribute"}, {"api_name": "testing_support.git_test_utils", "line_number": 18, "usage_type": "name"}, {"api_name": "testing_support.git_test_utils.GitRepoReadOnlyTestBase", "line_number": 21, "usage_type": "attribute"}, {"api_name": "testing_support.git_test_utils", "line_number": 21, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 29, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 30, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 86, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 87, 
"usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 99, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 100, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 111, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 112, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 114, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 115, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 116, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 121, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 122, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 126, "usage_type": "call"}, {"api_name": "re.I", "line_number": 126, "usage_type": "attribute"}, {"api_name": "StringIO.StringIO", "line_number": 131, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 132, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 147, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 148, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 162, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 163, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 175, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 176, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 178, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 202, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 203, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 216, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 217, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 235, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 236, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 634, "usage_type": "call"}, {"api_name": "testing_support.coverage_utils.covered_main", "line_number": 634, "usage_type": "call"}, {"api_name": "testing_support.coverage_utils", "line_number": 634, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 635, "usage_type": "call"}, {"api_name": "os.path", "line_number": 635, "usage_type": "attribute"}]} +{"seq_id": "37414719109", "text": "import shutil\nimport sys\nfrom pathlib import Path\n\nimport cv2\nfrom tqdm import tqdm\n\npath = (Path(__file__).parents[1]).as_posix()\nsys.path.insert(0, path)\n\nfrom tracking.visualize import get_video_parameters\n\n\ndef save_video_frames_on_disk(\n save_path: Path,\n vc,\n start_frame: int,\n end_frame: int,\n step: int,\n name_prefix: str = \"\",\n format: str = \"jpg\",\n):\n vc.set(cv2.CAP_PROP_POS_FRAMES, start_frame)\n for frame_number in tqdm(range(start_frame, end_frame, step)):\n _, frame = vc.read()\n save_file = save_path / f\"{name_prefix}frame_{frame_number:06d}.{format}\"\n cv2.imwrite(save_file.as_posix(), frame)\n\n\ndef save_yolo_labels_on_disk(\n src_path: Path,\n dst_path: Path,\n start_frame: int,\n end_frame: int,\n step: int,\n name_prefix: str = \"\",\n):\n for frame_number in tqdm(range(start_frame, end_frame, step)):\n label = f\"frame_{frame_number:06d}.txt\"\n src_file = src_path / label\n dst_file = dst_path / f\"{name_prefix}{label}\"\n shutil.copy2(src_file, dst_file)\n\n\ndef prepare_data_for_yolo_one_vid_tr_val(\n save_path: Path,\n video_path: Path,\n label_path: 
Path,\n video_name: str,\n):\n start_frame, end_frame, step = 0, 64, 1 # 512, 8\n val_end_frame = end_frame + 32 * step\n name_prefix = Path(video_name).stem + \"_\"\n\n train_path = save_path / \"train/images\"\n valid_path = save_path / \"valid/images\"\n train_label_path = save_path / \"train/labels\"\n valid_label_path = save_path / \"valid/labels\"\n for path in [train_path, valid_path, train_label_path, valid_label_path]:\n path.mkdir(exist_ok=True, parents=True)\n\n # save images\n vc = cv2.VideoCapture((video_path / video_name).as_posix())\n save_video_frames_on_disk(train_path, vc, start_frame, end_frame, step, name_prefix)\n save_video_frames_on_disk(\n valid_path, vc, end_frame, val_end_frame, step, name_prefix\n )\n\n # save labels\n zip_file = label_path / (Path(video_name).stem + \".zip\")\n shutil.unpack_archive(zip_file, zip_file.parent / zip_file.stem, \"zip\")\n yolo_path = zip_file.parent / zip_file.stem / \"obj_train_data\"\n save_yolo_labels_on_disk(\n yolo_path, train_label_path, start_frame, end_frame, step, name_prefix\n )\n save_yolo_labels_on_disk(\n yolo_path, valid_label_path, end_frame, val_end_frame, step, name_prefix\n )\n shutil.rmtree(yolo_path.parent)\n\n # save meta file\n # /home/fkarimineja/data\n with open(save_path / \"data.yaml\", \"w\") as wfile:\n location = Path(\"/home/fkarimineja/data\") / save_path.stem\n remote_train_path = location / \"train/images\"\n remote_val_path = location / \"valid/images\"\n wfile.write(f\"train: {remote_train_path}\\n\")\n wfile.write(f\"val: {remote_val_path}\\n\\n\")\n wfile.write(\"nc: 1\\n\")\n wfile.write(\"names: ['0']\")\n\n\ndef prepare_data_for_yolo_one_vid(\n save_path: Path,\n video_path: Path,\n label_path: Path,\n video_name: str, # with .mp4\n stage: str, # train or val\n start_frame: int = 0,\n end_frame: int = None,\n step: int = 8,\n):\n name_prefix = Path(video_name).stem + \"_\"\n\n train_image_path = save_path / f\"{stage}/images\"\n train_label_path = save_path / f\"{stage}/labels\"\n for path in [train_image_path, train_label_path]:\n path.mkdir(exist_ok=True, parents=True)\n\n # save images\n vc = cv2.VideoCapture((video_path / video_name).as_posix())\n height, width, total_no_frames, fps = get_video_parameters(vc)\n if end_frame is None:\n end_frame = total_no_frames\n save_video_frames_on_disk(\n train_image_path, vc, start_frame, end_frame, step, name_prefix\n )\n\n # save labels\n zip_file = label_path / (Path(video_name).stem + \".zip\")\n print(zip_file)\n shutil.unpack_archive(zip_file, zip_file.parent / zip_file.stem, \"zip\")\n yolo_path = zip_file.parent / zip_file.stem / \"obj_train_data\"\n save_yolo_labels_on_disk(\n yolo_path, train_label_path, start_frame, end_frame, step, name_prefix\n )\n shutil.rmtree(yolo_path.parent)\n\n # save meta file\n with open(save_path / \"data.yaml\", \"w\") as wfile:\n location = Path(\"/home/fkarimineja/data\") / save_path.stem\n remote_train_path = location / \"train/images\"\n remote_val_path = location / \"valid/images\"\n wfile.write(f\"train: {remote_train_path}\\n\")\n wfile.write(f\"val: {remote_val_path}\\n\\n\")\n wfile.write(\"nc: 1\\n\")\n wfile.write(\"names: ['0']\")\n\n\ndef prepare_data_for_yolo_all(\n save_path: Path,\n videos_main_path: Path,\n labels_main_path: Path,\n):\n for video_path in videos_main_path.glob(\"*\"):\n video_name = video_path.name\n if video_name == \"231_cam_1.MP4\":\n prepare_data_for_yolo_one_vid(\n save_path, videos_main_path, labels_main_path, video_name, \"valid\"\n )\n elif video_name in [\n 
\"04_07_22_F_2_rect_valid.mp4\",\n \"04_07_22_G_2_rect_valid.mp4\",\n ]:\n prepare_data_for_yolo_one_vid(\n save_path,\n videos_main_path,\n labels_main_path,\n video_name,\n \"train\",\n step=1,\n )\n else:\n prepare_data_for_yolo_one_vid(\n save_path, videos_main_path, labels_main_path, video_name, \"train\"\n )\n\n\nif __name__ == \"__main__\":\n save_path = Path(\"~/Downloads/vids/dat8_v1/\").expanduser().resolve()\n videos_main_path = Path(\"~/Downloads/vids/all\").expanduser().resolve()\n labels_main_path = Path(\"~/Downloads/vids/yolo\").expanduser().resolve()\n prepare_data_for_yolo_all(save_path, videos_main_path, labels_main_path)\n", "repo_name": "fkariminejadasl/tracking", "sub_path": "exps/data_v1.py", "file_name": "data_v1.py", "file_ext": "py", "file_size_in_byte": 5725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "name"}, {"api_name": "cv2.CAP_PROP_POS_FRAMES", "line_number": 23, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 31, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 32, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 38, "usage_type": "call"}, {"api_name": "shutil.copy2", "line_number": 42, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 46, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 48, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 53, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 63, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 70, "usage_type": "call"}, {"api_name": "shutil.unpack_archive", "line_number": 71, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 79, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 84, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 94, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 95, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 96, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 111, "usage_type": "call"}, {"api_name": "tracking.visualize.get_video_parameters", "line_number": 112, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 120, "usage_type": "call"}, {"api_name": "shutil.unpack_archive", "line_number": 122, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 127, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 131, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 141, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 142, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 143, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 170, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 171, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 172, "usage_type": "call"}]} 
+{"seq_id": "17238269199", "text": "from google.appengine.api import namespace_manager\nfrom google.appengine.ext.ndb import metadata\nfrom handlers.web_admin.web.company.base import CompanyBaseHandler\nfrom methods.auth import company_user_required\nfrom models.config.config import config\n\n\nclass ChooseNamespaceHandler(CompanyBaseHandler):\n @company_user_required\n def get(self):\n if \"namespace\" in self.session:\n del self.session[\"namespace\"]\n namespaces = metadata.get_namespaces()\n real_namespaces = []\n for namespace in namespaces:\n namespace_manager.set_namespace(namespace)\n if config and config.APP_NAME:\n real_namespaces.append((namespace, config.APP_NAME))\n real_namespaces = sorted(real_namespaces, key=lambda t: t[1])\n namespace_manager.set_namespace(None)\n self.render('/choose_namespace.html', namespaces=real_namespaces)\n\n @company_user_required\n def post(self):\n self.session[\"namespace\"] = self.request.get(\"namespace\")\n self.redirect_to(\"company_main\")\n", "repo_name": "lopatinsky/automation-gae", "sub_path": "handlers/web_admin/web/company/choose_ns.py", "file_name": "choose_ns.py", "file_ext": "py", "file_size_in_byte": 1056, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "handlers.web_admin.web.company.base.CompanyBaseHandler", "line_number": 8, "usage_type": "name"}, {"api_name": "google.appengine.ext.ndb.metadata.get_namespaces", "line_number": 13, "usage_type": "call"}, {"api_name": "google.appengine.ext.ndb.metadata", "line_number": 13, "usage_type": "name"}, {"api_name": "google.appengine.api.namespace_manager.set_namespace", "line_number": 16, "usage_type": "call"}, {"api_name": "google.appengine.api.namespace_manager", "line_number": 16, "usage_type": "name"}, {"api_name": "models.config.config.config", "line_number": 17, "usage_type": "name"}, {"api_name": "models.config.config.config.APP_NAME", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.config.config.config.APP_NAME", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.config.config.config", "line_number": 18, "usage_type": "name"}, {"api_name": "google.appengine.api.namespace_manager.set_namespace", "line_number": 20, "usage_type": "call"}, {"api_name": "google.appengine.api.namespace_manager", "line_number": 20, "usage_type": "name"}, {"api_name": "methods.auth.company_user_required", "line_number": 9, "usage_type": "name"}, {"api_name": "methods.auth.company_user_required", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "73118318885", "text": "import cv2\nfrom queue import Queue\nfrom yolov5_detect_image import Y5Detect, draw_boxes_detection\nimport time\nfrom threading import Thread\n\n\ny5_model = Y5Detect(weights='/home/duyngu/Downloads/model/weights/best.pt')\nclass_names = y5_model.class_names\n\n\ndef video_capture(frame_detect_queue, frame_origin_queue):\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_detect_queue.put(image_rgb)\n frame_origin_queue.put(frame)\n\n cap.release()\n\n\ndef inference(frame_detect_queue, detections_queue):\n while cap.isOpened():\n image_rgb = frame_detect_queue.get()\n bboxes, labels, scores = y5_model.predict(image_rgb)\n detections_queue.put([bboxes, labels, scores])\n\n cap.release()\n\n\ndef drawing(detections_queue, frame_origin_queue, frame_final_queue):\n while cap.isOpened():\n frame_origin = frame_origin_queue.get()\n bboxes, 
labels, scores = detections_queue.get()\n if frame_origin is not None:\n image = draw_boxes_detection(frame_origin, bboxes, scores=scores, labels=labels, class_names=class_names)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n if frame_final_queue.full() is False:\n frame_final_queue.put(image)\n else:\n time.sleep(10)\n cap.release()\n\n\nif __name__ == '__main__':\n frame_detect_queue = Queue(maxsize=1)\n frame_origin_queue = Queue(maxsize=1)\n detections_queue = Queue(maxsize=1)\n frame_final_queue = Queue(maxsize=1)\n input_path = \"/home/duyngu/Desktop/Check_Label/output.avi\"\n cap = cv2.VideoCapture(input_path)\n Thread(target=video_capture, args=(frame_detect_queue, frame_origin_queue)).start()\n Thread(target=inference, args=(frame_detect_queue, detections_queue)).start()\n Thread(target=drawing, args=(detections_queue, frame_origin_queue, frame_final_queue)).start()\n\n while True:\n if cap.isOpened():\n cv2.namedWindow('output')\n image = frame_final_queue.get()\n cv2.imshow('output', image)\n time.sleep(10)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n cv2.destroyWindow('output')\n break\n\n cv2.destroyAllWindows()\n\n\n", "repo_name": "DuyNguDao/Test-model-yolov5", "sub_path": "yolov5_detect_video.py", "file_name": "yolov5_detect_video.py", "file_ext": "py", "file_size_in_byte": 2313, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yolov5_detect_image.Y5Detect", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 17, "usage_type": "attribute"}, {"api_name": "yolov5_detect_image.draw_boxes_detection", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 48, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 49, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 50, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 51, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 53, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 54, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 55, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 62, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.destroyWindow", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "26604179213", "text": "\"\"\"Custom errors for the GETL module.\"\"\"\nfrom contextlib import contextmanager\nfrom typing import Dict, Tuple, Type\n\nfrom botocore.exceptions import ClientError\nfrom pyspark.sql.utils import AnalysisException\n\n# Format: ExceptionClass, Message\n_ERROR_MAP: Dict[str, Tuple[Type[Exception], str]] = {\n \"NoSuchBucket\": (\n FileNotFoundError,\n \"The specified bucket {BucketName} does not exist\",\n ),\n \"NoSuchKey\": (FileNotFoundError, \"{Message}\"),\n \"404\": (FileNotFoundError, \"{Message}\"),\n}\n\n\n@contextmanager\ndef handle_client_error():\n \"\"\"Raises other exceptions depending on the error code.\n\n Converts the 
following codes to a different exception:\n - NoSuchBucket: FileNotFoundError\n - NoSuchKey: FileNotFoundError\n \"\"\"\n try:\n yield\n except ClientError as client_error:\n # LOGGER.error(str(client_error))\n error = client_error.response[\"Error\"]\n try:\n exception_class, msg = _ERROR_MAP[error[\"Code\"]]\n except KeyError:\n pass\n else:\n raise exception_class(msg.format(**error))\n\n raise client_error\n\n\n@contextmanager\ndef handle_delta_files_dont_exist():\n try:\n yield\n except AnalysisException as spark_exception:\n exceptions = [\n \"Incompatible format detected\",\n \"doesn't exist\",\n \"is not a Delta table\",\n \"Path does not exist:\",\n ]\n\n if any(e in str(spark_exception) for e in exceptions):\n return\n raise spark_exception\n\n\nclass NoDataToProcess(Exception):\n \"\"\"Should be thrown when there is not more data to process.\"\"\"\n", "repo_name": "husqvarnagroup/GETL", "sub_path": "getl/common/errors.py", "file_name": "errors.py", "file_ext": "py", "file_size_in_byte": 1645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.Dict", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 9, "usage_type": "name"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 29, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 19, "usage_type": "name"}, {"api_name": "pyspark.sql.utils.AnalysisException", "line_number": 46, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "43377047527", "text": "import pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\nimport smtplib\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport time\n\n\nNOME = \"Drilon\"\nprint(\"Initializing Poncho\")\n\nengine = pyttsx3.init(\"sapi5\")\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[0].id)\n\n\n# Speak function will pronounce the string which is passed to it\ndef speak(text):\n engine.say(text)\n engine.runAndWait()\n\n# Will speak by current time\ndef wishMe():\n hour = int(datetime.datetime.now().hour)\n if hour >= 0 and hour < 12:\n speak(\"Buongiorno \"+NOME)\n\n elif hour >= 12 and hour<18:\n speak(\"Buon pomeriggio \"+NOME)\n\n else:\n speak(\"Buonasera \"+NOME)\n\n #speak(\"Come posso aiutarti?\")\n\n\n# Main program starts here\n# this funciton will take command from the microphone\ndef takeCommand():\n query = None\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Sto ascoltando...\")\n r.adjust_for_ambient_noise(source) #remove ambient noise\n audio = r.listen(source)\n\n try:\n print(\"Riconoscendo...\")\n query = r.recognize_google(audio, language='it-IT')\n print(f\"l'utente ha detto: {query}\\n\")\n\n except Exception as e:\n print(\"Say that again please\")\n\n\n return query\n\n\n\n\nwishMe()\nrequest = takeCommand()\n\n#Logic for executing task\nif 'su wikipedia' in request.lower():\n speak('Sto cercando su Wikipedia...')\n request = request.replace(\"su wikipedia\", \"\")\n request = request.replace(\"cerca\", \"\")\n results = wikipedia.summary(request, sentences=2)\n speak(results)\n\nelif 'su youtube' in request.lower():\n speak('Sto cercando su YouTube...')\n driver = webdriver.Chrome(ChromeDriverManager().install())\n 
driver.get('https://youtube.com')\n time.sleep(5)\n request = request.lower()\n request = request.replace(\"su youtube\", \"\")\n request = request.replace(\"cerca\", \"\")\n request = request.replace(\"riproduci\", \"\")\n driver.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/form/div/div[1]/input').send_keys(request)\n driver.find_element_by_xpath('/html/body/ytd-app/div/div/ytd-masthead/div[3]/div[2]/ytd-searchbox/form/button').click()\n speak('Ecco a te... '+request + 'su Youtube...')\n time.sleep(2)\n driver.find_element_by_xpath('//*[@id=\"contents\"]/ytd-video-renderer[1]').click()\n\n\n", "repo_name": "drilon-hametaj/jarvis", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2471, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyttsx3.init", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "speech_recognition.Recognizer", "line_number": 45, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 46, "usage_type": "call"}, {"api_name": "wikipedia.summary", "line_number": 73, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 78, "usage_type": "name"}, {"api_name": "webdriver_manager.chrome.ChromeDriverManager", "line_number": 78, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "43477220351", "text": "from functions import helper\nfrom classes import Solution\nimport sys, os, time, argparse, pickle\n\n\nclass MyParser(argparse.ArgumentParser):\n \"\"\" Class that allows us to run help message when no arguments are given\"\"\"\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\ndef main(argv):\n \"\"\" Main function calling all algorithms.\n Args:\n args: Command-line argument that determines which algortihm\n should be called. 
Can include prompt for visualisation and to\n store the results.\n \"\"\"\n\n # Store start time of program.\n start_time = time.time()\n\n parser = MyParser(description='RailNL Discription!', add_help=False)\n\n required = parser.add_argument_group('Required argument')\n required.add_argument('-a', '--algorithm', action='store', dest=\"algorithm\",\n choices=['random', 'greedy', 'genetic', 'hillclimber', 'annealing'], required=True,\n help=\"specify which algorithm to run\")\n\n optional = parser.add_argument_group('Optional arguments')\n\n optional.add_argument('--demo', action='store_true',\n help='run demo site with routes - default: false')\n optional.add_argument(\"-h\", \"--help\", action=\"help\",\n help=\"show this help message and exit\")\n optional.add_argument('-s', '--scenario', action='store', default='netherlands',\n choices=['netherlands', 'netherlands-simple', 'holland', 'holland-simple'],\n help='specify which scenario needs to be loaded - default: netherlands')\n optional.add_argument('--store', action='store_true',\n help=\"store the results in a .scv file - default: false\")\n optional.add_argument('-t', '--times', action='store', type=int, nargs='?',\n const=0, default=1, help=\"specify how many times to run - default: 1\")\n optional.add_argument('-v', '--visual', action='store_true',\n help=\"create visual of the results - default: false\")\n\n optional.add_argument('--version', action='version', version='%(prog)s 1.0')\n optional.add_argument('--temp', action='store', type=int, nargs='?',\n default='1000', help=\"the maximum temp of the cooling function as integer\")\n optional.add_argument('--cooling', action='store', type=int, nargs='?',\n default='0', choices=[0, 1, 2, 3], help=\"an integer representing the choice of cool function\")\n optional.add_argument('-i', '--ignore', action='store',\n help='ignore station - default: none')\n optional.add_argument('--start', action='store', type=int, nargs='?',\n default = '0', choices=[0, 1, 2], \\\n help=\"an integer representing the choice of soluiton to use as input - default: 0\" )\n optional.add_argument('--route', action='store', type=int, nargs='?',\n default ='0', help=\"an integer representing how many iterations on routes should be ran - default: 0\")\n optional.add_argument('--connection', action='store', type=int, nargs='?',\n default = '0', help=\"an integer representing how many iterations on connections should be ran - default: 0\")\n\n args = parser.parse_args()\n\n algo = args.algorithm\n demo = args.demo\n scenario = args.scenario\n store = args.store\n times = args.times\n visual = args.visual\n temp = args.temp\n cooling = args.cooling\n ignore = args.ignore\n start_algorithm = args.start\n route_iterations = args.route\n connection_iterations = args.connection\n\n station_dict, train, max_time = helper.load_scenario(scenario, ignore)\n\n solution = helper.init_solution(station_dict, train, max_time)\n best_solution, best_score = helper.init_best_solution(solution, \\\n station_dict, train, max_time)\n\n best_solution, best_score, outfile, score = helper.run_times(times, algo, \\\n solution, best_solution, best_score, temp, cooling, \\\n start_algorithm, route_iterations, connection_iterations)\n\n run_time = time.time() - start_time\n\n best_solution.print_solution()\n helper.print_score(run_time, times, score, outfile, visual, store)\n\n if store != True:\n os.remove(outfile)\n\n if (demo):\n helper.run_demo(scenario)\n\n os.remove(\"./data/temp/displayroute.csv\")\n\n exit(1)\n\n\nif __name__ == 
\"__main__\":\n main(sys.argv[1:])\n", "repo_name": "NegativeNancy/Team_Arjan", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 11, "usage_type": "call"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "functions.helper.load_scenario", "line_number": 77, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 77, "usage_type": "name"}, {"api_name": "functions.helper.init_solution", "line_number": 79, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 79, "usage_type": "name"}, {"api_name": "functions.helper.init_best_solution", "line_number": 80, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 80, "usage_type": "name"}, {"api_name": "functions.helper.run_times", "line_number": 83, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 83, "usage_type": "name"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "functions.helper.print_score", "line_number": 90, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 90, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 93, "usage_type": "call"}, {"api_name": "functions.helper.run_demo", "line_number": 96, "usage_type": "call"}, {"api_name": "functions.helper", "line_number": 96, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 98, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 104, "usage_type": "attribute"}]} +{"seq_id": "34572637267", "text": "import unittest\nfrom descarteslabs.client.auth import Auth\nimport requests\nimport json\n\n\n# flake8: noqa\nanon_token = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJncm91cHMiOlsicHVibGljIl0sImlzcyI6Imh0dHBzOi8vZGVzY2FydGVzbGFicy5hdXRoMC5jb20vIiwic3ViIjoiZGVzY2FydGVzfGFub24tdG9rZW4iLCJhdWQiOiJaT0JBaTRVUk9sNWdLWklweHhsd09FZng4S3BxWGYyYyIsImV4cCI6OTk5OTk5OTk5OSwiaWF0IjoxNDc4MjAxNDE5fQ.QL9zq5SkpO7skIy0niIxI0B92uOzZT5t1abuiJaspRI\"\n\n\nclass TestAuth(unittest.TestCase):\n def test_get_token(self):\n # get a jwt\n auth = Auth.from_environment_or_token_json()\n self.assertIsNotNone(auth.token)\n\n # validate the jwt\n url = \"https://descarteslabs.auth0.com\" + \"/tokeninfo\"\n params = {\"id_token\": auth.token}\n headers = {\"content-type\": \"application/json\"}\n r = requests.post(url, data=json.dumps(params), headers=headers)\n self.assertEqual(200, r.status_code)\n\n def test_get_namespace(self):\n auth = Auth.from_environment_or_token_json()\n self.assertIsNotNone(auth.namespace)\n\n def test_init_token_no_path(self):\n auth = Auth(jwt_token=anon_token, token_info_path=None, client_id=\"foo\")\n self.assertEquals(anon_token, auth._token)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "AlexJinlei/descarteslabs-python", "sub_path": "descarteslabs/client/auth/tests/test_auth.py", "file_name": "test_auth.py", "file_ext": "py", "file_size_in_byte": 1264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": 
"descarteslabs.client.auth.Auth.from_environment_or_token_json", "line_number": 14, "usage_type": "call"}, {"api_name": "descarteslabs.client.auth.Auth", "line_number": 14, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 21, "usage_type": "call"}, {"api_name": "descarteslabs.client.auth.Auth.from_environment_or_token_json", "line_number": 25, "usage_type": "call"}, {"api_name": "descarteslabs.client.auth.Auth", "line_number": 25, "usage_type": "name"}, {"api_name": "descarteslabs.client.auth.Auth", "line_number": 29, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "4444257797", "text": "from django.contrib.auth.models import User\nfrom django.db import models\nfrom datetime import datetime\nfrom django.core.mail import send_mail\n\nfrom json import dumps\nfrom random import getrandbits\nfrom datetime import datetime\n\n#\n# Doctor manager\n#\tCreate function that first creates the user\n#\nclass DoctorManager(models.Manager) :\n\tdef create_doctor(self, username, password, mail, first_name='', last_name='', birth_year='', gender='', country='', hospital='', license='') :\n\t\tauth_user = User.objects.create_user(\n\t\t\tusername, \n\t\t\temail=mail, \n\t\t\tpassword=password,\n\t\t\tfirst_name=first_name,\n\t\t\tlast_name=last_name,\n\t\t\tlast_login=datetime.now()\n\t\t)\n\n\t\tdoctor = self.create(\n\t\t\tuser=auth_user,\n\t\t\tbirth_year=birth_year,\n\t\t\tgender=gender,\n\t\t\tcountry=country,\n\t\t\thospital=hospital,\n\t\t\tlicense=license\n\t\t)\n\n\t\tdoctor.send_activation_mail()\n\n\t\treturn doctor\n\n#\n# Doctor class\n#\tExtend the default user class\n#\tAdd hospital and license\n#\tNeeds to be manually validated by staff\n#\nclass Doctor(models.Model) :\n\n\tMALE = 'M'\n\tFEMALE = 'F'\n\tOTHER = 'O'\n\tGENDER_CHOICES = (\n\t\t(MALE, 'Male'),\n\t\t(FEMALE, 'Female'),\n\t\t(OTHER, 'Other'),\n\t)\n\n\tuser = models.OneToOneField(\n\t\tUser, \n\t\ton_delete=models.CASCADE\n\t)\n\tbirth_year = models.IntegerField(\n\t\tnull=True, \n\t\tblank=True,\n\t)\n\tgender = models.CharField(\n\t\tmax_length=1,\n\t\tchoices=GENDER_CHOICES,\n\t\tnull=True,\n\t)\n\tcountry = models.CharField(\n\t\tmax_length=30, \n\t\tnull=True, \n\t\tblank=True,\n\t)\n\thospital = models.CharField(\n\t\tmax_length=100, \n\t\tnull=True, \n\t\tblank=True\n\t)\n\tlicense = models.CharField(\n\t\tmax_length=100,\n\t\tnull=True,\n\t\tblank=True\n\t)\n\tmail_code = models.CharField(\n\t\tmax_length=200,\n\t\tnull=True,\n\t\tblank=True\n\t)\n\tmail_confirmed = models.BooleanField(\n\t\tdefault=False\n\t)\n\tvalidated = models.BooleanField(\n\t\tdefault=False\n\t)\n\n\tobjects = DoctorManager()\n\n\tdef __str__ (self) :\n\t\treturn \"{} - {}\".format(\n\t\t\t\t\tself.user.get_full_name, \n\t\t\t\t\tself.license)\n\n\tdef json(self) :\n\t\tdata = {\n\t\t\t\"user_id\" : self.user.id,\n\t\t\t\"username\": self.user.username,\n\t\t\t\"first_name\": self.user.first_name,\n\t\t\t\"last_name\" : self.user.last_name,\n\t\t\t\"mail\" : self.user.email,\n\t\t\t\"is_active\" : self.user.is_active,\n\t\t\t\"last_login\" : self.user.last_login.strftime(\"%c\"),\n\t\t\t\"date_joined\" : self.user.date_joined.strftime(\"%c\"),\n\t\t\t\n\t\t\t\"doctor_id\" : self.id,\n\t\t\t\"country\" : self.country,\n\t\t\t\"gender\" : self.gender,\n\t\t\t\"birth_year\" : self.birth_year,\n\t\t\t\"license\" : self.license,\n\t\t\t\"hospital\" : self.hospital,\n\t\t\t\"mail_code\" : 
self.mail_code,\n\t\t\t\"mail_confirmed\" : self.mail_confirmed,\n\t\t\t\"validated\" : self.validated\n\t\t}\n\t\treturn dumps(data)\n\n\tdef send_activation_mail(self) :\n\t\tcode = getrandbits(128)\n\t\tself.mail_code = code\n\t\turl = \"lungcheck.tk/app/confirm_mail?uid=\"+str(self.user.id)+\"&code=\"+str(code)\n\t\tsend_mail(\n\t\t 'Activate your account',\n\t\t 'Click in the following link to activate your account.\\n\\n'+url,\n\t\t 'lungcheckturku@gmail.com',\n\t\t [self.user.email],\n\t\t fail_silently=True,\n\t\t)", "repo_name": "Sisqui/lungcheck_django", "sub_path": "lungcheck/doctors/models/doctor.py", "file_name": "doctor.py", "file_ext": "py", "file_size_in_byte": 2935, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Manager", "line_number": 14, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User", "line_number": 56, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 83, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 126, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 129, "usage_type": "call"}]} +{"seq_id": 
"72575587686", "text": "\"\"\"\nCreated on Wed Mar 16 18:05:28 2016\n@author: Roberto Gaudenzi\n+------------------------------------------------------+\n| Sapienza - Università di Roma |\n| Master of Science in Engineering in Computer Science |\n| Pervasive Systems |\n| a.y. 2015-16 |\n| Introduction to InfluxDB |\n| InfluxDB-Python Example |\n| InfluxChat - Server |\n+------------------------------------------------------+\n\"\"\"\n\nfrom influxdb import InfluxDBClient\nimport socket\nimport select\nimport sys\nimport signal\n\n\n# The broadcast_message() function forwards the message msg\n# received from sock to all the other connected users.\ndef broadcast_message(sender_sock, sender_name, msg_content):\n\n # If the sender username is equal to DUMMYUSERNAME, then the message won't be stored into InfluxDB.\n if sender_name != DUMMY_USERNAME:\n json_body = [\n {\n \"measurement\": MEASUREMENT_NAME,\n \"tags\": {\n \"username\": sender_name\n },\n \"fields\": {\n \"value\": msg_content\n }\n }\n ]\n # Send the message to InfluxDB\n influxdb_client.write_points(json_body)\n\n # Forward the message to all other users.\n for dest_sock in list_channels:\n # Clearly msg is not sent neither to the server itself nor to the initial sender.\n if dest_sock != server_socket and dest_sock != sender_sock:\n # The try-except construct is used to handle broken channels (e.g., when a user pressed \"Ctrl+C\")\n try:\n if sender_name != DUMMY_USERNAME:\n msg_to_deliver = \"<\"+sender_name+\">: \"+msg_content\n else:\n msg_to_deliver = msg_content\n dest_sock.send(msg_to_deliver.encode('utf-8'))\n except:\n dest_sock.close()\n print(\"DEBUG: removing socket in broadcasting \"+str(dest_sock))\n list_channels.remove(dest_sock)\n\n\n# signal_handler function is called when a SIGINT signal is detected (e.g., Ctrl+C have been pressed)\ndef signal_handler(signal, frame):\n print('\\nInfluxChat Server will be terminated.')\n broadcast_message(server_socket, DUMMY_USERNAME, MSG_END)\n server_socket.close()\n sys.exit()\n\n# Main function\nif __name__ == \"__main__\":\n print(\"+---------------------+\")\n print(\"| InfluxChat - Server |\")\n print(\"+---------------------+\")\n\n # Constants for interacting with InfluxDB.\n DB_NAME = \"PervSystPers\"\n DB_ADDRESS = \"localhost\"\n DB_PORT = 8086\n MEASUREMENT_NAME = \"influxchat\"\n RET_POL_NAME = 'del_4_weeks'\n RET_POL_PERIOD = '4w'\n RET_POL_N_COPY = 1\n CONT_QUERY = \"\"\"create continuous query cq_30m on PervSystPers\n begin select count(value) as num_msg\n into PervSystPers.\\\"default\\\".downsampled_msg\n from influxchat\n group by time(30m)\n end\"\"\"\n\n print(\"Connecting to InfluxDB database...\")\n influxdb_client = InfluxDBClient(host=DB_ADDRESS, port=DB_PORT, database=DB_NAME)\n\n print(\"Done.\")\n\n print(\"Setting up continuous queries and retention policies...\")\n # Set up the continuous query\n try:\n result_db = influxdb_client.query(CONT_QUERY)\n except:\n print(\"Note: continuous query already exists.\")\n\n # Set up retention policy\n resultdb = influxdb_client.create_retention_policy(RET_POL_NAME, RET_POL_PERIOD,\n RET_POL_N_COPY, DB_NAME, default=True)\n print(\"Done.\")\n\n print(\"Setting up socket...\")\n # List to keep track of socket descriptors\n list_channels = []\n\n # Constants for socket interactions.\n MSG_END = \"QUIT\"\n RECV_BUFFER_SIZE = 4096\n CHAT_SERVER_ADDR = \"127.0.0.1\"\n CHAT_SERVER_PORT = 5050\n ALLOWED_CONNECTIONS = 10\n DUMMY_USERNAME = \"\"\n\n # The server keeps track of the name associated to each currently 
connected user\n username_addr_map = {}\n\n # Set up the signal handler for Ctrl+C.\n signal.signal(signal.SIGINT, signal_handler)\n\n # Create socket for InfluxChat Server\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server_socket.bind((\"0.0.0.0\", CHAT_SERVER_PORT))\n server_socket.listen(ALLOWED_CONNECTIONS)\n\n # Add server socket to the list of readable channels\n list_channels.append(server_socket)\n\n print(\"Done.\")\n print(\"Waiting for connections on port \" + str(CHAT_SERVER_PORT) + \".\")\n\n while True:\n # Use select() for getting the channels that are ready to be read\n read_sockets, write_sockets, err_sockets = select.select(list_channels, [], [])\n\n for read_sock in read_sockets:\n # If read_sock is the server socket, then a new inbound connection occurred\n if read_sock == server_socket:\n print(\"DEBUG: I'm receiving from server socket.\")\n read_sock_fd, addr = server_socket.accept()\n list_channels.append(read_sock_fd)\n\n name_received = False\n new_username = ''\n while not name_received:\n read_sockets_name, write_sockets_name, err_sockets_name = select.select([read_sock_fd], [], [])\n\n for read_name_sock in read_sockets_name:\n if read_name_sock != read_sock_fd:\n print(\"DEBUG: Error while receiving username.\")\n else:\n new_username = read_name_sock.recv(RECV_BUFFER_SIZE).decode()\n # We need to remove the \"\\n\" at the end.\n new_username = new_username[0:len(new_username)-1]\n # Add the username to a map for future usage.\n username_addr_map[read_name_sock] = new_username\n name_received = True\n\n # Notify to all users that someone joined the conversation\n broadcast_message(read_sock_fd, DUMMY_USERNAME, \"--- %s joined the conversation ---\" % new_username)\n print(\"--- %s joined the conversation ---\" % new_username)\n\n # Else, the value to be read is a new message sent by some user\n else:\n print(\"DEBUG: I'm receiving from users.\")\n\n # Get the username related to the socket.\n sender_username = username_addr_map[read_sock]\n\n # the try-except construct is used to handle\n # \"Connection reset by peer\" exceptions in Windows\n try:\n msg = read_sock.recv(RECV_BUFFER_SIZE).decode()\n if msg and len(str(msg)) > 0:\n\n # If the user sent MSG_END, then he/she has left the room.\n if str(msg) == MSG_END:\n # print(\"--- <\"+str(read_sock_fd.getpeername())+\"> is now offline ---\")\n print(\"--- <\"+sender_username+\"> is now offline ---\")\n broadcast_message(read_sock, DUMMY_USERNAME, \"--- <\"+sender_username+\"> is now offline ---\")\n\n # Close the socket, and remove it from the list of channels we're listening to.\n read_sock_fd.close()\n list_channels.remove(read_sock)\n # Also remove the username from the map.\n del username_addr_map[read_sock]\n\n # Else, simply broadcast the inbound message.\n else:\n broadcast_message(read_sock, sender_username, str(msg))\n else:\n print(\"--- <\"+sender_username+\"> is now offline ---\")\n except:\n print(\"--- <\"+sender_username+\"> is now offline ---\")\n broadcast_message(read_sock_fd, DUMMY_USERNAME, \"--- <\"+sender_username+\"> is now offline ---\")\n read_sock_fd.close()\n print(\"DEBUG: removing socket in except \"+str(read_sock_fd))\n list_channels.remove(read_sock_fd)\n continue\n\n server_socket.close()\n", "repo_name": "RobGaud/PervasiveSystemsPersonal", "sub_path": "influx_chat_server.py", "file_name": "influx_chat_server.py", "file_ext": "py", "file_size_in_byte": 8553, "program_lang": "python", 
"lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "influxdb.InfluxDBClient", "line_number": 88, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 120, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 120, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 123, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 123, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 123, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 124, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 124, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 136, "usage_type": "call"}, {"api_name": "select.select", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "12354808051", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: events\n :platform: Unix, Windows\n :synopsis: Custom events definition\n\n.. moduleauthor:: Anton Konyshev \n\n\"\"\"\n\nfrom wx.lib.newevent import NewEvent\n\n\nnewevent = NewEvent()\nLoadVideo = newevent[0]\n\"\"\"Command to load chosen video file.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n filepath (str): path to the video file.\n\n\"\"\"\nLOAD_VIDEO = newevent[1]\n\nnewevent = NewEvent()\nLoadSubtitles = newevent[0]\n\"\"\"Command to load chosen subtitles file.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n filepath (str): path to the subtitles file.\n\n\"\"\"\nLOAD_SUBTITLES = newevent[1]\n\nnewevent = NewEvent()\nContentLoadingState = newevent[0]\n\"\"\"Notification about successful video or subtitles file opening and loading.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n video (bool): video loading state::\n\n True -- Chosen video successful loaded.\n False -- Video is not loaded.\n None -- Video loading state without changes.\n\n subtitles (bool): subtitles loading state::\n\n True -- Chosen subtitles successful loaded.\n False -- Subtitles is not loaded.\n None -- Subtitles loading state without changes.\n\n\"\"\"\nCONTENT_LOADING_STATE = newevent[1]\n\nnewevent = NewEvent()\nFragmentComplete = newevent[0]\n\"\"\"Notification about successful completion of current subtitles fragment.\n\nReceivers:\n Player (wx.Frame): player window.\n\n\"\"\"\nFRAGMENT_COMPLETE = newevent[1]\n\nnewevent = NewEvent()\nFragmentStarted = newevent[0]\n\"\"\"Notification about beginning of a next subtitles fragment learning.\n\nReceivers:\n Player (wx.Frame): player window.\n\n\"\"\"\nFRAGMENT_STARTED = newevent[1]\n\nnewevent = NewEvent()\nSubtitlesShift = newevent[0]\n\"\"\"Command to shift subtitles.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n shift (int): shift in milliseconds.\n\n\"\"\"\nSUBTITLES_SHIFT = newevent[1]\n\nnewevent = NewEvent()\nTranslateRequest = newevent[0]\n\"\"\"Command to translate a subtitles fragment or word.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n src (str or unicode): source text or word.\n details (bool): request detailed result or not (default is False)::\n\n True -- Details requested.\n False -- Only translation.\n\n\"\"\"\nTRANSLATE_REQUEST = newevent[1]\n\nnewevent = NewEvent()\nTranslationResult = newevent[0]\n\"\"\"Notification about successful translation.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n result (str or unicode): translated text.\n details 
(bool): is result detailed or not (default is False)::\n\n True -- Detailed result.\n False -- Only translation.\n\n\"\"\"\nTRANSLATION_RESULT = newevent[1]\n\nnewevent = NewEvent()\nTranslateAnswer = newevent[0]\n\"\"\"Command to translate an answer entered by user.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n answer (str or unicode): user's answer.\n\n\"\"\"\nTRANSLATE_ANSWER = newevent[1]\n\nnewevent = NewEvent()\nConfigUpdate = newevent[0]\n\"\"\"Notification about settings changing.\n\nReceivers:\n Player (wx.Frame): player window.\n\nKwargs:\n params (dict): changes (format: parameter_name-parameter_value).\n name (str or unicode): parameter name.\n value (str or unicode or bool or int or float): new parameter value.\n apply_updated (bool): apply changed settings now (default: False).\n apply_all (bool): apply all settings now (default: False).\n\n\"\"\"\nCONFIG_UPDATE = newevent[1]\n", "repo_name": "antonkonyshev/steno", "sub_path": "steno/events.py", "file_name": "events.py", "file_ext": "py", "file_size_in_byte": 3449, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "wx.lib.newevent.NewEvent", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 27, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 40, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 63, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 73, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 83, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 96, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 113, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 130, "usage_type": "call"}, {"api_name": "wx.lib.newevent.NewEvent", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "21823734572", "text": "\"\"\"\r\nCopywriteR set up using suggested settings for targeted exome \r\nhttps://bioconductor.org/packages/release/bioc/vignettes/CopywriteR/inst/doc/CopywriteR.pdf\r\n - 50 kb window\r\n\r\nNote: only has paired sample mode, so normal samples are just randomly paired with unknowns\r\n\"\"\"\r\n\r\nimport csv\r\nimport subprocess\r\nimport os\r\nimport pathlib\r\n\r\nfrom . 
import utils, base_classes\r\n\r\n\r\nclass Copywriter(base_classes.BaseCNVTool):\r\n def __init__(self, capture, gene, start_time, normal_panel=True):\r\n self.run_type = \"copywriter\"\r\n super().__init__(capture, gene, start_time, normal_panel=normal_panel)\r\n self.extra_db_fields = [\"num.mark\", \"unknown\", \"seg.mean\", \"control_id\"]\r\n self.settings = {**self.settings, \"docker_image\": \"stefpiatek/copywriter:2.14.1\"}\r\n\r\n def parse_output_file(self, file_path, sample_id):\r\n cnvs = []\r\n with open(file_path, \"r\") as handle:\r\n output = csv.DictReader(handle, delimiter=\"\\t\")\r\n sample_to_bam = {sample: bam for (bam, sample) in self.bam_to_sample.items()}\r\n bam_name = pathlib.Path(sample_to_bam[sample_id]).name\r\n bamfile_to_sample = {pathlib.Path(bam_path).name: sample for bam_path, sample in self.bam_to_sample.items()}\r\n for row in output:\r\n if row[\"unknown\"] == bam_name:\r\n cnv = dict(row)\r\n cnv[\"chrom\"] = f\"{self.settings['chromosome_prefix']}{cnv['chrom']}\"\r\n cnv[\"sample_id\"] = sample_id\r\n cnv[\"control_id\"] = bamfile_to_sample[cnv.pop(\"control\")]\r\n cnv[\"seg.mean\"] = float(cnv[\"seg.mean\"])\r\n # Determined using ROC curve\r\n if cnv[\"seg.mean\"] <= -0.5:\r\n cnv[\"alt\"] = \"DEL\"\r\n elif cnv[\"seg.mean\"] >= 0.5:\r\n cnv[\"alt\"] = \"DUP\"\r\n else:\r\n continue\r\n cnvs.append(cnv)\r\n return cnvs\r\n\r\n def run_command(self, args):\r\n self.run_docker_subprocess([\"Rscript\", f\"/mnt/cnv-caller-resources/copywriter/copywriter_runner.R\", *args])\r\n\r\n def run_workflow(self):\r\n # write bam locations to file to be read by R script\r\n # only paired settings so just pair up unknowns with controls at random\r\n # if it looks good, could use exomedepth choice of normal to select the appropriate control\r\n\r\n sample_names = []\r\n output_paths = []\r\n # assume 3 gb per worker so memory doesn't run out\r\n max_workers = min([int(self.max_cpu), int(self.max_mem) // 3])\r\n total_batches = len(self.settings[\"unknown_bams\"]) // 30\r\n if len(self.settings[\"unknown_bams\"]) % 30:\r\n total_batches += 1\r\n for batch_number in range(total_batches):\r\n batch_output = f\"{self.output_base}/batch_{batch_number}\"\r\n docker_batch_output = f\"{self.docker_output_base}/batch_{batch_number}\"\r\n try:\r\n os.makedirs(batch_output)\r\n except FileExistsError:\r\n pass\r\n\r\n with open(f\"{batch_output}/all_samples.txt\", \"w\") as handle:\r\n writer = csv.DictWriter(handle, fieldnames=[\"samples\", \"controls\"], delimiter=\"\\t\")\r\n writer.writeheader()\r\n for normal_index in range(30):\r\n unknown_index = normal_index + 30 * batch_number\r\n normal_bam = self.settings[\"normal_bams\"][normal_index]\r\n try:\r\n unknown_bam = self.settings[\"unknown_bams\"][unknown_index]\r\n except IndexError:\r\n # will get index error for the last batch if not divisible by 30. 
Skip these\r\n continue\r\n writer.writerow({\"samples\": normal_bam, \"controls\": normal_bam})\r\n writer.writerow({\"samples\": unknown_bam, \"controls\": normal_bam})\r\n\r\n sample_names.append(f\"{self.bam_to_sample[unknown_bam]}\")\r\n output_paths.append(f\"{self.output_base}/batch_{batch_number}/results.txt\")\r\n\r\n base_classes.logger.info(f\"Running CopywriteR on batch {batch_number}\")\r\n self.run_command(\r\n [\r\n f\"--max-cpu={max_workers}\",\r\n f\"--output-path={docker_batch_output}\",\r\n f\"--capture-regions={self.settings['capture_path']}\",\r\n f\"--chromosome-prefix={self.settings['chromosome_prefix']}\",\r\n ]\r\n )\r\n\r\n return output_paths, sample_names\r\n", "repo_name": "stefpiatek/cnv-patissier", "sub_path": "scripts/copywriter.py", "file_name": "copywriter.py", "file_ext": "py", "file_size_in_byte": 4634, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "csv.DictReader", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 29, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 30, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 67, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "33531903169", "text": "\nimport xbmc\nimport xml.dom.minidom\nimport random\n\n# Can't find a way to play the favourites list - manual approach it is\ntree = xml.dom.minidom.parse(\"/storage/.kodi/userdata/favourites.xml\")\nnodes = tree.getElementsByTagName(\"favourite\")\ndatas = [x.childNodes[0].data for x in nodes]\n\n# Get just the file name\nfiles = [x[11:-2] for x in datas]\n\n# SystemRandom does appear to be random at each boot\nrand = random.SystemRandom()\nrand.shuffle(files)\n\nsongList = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)\nsongList.clear()\n\nfor f in files:\n songList.add(f)\n\n# This shuffle appears to use time()\n# time will always be the same value because the RPi doesn't have an RTC\n#songList.shuffle()\n\nxbmc.executebuiltin(\"xbmc.playercontrol(RepeatAll)\")\n\nxbmc.Player().play(songList)\n", "repo_name": "chris-cartwright/CarPC", "sub_path": "autoexec.py", "file_name": "autoexec.py", "file_ext": "py", "file_size_in_byte": 765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xml.dom.minidom.dom.minidom.parse", "line_number": 7, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom", "line_number": 7, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom", "line_number": 7, "usage_type": "name"}, {"api_name": "random.SystemRandom", "line_number": 15, "usage_type": "call"}, {"api_name": "xbmc.PlayList", "line_number": 18, "usage_type": "call"}, {"api_name": "xbmc.PLAYLIST_MUSIC", "line_number": 18, "usage_type": "attribute"}, {"api_name": "xbmc.executebuiltin", "line_number": 28, "usage_type": "call"}, {"api_name": "xbmc.Player", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "9232740429", "text": "from django.db import models\nfrom apps.category.models import Category\nfrom apps.user.models import User\nfrom django.shortcuts import get_object_or_404\nimport datetime\n\n# Create your models here.\n\n\nclass Product(models.Model):\n name = models.CharField(max_length=50)\n description = models.CharField(max_length=50)\n cost = models.CharField(max_length=50)\n deleted_at = models.DateTimeField(blank=True, null=True)\n category = models.ForeignKey(Category, 
related_name=\"category_product\", on_delete=models.CASCADE)\n user = models.ForeignKey(User, related_name=\"user_product\", on_delete=models.CASCADE)\n\n @staticmethod\n def soft_delete(id):\n product = get_object_or_404(Product.objects.all(), pk=id)\n # product = Product.objects.get(pk=id)\n # product.deleted_at = timezone.now()\n product.deleted_at = datetime.datetime.utcnow()\n product.save()\n return product\n", "repo_name": "julianrico1994/Takum-prueba-Fron-Endt-Angular-7-Developer", "sub_path": "prueba_back/apps/product/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 921, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "apps.category.models.Category", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 16, "usage_type": "call"}, {"api_name": "apps.user.models.User", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "attribute"}]} +{"seq_id": "7325629954", "text": "from config import *\r\n\r\nprint(satelite_data_path)\r\n\r\n##saving satelites that have beta between -14 and 14\r\nf=open(satelite_data_path, \"r\")\r\nsateliti14=open(satelites_b14_path, \"w\")\r\ncounter14=0 ##counter for how many satelites there are that are with beta between -2 and 2\r\n\r\n\r\nfor line in f:\r\n a=line.split()\r\n if len(a)==11:\r\n if float(a[2])>=-14 and float(a[2])<=14:\r\n counter14 += 1\r\n sateliti14.write(line)\r\n\r\nsateliti14.close()\r\n\r\n##listing satelites that are closest to 0 and 180 (beta between -14 and 14)\r\nsateliti14=open(satelites_b14_path, \"r\")\r\n\r\n##file where it saves\r\nangle=open(satelites_angle_path, \"w\")\r\n\r\nprevious_line=sateliti14.readline()\r\n\r\n#counter for how many times satelit passes 0 and 180 wit beta between -14 and 14\r\ncounter14_0=0 \r\n#counter naming of the satellites\r\ncnt = 0\r\n\r\ndef checkSameSatellite(previous, current):\r\n # print(\"previous\")\r\n # print(previous.split())\r\n # print(\"current\")\r\n # print(current.split())\r\n if 
float(previous.split()[0]) == float(current.split()[0]):\r\n return True\r\n return False\r\n \r\ndef compare_angles(previous_l, current_l, angle, counter=0):\r\n if float(previous_l.split()[4]) < 180 and float(current_l.split()[4]) > 180:\r\n if 180-float(previous_l.split()[4]) < float(current_l.split()[4]) - 180:\r\n previous_l1 = previous_l.split()\r\n previous_l1[0] = previous_l1[0] + \"_\"+ str(counter)\r\n previous_l1.append(\"midnight\")\r\n # previous_l = previous_l.rstrip('\\n')\r\n # previous_l = previous_l + \" midnight\"\r\n # previous_l = previous_l + '\\n'\r\n #print(previous_l)\r\n a, b, c, d, e, f, g, h, i, j, k, l = previous_l1[0], previous_l1[1], previous_l1[2], previous_l1[3], previous_l1[4], previous_l1[5], previous_l1[6], previous_l1[7], previous_l1[8], previous_l1[9], previous_l1[10], previous_l1[11]\r\n angle.write(\"%-5s %-20s %-7s %-7s %-7s %-7s %-5s %-5s %-9s %-7s %-7s %-10s\\n\" % (previous_l1[0], previous_l1[1], previous_l1[2], previous_l1[3], previous_l1[4], previous_l1[5], previous_l1[6], previous_l1[7], previous_l1[8], previous_l1[9], previous_l1[10], previous_l1[11]))\r\n #angle.write(str(previous_l1))\r\n return True\r\n else:\r\n current_l1 = current_l.split()\r\n current_l1[0] = current_l1[0] + \"_\"+ str(counter)\r\n current_l1.append(\"midnight\")\r\n # current_l = current_l.rstrip('\\n')\r\n # current_l = current_l + \" midnight\"\r\n # current_l = current_l + '\\n'\r\n #print(current_l)\r\n #angle.write(str(current_l1))\r\n a, b, c, d, e, f, g, h, i, j, k, l = current_l1[0], current_l1[1], current_l1[2], current_l1[3], current_l1[4], current_l1[5], current_l1[6], current_l1[7], current_l1[8], current_l1[9], current_l1[10], current_l1[11]\r\n angle.write(\"%-5s %-20s %-7s %-7s %-7s %-7s %-5s %-5s %-9s %-7s %-7s %-10s\\n\" % (current_l1[0], current_l1[1], current_l1[2], current_l1[3], current_l1[4], current_l1[5], current_l1[6], current_l1[7], current_l1[8], current_l1[9], current_l1[10], current_l1[11]))\r\n return True\r\n elif float(previous_l.split()[4]) > 300 and float(current_l.split()[4]) < 300:\r\n if 360-float(previous_l.split()[4]) < float(current_l.split()[4]):\r\n previous_l2 = previous_l.split()\r\n previous_l2[0] = previous_l2[0] +\"_\"+ str(counter)\r\n previous_l2.append(\"noon\")\r\n # previous_l = previous_l.rstrip('\\n')\r\n # previous_l=previous_l + \" noon\"\r\n # previous_l=previous_l + '\\n'\r\n #print(previous_l)\r\n a, b, c, d, e, f, g, h, i, j, k, l = previous_l2[0], previous_l2[1], previous_l2[2], previous_l2[3], previous_l2[4], previous_l2[5], previous_l2[6], previous_l2[7], previous_l2[8], previous_l2[9],previous_l2[10], previous_l2[11]\r\n angle.write(\"%-5s %-20s %-7s %-7s %-7s %-7s %-5s %-5s %-9s %-7s %-7s %-10s\\n\" % (previous_l2[0], previous_l2[1], previous_l2[2], previous_l2[3], previous_l2[4], previous_l2[5], previous_l2[6], previous_l2[7], previous_l2[8], previous_l2[9],previous_l2[10], previous_l2[11]))\r\n #angle.write(str(previous_l2))\r\n return True\r\n else:\r\n current_l2 = current_l.split()\r\n current_l2[0] = current_l2[0] + \"_\"+ str(counter)\r\n current_l2.append(\"noon\")\r\n # current_l = current_l.rstrip('\\n')\r\n # current_l = current_l + \" noon\"\r\n # current_l = current_l + '\\n'\r\n #print(current_l)\r\n a, b, c, d, e, f, g, h, i, j, k, l = current_l2[0], current_l2[1], current_l2[2], current_l2[3], current_l2[4], current_l2[5], current_l2[6], current_l2[7], current_l2[8], current_l2[9], current_l2[10], current_l2[11]\r\n angle.write(\"%-5s %-20s %-7s %-7s %-7s %-7s %-5s %-5s %-9s %-7s %-7s 
%-10s\\n\" % (current_l2[0], current_l2[1], current_l2[2], current_l2[3], current_l2[4], current_l2[5], current_l2[6], current_l2[7], current_l2[8], current_l2[9], current_l2[10], current_l2[11]))\r\n #angle.write(str(current_l2))\r\n return True\r\n else:\r\n return False\r\n\r\nt_cnt = 1\r\nfor x in range(len(open(satelites_b14_path, \"r\").readlines())-1):\r\n current_line=sateliti14.readline()\r\n if checkSameSatellite(previous_line, current_line):\r\n if compare_angles(previous_line, current_line, angle, t_cnt):\r\n counter14_0 += 1\r\n t_cnt += 1\r\n else:\r\n t_cnt = 1\r\n previous_line=current_line\r\n\r\nangle.close()\r\n\r\nf.seek(0)\r\n\r\n##saving satelates with beta between -2 and 2\r\nsateliti2=open(satelites_2_path, \"w\")\r\ncounter2=0 ##counter for how many satelites there are that are with beta between -2 and 2\r\n\r\nfor line in f:\r\n a=line.split()\r\n if len(a)==11:\r\n if float(a[2])>=-2 and float(a[2])<=2:\r\n counter2 += 1\r\n sateliti2.write(line)\r\n\r\nsateliti2.close()\r\n##listing satelites that are closest to 0/180 with beta between -2 and 2\r\nsateliti2=open(satelites_2_path, \"r\")\r\n##file where it saves \r\nangle2=open(satelites_angle2_path, \"w\")\r\n\r\n#counter for how many times satelit passes 0 and 180 wit beta between -2 and 2\r\ncounter2_0=0\r\n\r\nprevious_line=sateliti2.readline()\r\n\r\nfor x in range(len(open(satelites_2_path, \"r\").readlines())-1):\r\n current_line=sateliti2.readline()\r\n if checkSameSatellite(previous_line, current_line):\r\n if compare_angles(previous_line, current_line, angle2):\r\n counter2_0 += 1\r\n previous_line=current_line\r\n\r\nangle2.close()\r\n\r\n#caclulating fi. lambda coordinates of stations\r\nimport math\r\n\r\n# Function to calculate cos \r\n# value of angle c \r\ndef cal_cos(n): \r\n cosval = 0\r\n # Converting degrees to radian \r\n n = math.radians(n)\r\n cosval = math.cos(n)\r\n return cosval\r\n \r\n# Function to find third side \r\ndef third_side(a, b, alfa): \r\n angle = cal_cos(alfa) \r\n return math.sqrt((a * a) + (b * b) - 2 * a * b * angle)\r\n\r\n#Function that calculates the angel wich determenates wethwe it is seen or not\r\ndef AngleVidljivost (Ra, trd_s, cent_angl):\r\n angleVidljivost = math.degrees(math.asin(Ra*(math.sin(math.radians(cent_angl)))/trd_s))\r\n return angleVidljivost\r\n\r\nstations = open(mgex_path, \"r\")\r\nstationsFiLa = open(satelites_stationsFiLa_path, \"w\")\r\nstationsFiLaXYZ = open(satelites_stationsFiLaXYZ_path, \"w\")\r\n\r\nwith open(mgex_path) as myfile:\r\n head= [next(myfile) for x in range(4)]\r\n#linija=\"NUM\"+\" \"+\"STATION\"+\" \"+\"FI\"+\" \"+\"LAMBDA\"+\" \"+\"r\"+'\\n'\r\n\r\nstationsFiLa.writelines(head)\r\nstationsFiLa.write(\"%-8s %-8s %-22s %-22s %-20s \\n\" % (\"NUM\", \"STATION\", \"FI_Station\", \"Lamda_Station\", \"r_station\"))\r\n\r\nstationsFiLaXYZ.writelines(head)\r\nstationsFiLaXYZ.write(\"%-8s %-8s %-22s %-22s %-20s %-22s %-22s %-22s \\n\" % (\"NUM\", \"STATION\", \"FI_Station\", \"Lamda_Station\", \"r_station\", \"X\", \"Y\", \"Z\"))\r\n\r\nfor line in stations:\r\n red=line.split()\r\n if len(red) == 5 or len(red) == 6 or len(red) == 9:\r\n try:\r\n xKoor = float(red[2]) \r\n yKoor = float(red[3])\r\n zKoor = float(red[4])\r\n except:\r\n xKoor = float(red[3]) \r\n yKoor = float(red[4])\r\n zKoor = float(red[5])\r\n #print(xKoor,yKoor,zKoor)\r\n\r\n\r\n LambdaSt= math.degrees(math.atan2(yKoor,xKoor))\r\n FiSt= math.degrees(math.atan2(zKoor,(math.sqrt((xKoor**2)+(yKoor**2)))))\r\n rSt= 
math.sqrt((xKoor**2)+(yKoor**2)+(zKoor**2))\r\n\r\n #stationsFiLa.write(red[0]+\" \"+red[1]+\" \"+str(FiSt)+\" \"+str(LambdaSt)+\" \"+str(rSt)+'\\n')\r\n stationsFiLa.write(\"%-8s %-8s %-22s %-22s %-20s\\n\" % (red[0], red[1], str(FiSt), str(LambdaSt), str(rSt)))\r\n stationsFiLaXYZ.write(\"%-8s %-8s %-22s %-22s %-20s %-22s %-22s %-22s \\n\" % (red[0], red[1], str(FiSt), str(LambdaSt), str(rSt), str(xKoor), str(yKoor), str(zKoor)))\r\n\r\nstationsFiLa.close()\r\nstationsFiLaXYZ.close()\r\n\r\nstationsFiLa=open(satelites_stationsFiLa_path)\r\nangle=open(satelites_angle_path, \"r\")\r\n\r\ndistancies14=open(satelites_distance14_path, \"w\")\r\n\r\ndistancies14.write(\"%-11s %-10s %-12s %-16s %-22s %-22s %-22s %-22s %-22s %-22s %-22s\\n\" % (\"Station\", \"Satellite\", \"FI_Satellite\", \"Lamda_Satellite\", \"Fi_Station\", \"Lambda_Station\", \"Distance\", \"Azimuth\", \"Azimuth Reverse\", \"Central angle\", \"Third side\"))#add titels for angels\r\n\r\n\r\nimport pygeodesy as geo\r\n\r\ntemp = stationsFiLa.readlines()\r\n\r\nfor i in angle.readlines():\r\n a=i.split() \r\n if len(a) == 12:\r\n FiSa = float(a[10])\r\n name=a[0]\r\n if float(a[9]) < 180:\r\n LambdaSa = float(a[9])\r\n else:\r\n LambdaSa = (float(a[9])-180)*(-1.0) \r\n\r\n for j in temp:\r\n j = j.split()\r\n\r\n try:\r\n FiSt= float(j[2])\r\n LambdaSt= float(j[3])\r\n rSt= float(j[4])/1000.0\r\n\r\n distance = geo.cosineLaw(FiSa, LambdaSa, FiSt, LambdaSt, rSt)\r\n\r\n azimuth = geo.bearing(FiSt, LambdaSt, FiSa, LambdaSa)\r\n rev_azimuth = geo.bearing(FiSa, LambdaSa, FiSt, LambdaSt)\r\n alfa = (180*distance)/(rSt*math.pi)\r\n\r\n a = rSt\r\n b = rSt + 20200.0\r\n\r\n tr_side = third_side(a, b, alfa)\r\n\r\n #izracun kuta\r\n #AngleSaStCen = AngleVidljivost (b, tr_side, alfa)\r\n #angleCenSatSt = 180 - AngleSaStCen - alfa\r\n\r\n rednew=[j[0:2], name, FiSa, LambdaSa, FiSt, LambdaSt, distance, azimuth, rev_azimuth, tr_side]\r\n #distancies14.write(str(rednew)+'\\n')\r\n distancies14.write(\"%-5s %-5s %-10s %-12f %-16f %-22s %-22s %-22s %-22s %-22s %-22s %-22s\\n\" % (j[0], j[1], name, FiSa, LambdaSa, str(FiSt), str(LambdaSt), str(distance), str(azimuth), str(rev_azimuth), str(alfa), str(tr_side)))#add names for angels\r\n\r\n except:\r\n l=0\r\n\r\n\r\n continue\r\n\r\ndistancies14.close()\r\nangle.close()\r\n\r\n\r\n\r\n\r\n", "repo_name": "ana602/satelite_srp", "sub_path": "src/satelite_parser.py", "file_name": "satelite_parser.py", "file_ext": "py", "file_size_in_byte": 10898, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.radians", "line_number": 150, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 151, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 157, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 161, "usage_type": "call"}, {"api_name": "math.asin", "line_number": 161, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 161, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 161, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 192, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 192, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 193, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 193, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 193, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 194, "usage_type": "call"}, {"api_name": "pygeodesy.cosineLaw", 
"line_number": 233, "usage_type": "call"}, {"api_name": "pygeodesy.bearing", "line_number": 235, "usage_type": "call"}, {"api_name": "pygeodesy.bearing", "line_number": 236, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 237, "usage_type": "attribute"}]} +{"seq_id": "7541354418", "text": "import requests\nimport re\nimport string\nfrom bs4 import BeautifulSoup\n\nprotocol = 'https'\ndict_url_base = protocol + '://vocabulary.com'\nslang_url_base = protocol + '://urbandictionary.com'\nwiki_url_base = protocol + '://wiktionary.org'\n\nprintable = set(string.printable)\n\nclass Definition:\n\tdef __init__(self, part_of_speech, definition, examples):\n\t\tself.part_of_speech = part_of_speech\n\t\tself.definition = definition\n\t\tself.examples = examples\n\t\n\tdef prettify(self):\n\t\ts = self.part_of_speech + ' : ' + self.definition\n\t\tif self.examples:\n\t\t\ts += '\\n\\texamples:'\n\t\t\tfor ex in self.examples:\n\t\t\t\ts += '\\n\\t\"'\n\t\t\t\ts += ex.replace('\\n', '\\n\\t')\n\t\t\t\ts += '\"'\n\t\treturn s\n\ndef join(result_set):\n\tres = ''\n\tfor item in result_set:\n\t\tres += item\n\treturn res\n\ndef find_slang_def(word):\n\thtml_response = requests.get(slang_url_base + '/define.php?term=' + word, \n\t\theaders={\n\t\t\t'responseType': 'document'\n\t\t}).text\n\t\t\n\tweb_page = BeautifulSoup(html_response, 'lxml')\n\tif not web_page.find('body'):\n\t\treturn []\n\t\t\n\tdef_divs = web_page.find_all(class_='definition')\n\t\n\tdefinitions = []\n\tfor div in def_divs:\n\t\tmeaning = div.find(class_='meaning').get_text()\n\t\tmeaning = ''.join(filter(lambda x: x in printable, meaning))\n\t\t\n\t\texample = div.find(class_='example')\n\t\te = ''\n\t\tfor c in example.children:\n\t\t\tif c.text:\n\t\t\t\te += c.text\n\t\t\telif c.name == 'br':\n\t\t\t\te += '\\n'\n\t\te = ''.join(filter(lambda x: x in printable, e))\n\t\t\n\t\tdefinitions.append(Definition('slang', meaning, [e]))\n\t\tif len(definitions) == 3:\n\t\t\tbreak\n\t\n\treturn definitions\n\t\n\ndef find_formal_def(word):\n\thtml_response = requests.get(dict_url_base + '/dictionary/definition.ajax?search=' + word + '&lang=en', \n\t\theaders={\n\t\t\t'responseType': 'document'\n\t\t}).text\n\t\t\n\tweb_page = BeautifulSoup(html_response, 'lxml')\n\tword_overview = web_page.find(class_='word-area')\n\tif not word_overview:\n\t\treturn [], None\n\n\toverviews = []\n\tshort_overview = word_overview.find(class_='short')\n\tif short_overview:\n\t\toverviews.append(\n\t\t\t''.join(filter(lambda x: x in printable, short_overview.get_text())))\n\tlong_overview = word_overview.find(class_='long')\n\tif long_overview:\n\t\toverviews.append(\n\t\t\t''.join(filter(lambda x: x in printable, long_overview.get_text())))\n\n\tword_definitions = web_page.find(class_='word-definitions').find('ol').find_all('li')\n\n\tfirst_type = None\n\tdefinitions = []\n\tfor li in word_definitions:\n\t\tdef_area = li.find(class_='definition')\n\t\ttype_ = def_area.find(class_='pos-icon').get_text()\n\t\tif not first_type:\n\t\t\tfirst_type = type_\n\t\tdef_ = join(def_area.findAll(text=True, recursive=False)).strip()\n\t\tdef_ = ''.join(filter(lambda x: x in printable, def_))\n\t\t\n\t\tex_area = li.find_all(class_='example')\n\t\texamples = []\n\t\tfor ex in ex_area:\n\t\t\texamples.append(re.sub('[“”\\n]', '', ex.text))\n\t\t\n\t\tdefinitions.append(Definition(type_, def_, examples))\n\t\n\treturn overviews, definitions\n\ndef fits_criteria(el):\n\treturn not (el.has_attr('class') and el['class'][0] in ['HQToggle', 'ib-brac', 
'ib-content', 'mw-editsection', 'floatright']) and not (el.name in ['ul', 'sup', 'dl', 'br']) and el.children\n\ndef get_wiki_text(el):\n\ts = ''\n\tfor child in el:\n\t\tis_tag = hasattr(child, 'has_attr')\n\t\tif not is_tag:\n\t\t\ts += child\n\t\telse:\n\t\t\tif fits_criteria(child):\n\t\t\t\ts += get_wiki_text(child)\n\treturn s.replace('\\n', '').strip()\n\ndef find_wiki_info(word):\n\thtml_response = requests.get(wiki_url_base + '/wiki/' + word, \n\t\theaders={\n\t\t\t'responseType': 'document'\n\t\t}).text\n\t\n\tweb_page = BeautifulSoup(html_response, 'lxml')\n\n\tcurrent_el = web_page.find(id='English')\n\tif not current_el:\n\t\treturn '', []\n\tcurrent_el = current_el.parent\n\n\tderivation = ''\n\tdefinitions = []\n\tlast_title = ''\n\twhile True:\n\t\tif not current_el:\n\t\t\tbreak\n\t\t\n\t\tif current_el.name == 'h3' or current_el.name == 'h4' or current_el.name == 'h5':\n\t\t\tlast_title = get_wiki_text(current_el).lower()\n\t\tif current_el.name == 'ol':\n\t\t\tfor def_ in current_el.children:\n\t\t\t\ti_text = get_wiki_text(def_)\n\t\t\t\tif not i_text:\n\t\t\t\t\tcontinue\n\t\t\t\tdefinitions.append(Definition(last_title, i_text, []))\n\t\telif current_el.name == 'hr':\n\t\t\tbreak\n\t\telif current_el.name == 'p' and last_title.startswith('etymology'):\n\t\t\tif derivation:\n\t\t\t\tderivation += '\\n'\n\t\t\tderivation += get_wiki_text(current_el)\n\n\t\tcurrent_el = current_el.next_sibling\n\t\n\treturn derivation, definitions", "repo_name": "NotAFlyingGoose/the-everything-dictionary-py", "sub_path": "py/definitions.py", "file_name": "definitions.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "string.printable", "line_number": 11, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 69, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 104, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 125, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "31766399588", "text": "import os\nimport logging\nimport numpy as np\nfrom cached_property import cached_property\nfrom sqlalchemy import Column, String, Integer, Text, ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.ext.hybrid import hybrid_property\n\nfrom tmlib.models.base import DirectoryModel, DateMixIn\nfrom tmlib.models.utils import remove_location_upon_delete\nfrom tmlib.models.status import FileUploadStatus as fus\nfrom tmlib.utils import autocreate_directory_property\n\nlogger = logging.getLogger(__name__)\n\n#: Supported plate formats (number of wells in the plate).\nSUPPORTED_PLATE_FORMATS = {1, 96, 384}\n\n#: Supported plate acquisition modes. 
Mode \"series\" means that *cycles*\n#: are interpreted as separate acquisitions relating to the same marker\n#: as part of a time series experiment.\n#: Mode \"multiplexing\" implies that a different marker was used in each\n#: acquisition as part of a multiplexing experiment.\nSUPPORTED_PLATE_AQUISITION_MODES = {'basic', 'multiplexing'}\n\n#: Format string for plate locations\nPLATE_LOCATION_FORMAT = 'plate_{id}'\n\n\ndef determine_plate_dimensions(n_wells):\n '''Determines the dimensions of a plate given its number of wells.\n\n Parameters\n ----------\n n_wells: int\n number of wells in the plate\n\n Returns\n -------\n Tuple[int]\n number of rows and column in the plate\n '''\n plate_dimensions = {\n 1: (1, 1),\n 96: (8, 12),\n 384: (16, 24)\n }\n return plate_dimensions[n_wells]\n\n\n@remove_location_upon_delete\nclass Plate(DirectoryModel, DateMixIn):\n\n '''A *plate* represents a container with reservoirs for biological\n samples (referred to as *wells*).\n It's assumed that all images of a *plate* were acquired with the\n same microscope settings implying that each acquisition has the\n same number of *z-planes* and *channels*.\n\n The *format* of the plate is encode by the number of wells in the plate,\n e.g. ``384``.\n\n Note\n ----\n For consistency, a *slide* is considered a single-well *plate*, i.e. a\n *plate* with only one *well*.\n\n Attributes\n ----------\n acquisitions: List[tmlib.model.Acqusition]\n acquisitions belonging to the plate\n wells: List[tmlib.model.Well]\n wells belonging to the plate\n '''\n\n __tablename__ = 'plates'\n\n __table_args__ = (UniqueConstraint('name'), )\n\n #: str: name given by user\n name = Column(String, index=True)\n\n #: str: description provided by user\n description = Column(Text)\n\n #: int: ID of parent experiment\n experiment_id = Column(\n Integer,\n ForeignKey('experiment.id', onupdate='CASCADE', ondelete='CASCADE'),\n index=True\n )\n\n #: tmlib.models.experiment.Experiment: parent experiment\n experiment = relationship(\n 'Experiment',\n backref=backref('plates', cascade='all, delete-orphan')\n )\n\n def __init__(self, name, experiment_id, description=''):\n '''\n Parameters\n ----------\n name: str\n name of the plate\n experiment_id: int\n ID of the parent\n :class:`Experiment `\n description: str, optional\n description of the plate\n '''\n self.name = name\n self.description = description\n self.experiment_id = experiment_id\n\n @hybrid_property\n def location(self):\n '''str: location were the plate is stored'''\n if self._location is None:\n if self.id is None:\n raise AttributeError(\n 'Plate \"%s\" doesn\\'t have an entry in the database yet. '\n 'Therefore, its location cannot be determined.' 
% self.name\n )\n self._location = os.path.join(\n self.experiment.plates_location,\n PLATE_LOCATION_FORMAT.format(id=self.id)\n )\n if not os.path.exists(self._location):\n logger.debug(\n 'create location for plate \"%s\": %s',\n self.name, self._location\n )\n os.mkdir(self._location)\n return self._location\n\n @location.setter\n def location(self, path_to_files):\n self._location = path_to_files\n\n @autocreate_directory_property\n def acquisitions_location(self):\n '''str: location where acquisitions are stored'''\n return os.path.join(self.location, 'acquisitions')\n\n @property\n def status(self):\n '''str: upload status based on the status of acquisitions'''\n child_status = set([f.status for f in self.acquisitions])\n if fus.UPLOADING in child_status:\n return fus.UPLOADING\n elif len(child_status) == 1 and fus.COMPLETE in child_status:\n return fus.COMPLETE\n else:\n return fus.WAITING\n\n @property\n def n_wells(self):\n '''int: number of wells in the plate'''\n return self.experiment.plate_format\n\n @property\n def dimensions(self):\n '''Tuple[int]: number of wells in the plate along the vertical and\n horizontal axis, i.e. the number of rows and columns\n '''\n return determine_plate_dimensions(self.n_wells)\n\n @cached_property\n def well_grid(self):\n '''numpy.ndarray[int]: IDs of wells arranged according to their\n relative position within the plate\n '''\n height, width = self.dimensions\n grid = np.zeros((height, width), dtype=int)\n for w in self.wells:\n grid[w.y, w.x] = w.id\n return grid\n\n @cached_property\n def _empty_wells_coordinates(self):\n '''List[Tuple[int]]: y, x coordinates of each empty wells in the plate,\n i.e. wells that were not imaged\n '''\n empty_wells = np.where(np.logical_not(self.well_grid))\n coordinates = list()\n for i in xrange(len(empty_wells[0])):\n coordinates.append((empty_wells[0][i], empty_wells[1][i]))\n return coordinates\n\n @cached_property\n def nonempty_columns(self):\n '''List[int]: indices of nonempty columns, i.e. columns of the plate\n where at least one well has been imaged\n '''\n nonempty_columns = list()\n for i in xrange(self.well_grid.shape[1]):\n if any(self.well_grid[:, i]):\n nonempty_columns.append(i)\n return nonempty_columns\n\n @cached_property\n def nonempty_rows(self):\n '''List[int]: indices of nonempty rows, i.e. 
rows of the plate where\n at least one well has been imaged\n '''\n nonempty_rows = list()\n for i in xrange(self.well_grid.shape[0]):\n if any(self.well_grid[i, :]):\n nonempty_rows.append(i)\n return nonempty_rows\n\n @cached_property\n def _well_image_size(self):\n well_dims = np.array([w._image_size for w in self.wells])\n if not(len(np.unique(well_dims[:, 0])) == 1 and\n len(np.unique(well_dims[:, 1])) == 1):\n logger.debug('wells don\\'t have the same size')\n logger.debug('use size of largest well')\n return (np.max(well_dims[:, 0]), np.max(well_dims[:, 1]))\n\n @cached_property\n def image_size(self):\n '''Tuple[int]: number of pixels along the vertical and horizontal axis\n '''\n offset = self.experiment.well_spacer_size\n rows = len(self.nonempty_rows)\n cols = len(self.nonempty_columns)\n return (\n rows * self._well_image_size[0] + offset * (rows - 1),\n cols * self._well_image_size[1] + offset * (cols - 1)\n )\n\n @cached_property\n def offset(self):\n '''Tuple[int]: *y*, *x* coordinate of the top, left corner of the plate\n relative to the layer overview at the maximum zoom level\n '''\n logger.debug('calculate plate offset')\n experiment = self.experiment\n plate_coordinate = zip(*np.where(experiment.plate_grid == self.id))[0]\n y_offset = (\n # Plates above the plate\n plate_coordinate[0] * self.image_size[0] +\n # Gaps introduced between plates\n plate_coordinate[0] * experiment.plate_spacer_size\n )\n x_offset = (\n # Plates left of the plate\n plate_coordinate[1] * self.image_size[1] +\n # Gaps introduced between plates\n plate_coordinate[1] * experiment.plate_spacer_size\n )\n return (y_offset, x_offset)\n\n def __repr__(self):\n return '' % (self.id, self.name)\n", "repo_name": "TissueMAPS/TissueMAPS", "sub_path": "tmlibrary/tmlib/models/plate.py", "file_name": "plate.py", "file_ext": "py", "file_size_in_byte": 8600, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "tmlib.models.base.DirectoryModel", "line_number": 53, "usage_type": "name"}, {"api_name": "tmlib.models.base.DateMixIn", "line_number": 53, "usage_type": "name"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 79, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 82, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 82, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 85, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 85, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 88, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 89, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 90, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.relationship", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.backref", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 134, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.hybrid.hybrid_property", "line_number": 116, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 144, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 144, "usage_type": "attribute"}, {"api_name": "tmlib.utils.autocreate_directory_property", "line_number": 141, "usage_type": "name"}, {"api_name": "tmlib.models.status.FileUploadStatus.UPLOADING", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tmlib.models.status.FileUploadStatus", "line_number": 150, "usage_type": "name"}, {"api_name": "tmlib.models.status.FileUploadStatus.UPLOADING", "line_number": 151, "usage_type": "attribute"}, {"api_name": "tmlib.models.status.FileUploadStatus", "line_number": 151, "usage_type": "name"}, {"api_name": "tmlib.models.status.FileUploadStatus.COMPLETE", "line_number": 152, "usage_type": "attribute"}, {"api_name": "tmlib.models.status.FileUploadStatus", "line_number": 152, "usage_type": "name"}, {"api_name": "tmlib.models.status.FileUploadStatus.COMPLETE", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tmlib.models.status.FileUploadStatus", "line_number": 153, "usage_type": "name"}, {"api_name": "tmlib.models.status.FileUploadStatus.WAITING", "line_number": 155, "usage_type": "attribute"}, {"api_name": "tmlib.models.status.FileUploadStatus", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 175, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 169, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.logical_not", "line_number": 185, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 180, "usage_type": "name"}, {"api_name": "cached_property.cached_property", "line_number": 191, "usage_type": "name"}, {"api_name": "cached_property.cached_property", "line_number": 202, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 220, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 213, "usage_type": "name"}, {"api_name": "cached_property.cached_property", "line_number": 222, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 241, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 234, "usage_type": "name"}, {"api_name": "tmlib.models.utils.remove_location_upon_delete", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "11105572140", "text": "import json\nimport os\nimport requests\nfrom dotenv import load_dotenv\nfrom fastapi import Body, FastAPI\nfrom fastapi.encoders import jsonable_encoder\nfrom models.models import Member, Dependant\n\n\nload_dotenv()\n\n\napp = FastAPI()\n\n\nBASE_URL = os.environ.get(\"BASE_URL\")\nTEST_TOKEN = os.environ.get(\"TEST_TOKEN\")\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/members/{member_id}\")\nasync def get_member_by_id(member_id: int):\n get_member_url = f\"{BASE_URL}members/{str(member_id)}\"\n response = requests.get(get_member_url, headers=get_headers())\n return build_response(response)\n\n\n@app.post(\"/members\")\nasync def create_member(data: Member):\n print(data)\n create_member_url = f\"{BASE_URL}members\"\n dict_data = jsonable_encoder(data)\n response = requests.post(\n create_member_url,\n data=json.dumps(dict_data),\n headers=get_headers()\n )\n return 
build_response(response)\n\n\n@app.post(\"/members/{external_member_id}\")\nasync def create_member(data: Dependant, external_member_id: int):\n print(data)\n create_member_url = f\"{BASE_URL}members/{str(external_member_id)}\"\n dict_data = jsonable_encoder(data)\n response = requests.post(\n create_member_url,\n data=json.dumps(dict_data),\n headers=get_headers()\n )\n return build_response(response)\n\n\n###################\n## Utils methods ##\n###################\n\n\ndef get_headers():\n return {\n \"Authorization\": TEST_TOKEN,\n \"content-type\": \"application/json\",\n }\n\n\ndef build_response(response):\n return {\n \"api_response\": response.json()\n }\n", "repo_name": "jesus9529/third-party-integration-challenge", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 13, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 17, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "models.models.Member", "line_number": 33, "usage_type": "name"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}, {"api_name": "models.models.Dependant", "line_number": 46, "usage_type": "name"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "30801623022", "text": "from bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport requests\n\nFILE_NAME = \"pars.csv\"\nr = requests.get('https://www.perekrestok.ru/cat/c/27/zavtrak')\nprint(r.status_code)\nperek_soup = bs(r.text, 'html.parser')\nresult_list = {'category': [],'title': [], 'price': []}\n\ncategory_names = perek_soup.find_all('h2', attrs={\"class\":\"catalog-content-group__title\"})\nprint(len(category_names)) # категория под индексом 3 не имеет одну цену\nfor el in range(len(category_names)):\n if el == 3:\n continue\n patent_category = category_names[el].find_parent('div')\n item_category_names = patent_category.find_all('div', class_='product-card__title')\n item_category_prices = patent_category.find_all('div', class_='price-new')\n for item in range(len(item_category_names)):\n result_list['category'].append(category_names[el].text)\n result_list['title'].append(item_category_names[item].text)\n result_list['price'].append(item_category_prices[item].text)\n\ndf = pd.DataFrame(data=result_list)\ndf.to_csv(FILE_NAME)\n\n", "repo_name": "SergeyMorin/testovoe25.04.2023", "sub_path": "next test/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 
8, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "28530969899", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom uncertainties import ufloat\r\nfrom uncertainties import unumpy as unp\r\nfrom scipy.optimize import curve_fit\r\n\r\nU, I = np.genfromtxt(\"C02_Kennlinie.txt\", unpack=True) # U in Volt I in milli Ampere\r\n\r\nprint(\"C02---------------------------\")\r\n\r\nI = I * 1000 # I in micro Ampere\r\n\r\n# Ausgabe der Werte mit Gradient\r\nprint(\"U, gradient\")\r\n\r\ngradient = np.ones(len(U)) *(-1)\r\nfor index in np.arange(1,len(I)):\r\n gradient[index-1] = (I[index] - I[index-1]) / (U[index] - U[index-1])\r\n\r\nfor index in np.arange(0,len(I)):\r\n print(index, \" &\", U[index], \"\\t&\", I[index], \" &\", gradient[index], \"\\\\\\\\\")\r\n\r\n\r\n# Curve fit für die Raumkoordinaten\r\n\r\nU_raum= U[:11]\r\nI_raum= I[:11]\r\n\r\ndef Raum(U, a, b ):\r\n return a * U ** b\r\n\r\nparams_Raum, covariance_matrix = curve_fit(Raum, U_raum, I_raum, p0=(200, 1.5))\r\n\r\nerrors = np.sqrt(np.diag(covariance_matrix))\r\nprint(\"Parameter: \")\r\nfor name, value, error in zip('ab', params_Raum, errors):\r\n print(f'{name} = {value:.3f} \\pm {error:.3f}')\r\n\r\n# #curve fit für Sättigungsstrom\r\n# U_saet = U[11:]\r\n# I_saet = I[11:]\r\n\r\n# def saet(U, a, b, c, d):\r\n# return np.log(- a) * ( -1* (U - d) ) + np.log(c)\r\n\r\n# params_saet, covariance_matrix_saet = curve_fit(saet, np.log(U_saet), np.log(I_saet) ) # p0=(1,1,600,U[11])\r\n\r\n# errors_saet = np.sqrt(np.diag(covariance_matrix_saet))\r\n# print(\"Parameter: \")\r\n# for name, value, error in zip('abcd', params_saet, errors_saet):\r\n# print(f'{name} = {value:.3f} ± {error:.3f}')\r\n\r\nI_s = 600\r\n\r\n\r\n# Plot\r\n\r\nplt.figure(constrained_layout=True)\r\nxplot_Raum = np.linspace(0, U[11] + 1)\r\nxplot_saet = np.linspace(0, U[-1] + 1)\r\nplt.plot(U, I, \"x\", label=\"Messwerte\")\r\nplt.plot(xplot_Raum,Raum(xplot_Raum,*params_Raum), label=\"Raumkurve\")\r\n# plt.plot(xplot_saet,np.exp(saet(xplot_saet,*params_saet)), label=\"Sättigungskurve\")\r\nplt.plot(xplot_saet,I_s * np.ones(len(xplot_saet)), \"k--\", label=\"$I_s$\") \r\nplt.xlabel(\"$U_A / \\\\unit{{\\\\volt}}$\")\r\nplt.ylabel(\"$I / \\\\unit{{\\\\micro\\\\ampere}}$\")\r\nplt.grid()\r\nplt.legend()\r\n\r\nplt.savefig(\"build/C02_plot.pdf\")", "repo_name": "Enno-Enno/PraktikumWS2223", "sub_path": "12_v504/C02_Kennlinie.py", "file_name": "C02_Kennlinie.py", "file_ext": "py", "file_size_in_byte": 2085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.genfromtxt", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 61, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "27617392095", "text": "import secrets\nimport ecdsa\nimport hashlib\nimport base58\n\n\"\"\"\n秘密鍵->公開鍵->アドレスの順で生成するクラス\nPython で Bitcoin(仮想通貨)アドレスを作る【python3】\nhttps://qiita.com/kaz1shuu2/items/921dcbebb7fbea14f085\n\"\"\"\n\n\nclass KeyAddressGenerator:\n def __init__(self):\n self.private_key, self.public_key, self.address = self.generate()\n\n def generate(self):\n p = 2 ** 256 - 2 ** 32 - 2 ** 9 - 2 ** 8 - 2 ** 7 - 2 ** 6 - 2 ** 4 - 1 # 素数らしい\n private_key = self.new_private_key(p)\n public_key = self.new_public_key(private_key)\n address = self.new_address(bytes.fromhex(\"00\"), public_key)\n return private_key, public_key, address\n\n def new_private_key(self, p):\n private_key = secrets.randbelow(p)\n private_key = format(private_key, 'x')\n print(\"PrivateKey = \" + private_key)\n return private_key\n\n def new_public_key(self, private_key):\n bin_private_key = bytes.fromhex(private_key)\n signing_key = ecdsa.SigningKey.from_string(bin_private_key, curve=ecdsa.SECP256k1)\n verifying_key = signing_key.get_verifying_key()\n public_key = bytes.fromhex(\"04\") + verifying_key.to_string()\n public_key = public_key.hex()\n # print(\"PublicKey = \" + public_key)\n return public_key\n\n def new_address(self, version, public_key):\n ba = bytes.fromhex(public_key) # 16進数公開鍵をバイト列に変換\n digest = hashlib.sha256(ba).digest() # baをsha256でハッシュ化してできたものを文字列にして代入\n new_digest = hashlib.new('ripemd160')\n new_digest.update(digest)\n public_key_hash = new_digest.digest()\n\n pre_address = version + public_key_hash\n address = hashlib.sha256(pre_address).digest()\n address = hashlib.sha256(address).digest()\n checksum = address[:4]\n address = pre_address + checksum\n address = base58.b58encode(address)\n address = address.decode()\n # print(\"Address = \" + address)\n return address\n\n def get_list(self):\n return self.private_key, self.public_key, self.address\n\n\npri_key, pub_key, addr = KeyAddressGenerator().get_list()\nsk = ecdsa.SigningKey.from_string(bytes.fromhex(pri_key), curve=ecdsa.SECP256k1)\nvk = ecdsa.VerifyingKey.from_string(bytes.fromhex(pub_key.replace('04', '', 1)), curve=ecdsa.SECP256k1)\nsignature = sk.sign(b\"message\")\nassert vk.verify(signature, b\"message\")\n\n", "repo_name": "y-okochi/semiB", "sub_path": 
"testProject/ecdsa_generator.py", "file_name": "ecdsa_generator.py", "file_ext": "py", "file_size_in_byte": 2505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "secrets.randbelow", "line_number": 25, "usage_type": "call"}, {"api_name": "ecdsa.SigningKey.from_string", "line_number": 32, "usage_type": "call"}, {"api_name": "ecdsa.SigningKey", "line_number": 32, "usage_type": "attribute"}, {"api_name": "ecdsa.SECP256k1", "line_number": 32, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 41, "usage_type": "call"}, {"api_name": "hashlib.new", "line_number": 42, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 47, "usage_type": "call"}, {"api_name": "hashlib.sha256", "line_number": 48, "usage_type": "call"}, {"api_name": "base58.b58encode", "line_number": 51, "usage_type": "call"}, {"api_name": "ecdsa.SigningKey.from_string", "line_number": 61, "usage_type": "call"}, {"api_name": "ecdsa.SigningKey", "line_number": 61, "usage_type": "attribute"}, {"api_name": "ecdsa.SECP256k1", "line_number": 61, "usage_type": "attribute"}, {"api_name": "ecdsa.VerifyingKey.from_string", "line_number": 62, "usage_type": "call"}, {"api_name": "ecdsa.VerifyingKey", "line_number": 62, "usage_type": "attribute"}, {"api_name": "ecdsa.SECP256k1", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "41208019919", "text": "import numpy as np\nimport argparse\nimport os\nimport torch\nimport pickle\n\nfrom Experiment_Engine.util import check_attribute_else_default, Config # utilities\nfrom Experiment_Engine import Acrobot, MountainCar, PuddleWorld # environments\nfrom Experiment_Engine import Agent, RegPerLayerNeuralNetwork # agent and function approximator\n\nNUMBER_OF_EPISODES = 500\n\n\nclass Experiment:\n\n def __init__(self, experiment_parameters, run_results_dir):\n self.run_results_dir = run_results_dir\n self.learning_rate = check_attribute_else_default(exp_parameters, 'lr', 0.001)\n self.environment_name = check_attribute_else_default(experiment_parameters, 'env', 'mountain_car',\n choices=['mountain_car', 'acrobot', 'puddle_world'])\n self.layer1_factor = check_attribute_else_default(experiment_parameters, 'layer1_factor', 0.01,\n choices=[0.0, 0.1, 0.01, 0.001])\n self.layer2_factor = check_attribute_else_default(experiment_parameters, 'layer2_factor', 0.01,\n choices=[0.0, 0.1, 0.01, 0.001])\n self.olayer_factor = check_attribute_else_default(experiment_parameters, 'olayer_factor', 0.01,\n choices=[0.0, 0.1, 0.01, 0.001])\n self.reg = check_attribute_else_default(experiment_parameters, 'reg', 'l1', choices=['l1', 'l2'])\n self.verbose = experiment_parameters.verbose\n\n environment_dictionary = {\n 'mountain_car': {'class': MountainCar, 'state_dims': 2, 'num_actions': 3},\n 'acrobot': {'class': Acrobot, 'state_dims': 4, 'num_actions': 3},\n 'puddle_world': {'class': PuddleWorld, 'state_dims': 2, 'num_actions': 4}\n }\n self.config = Config()\n self.config.store_summary = True\n self.summary = {}\n\n \"\"\" Parameters for the Environment \"\"\"\n self.config.max_actions = 2000\n self.config.norm_state = True\n\n \"\"\" Parameters for the Function Approximator \"\"\"\n self.config.state_dims = environment_dictionary[self.environment_name]['state_dims']\n self.config.num_actions = environment_dictionary[self.environment_name]['num_actions']\n self.config.gamma = 1.0\n self.config.epsilon = 0.1\n self.config.optim = \"adam\"\n self.config.lr = self.learning_rate\n 
self.config.reg_method = self.reg\n self.config.reg_factor = (self.layer1_factor, self.layer2_factor, self.olayer_factor)\n\n self.env = environment_dictionary[self.environment_name]['class'](config=self.config, summary=self.summary)\n self.fa = RegPerLayerNeuralNetwork(config=self.config, summary=self.summary)\n self.rl_agent = Agent(environment=self.env, function_approximator=self.fa, config=self.config,\n summary=self.summary)\n\n def run(self):\n saving_times = [50, 100, 250, 500]\n for i in range(NUMBER_OF_EPISODES):\n episode_number = i + 1\n self.rl_agent.train(1)\n if self.verbose and (((i+1) % 10 == 0) or i == 0):\n print(\"Episode Number:\", episode_number)\n print('\\tThe cumulative reward was:', self.summary['return_per_episode'][-1])\n print('\\tThe cumulative loss was:', np.round(self.summary['cumulative_loss_per_episode'][-1], 2))\n if episode_number in saving_times:\n self.save_network_params(suffix=str(episode_number)+'episodes')\n self.save_run_summary()\n\n def save_network_params(self, suffix='50episodes'):\n params_path = os.path.join(self.run_results_dir, 'network_weights_' + suffix + '.pt')\n torch.save(self.fa.net.state_dict(), params_path)\n\n def save_run_summary(self):\n summary_path = os.path.join(self.run_results_dir, 'summary.p')\n with open(summary_path, mode='wb') as summary_file:\n pickle.dump(self.summary, summary_file)\n config_path = os.path.join(self.run_results_dir, 'config.p')\n with open(config_path, mode='wb') as config_file:\n pickle.dump(self.config, config_file)\n\n\nif __name__ == '__main__':\n \"\"\" Experiment Parameters \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-run_number', action='store', default=1, type=int)\n parser.add_argument('-env', action='store', default='mountain_car', type=str, choices=['mountain_car', 'acrobot',\n 'puddle_world'])\n parser.add_argument('-lr', action='store', default=0.001, type=np.float64, choices=[0.004, 0.001, 0.00025])\n parser.add_argument('-reg', action='store', default='l1', type=str)\n parser.add_argument('-layer1_factor', action='store', default=0.01, type=np.float64,\n choices=[0.0, 0.1, 0.01, 0.001])\n parser.add_argument('-layer2_factor', action='store', default=0.01, type=np.float64,\n choices=[0.0, 0.1, 0.01, 0.001])\n parser.add_argument('-olayer_factor', action='store', default=0.01, type=np.float64,\n choices=[0.0, 0.1, 0.01, 0.001])\n parser.add_argument('-verbose', action='store_true')\n exp_parameters = parser.parse_args()\n\n \"\"\" General results directory \"\"\"\n results_parent_directory = os.path.join(os.getcwd(), 'Results')\n if not os.path.exists(results_parent_directory):\n os.makedirs(results_parent_directory)\n \"\"\" Directory specific to the environment and the method \"\"\"\n environment_result_directory = os.path.join(results_parent_directory, exp_parameters.env,\n exp_parameters.reg + '_regularization')\n if not os.path.exists(environment_result_directory):\n os.makedirs(environment_result_directory)\n \"\"\" Directory specific to the parameters\"\"\"\n parameters_name = 'LearningRate' + str(exp_parameters.lr) \\\n + '_Layer1Factor' + str(exp_parameters.layer1_factor) \\\n + '_Layer2Factor' + str(exp_parameters.layer2_factor) \\\n + '_OutputLayerFactor' + str(exp_parameters.olayer_factor)\n parameters_result_directory = os.path.join(environment_result_directory, parameters_name)\n if not os.path.exists(parameters_result_directory):\n os.makedirs(parameters_result_directory)\n \"\"\" Directory specific to the run \"\"\"\n agent_id = 'agent_' + 
str(exp_parameters.run_number)\n run_results_directory = os.path.join(parameters_result_directory, agent_id)\n os.makedirs(run_results_directory)\n\n \"\"\" Setting up and running the experiment \"\"\"\n experiment = Experiment(experiment_parameters=exp_parameters, run_results_dir=run_results_directory)\n experiment.run()\n\n# Parameter Sweep:\n# learning rate = {0.004, 0.001, 0.00025}\n# reg_factor_layer1 = {0, 0.1, 0.01, 0.001}\n# reg_factor_layer2 = {0, 0.1, 0.01, 0.001}\n# olayer_factor = {0, 0.1, 0.01, 0.001}\n", "repo_name": "JFernando4/Online_SparseRepresentations", "sub_path": "Regularization_Experiment.py", "file_name": "Regularization_Experiment.py", "file_ext": "py", "file_size_in_byte": 7085, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 18, "usage_type": "call"}, {"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 19, "usage_type": "call"}, {"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 21, "usage_type": "call"}, {"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 23, "usage_type": "call"}, {"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 25, "usage_type": "call"}, {"api_name": "Experiment_Engine.util.check_attribute_else_default", "line_number": 27, "usage_type": "call"}, {"api_name": "Experiment_Engine.MountainCar", "line_number": 31, "usage_type": "name"}, {"api_name": "Experiment_Engine.Acrobot", "line_number": 32, "usage_type": "name"}, {"api_name": "Experiment_Engine.PuddleWorld", "line_number": 33, "usage_type": "name"}, {"api_name": "Experiment_Engine.util.Config", "line_number": 35, "usage_type": "call"}, {"api_name": "Experiment_Engine.RegPerLayerNeuralNetwork", "line_number": 54, "usage_type": "call"}, {"api_name": "Experiment_Engine.Agent", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 81, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 92, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 96, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 104, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "42141557340", "text": "from django.shortcuts import render,redirect,get_object_or_404\nfrom .forms import PatientForm\nfrom .models import Patient\nfrom django.utils import timezone\n# Create your views here.\n\ndef index(request):\n\treturn render(request,'index.html')\n\t\ndef create(request):\n form = PatientForm(request.POST or None)\n if request.method==\"POST\":\n if form.is_valid():\n form.save()\n return redirect('list')\n return render(request, 'create.html',{\"form\":form})\n\n else:\n form = PatientForm()\n return render(request, 'create.html',{\"form\":form})\n \ndef detail(request,pk):\n patient=get_object_or_404(Patient,pk=pk)\n return render(request,'detail.html',{'patient':patient})\n\ndef update(request,pk):\n patient = get_object_or_404(Patient, pk=pk)\n if request.method == \"POST\":\n form = PatientForm(request.POST or None, instance=patient)\n if form.is_valid():\n form.save()\n return redirect('list')\n else:\n form = PatientForm(instance=patient)\n\n return render(request, 'update.html', {\"form\": form,\"patient\":patient})\n\n\ndef delete(request,pk):\n patient = get_object_or_404(Patient, pk=pk)\n if request.method == \"POST\":\n patient.delete()\n # messages.info(request, \"1 item Deleted !\")\n return redirect('list')\n else:\n return render(request,'delete.html', {'patient': patient})\n \n \ndef patient_list(request):\n patients=Patient.objects.all()\n context={\n \"patients\":patients,\n\n }\n return render(request,'list.html',context=context)", "repo_name": "mukesh-gupta/Hospital-Patient-Form", "sub_path": "Patient_Form/patient/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1597, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "forms.PatientForm", "line_number": 11, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 15, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.PatientForm", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 23, "usage_type": "call"}, {"api_name": "models.Patient", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 27, "usage_type": 
"call"}, {"api_name": "models.Patient", "line_number": 27, "usage_type": "argument"}, {"api_name": "forms.PatientForm", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 32, "usage_type": "call"}, {"api_name": "forms.PatientForm", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 40, "usage_type": "call"}, {"api_name": "models.Patient", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Patient.objects.all", "line_number": 50, "usage_type": "call"}, {"api_name": "models.Patient.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.Patient", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "44554629907", "text": "from flask_login import LoginManager, login_user, current_user, login_required, logout_user\nfrom flask import Flask, render_template, redirect, abort, request\nfrom data import db_session\nfrom data.users import User\nfrom data.jobs import Jobs\nfrom data.departments import Departament\nfrom login import LoginForm\nfrom add_job import Add_Job\nfrom register import RegisterForm\nfrom add_departament import Add_Departament\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@login_manager.user_loader\ndef load_user(user_id):\n return session.query(User).get(user_id)\n\n@app.route('/')\ndef index():\n jobs = session.query(Jobs)\n t_l = {}\n for job in jobs:\n user = session.query(User).filter(User.id == job.team_leader).first()\n if user:\n t_l[job.id] = ' '.join([user.name, user.surname])\n else:\n t_l[job.id] = ''\n return render_template('index.html', jobs=jobs, t_l=t_l)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n session = db_session.create_session()\n user = session.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect(\"/\")\n return render_template('login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('login.html', title='Авторизация', form=form)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\n\n@app.route('/add_job', methods=['GET', 'POST'])\n@login_required\ndef add_job():\n form = Add_Job()\n if form.validate_on_submit():\n session = db_session.create_session()\n job = Jobs()\n a = [user.id for user in session.query(User)]\n if form.team_leader.data not in a:\n return render_template('add_job.html',\n message=\"Такого пользователя не сущестсвует\",\n form=form)\n job.team_leader = form.team_leader.data\n job.job = form.job.data\n job.work_size = form.work_size.data\n job.collaborators = form.collaborators.data\n if form.start_date.data:\n job.start_date = datetime.strptime(form.start_date.data, '%d.%m.%Y').date()\n if form.end_date.data:\n job.end_date = datetime.strptime(form.end_date.data, '%d.%m.%Y').date()\n job.is_finished = form.is_finished.data\n session.add(job)\n 
session.commit()\n return redirect('/')\n return render_template('add_job.html', title='Добавление работы', form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if form.password.data != form.r_password.data:\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n if session.query(User).filter(User.email == form.email.data).first():\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Такой пользователь уже есть\")\n user = User(\n surname=form.surname.data,\n name=form.name.data,\n age=form.age.data,\n position=form.position.data,\n speciality=form.speciality.data,\n address=form.address.data,\n email=form.email.data)\n user.set_password(form.password.data)\n session.add(user)\n session.commit()\n return redirect('/login')\n return render_template('register.html', title='Регистрация', form=form)\n\n\n@app.route(\"/job_delete/\")\n@login_required\ndef job_delete(id):\n session = db_session.create_session()\n job = session.query(Jobs).filter(Jobs.id == id, (Jobs.team_leader == current_user.id) | (current_user.id == 1)).first()\n if job:\n session.delete(job)\n session.commit()\n else:\n abort(404)\n return redirect('/')\n\n\n@app.route('/edit_job/', methods=[\"POST\", \"GET\"])\n@login_required\ndef edit_job(id):\n form = Add_Job()\n if request.method == 'GET':\n session = db_session.create_session()\n job = session.query(Jobs).filter(Jobs.id == id, (Jobs.team_leader == current_user.id) | (current_user.id == 1)).first()\n if job:\n form.team_leader.data = job.team_leader\n form.job.data = job.job\n form.work_size.data = job.work_size\n form.collaborators.data = job.collaborators\n form.start_date.data = job.start_date\n form.end_date.data = job.end_date\n form.is_finished.data = job.is_finished\n else:\n abort(404)\n if form.validate_on_submit():\n session = db_session.create_session()\n job = session.query(Jobs).filter(Jobs.id == id, (Jobs.team_leader == current_user.id) | (current_user.id == 1)).first()\n if job:\n job.team_leader = form.team_leader.data\n job.job = form.job.data\n job.work_size = form.work_size.data\n job.collaborators = form.collaborators.data\n if form.start_date.data:\n try:\n job.start_date = datetime.strptime(form.start_date.data, '%d.%m.%Y %H:%M:%S').date()\n except ValueError:\n try:\n job.start_date = datetime.strptime(form.start_date.data, '%Y-%m-%d %H:%M:%S').date()\n except ValueError:\n return render_template('add_job.html', title='Редактирование новости', form=form, message=\"Неверная дата\")\n if form.end_date.data:\n try:\n job.end_date = datetime.strptime(form.end_date.data, '%d.%m.%Y %H:%M:%S')\n except ValueError:\n try:\n job.end_date = datetime.strptime(form.end_date.data, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return render_template('add_job.html', title='Редактирование новости', form=form, message=\"Неверная дата\")\n job.is_finished = form.is_finished.data\n session.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('add_job.html', title='Редактирование новости', form=form)\n\n\n@app.route('/departaments')\ndef departaments():\n deps = session.query(Departament)\n chief = {}\n for dep in deps:\n user = session.query(User).filter(User.id == dep.chief).first()\n if user:\n chief[dep.id] = ' '.join([user.name, user.surname])\n else:\n chief[dep.id] = ''\n return render_template('departaments.html', departaments=deps, 
chief=chief)\n\n\n@app.route('/add_departament', methods=['GET', 'POST'])\n@login_required\ndef add_departament():\n form = Add_Departament()\n if form.validate_on_submit():\n session = db_session.create_session()\n dep = Departament()\n a = [user.id for user in session.query(User)]\n if form.chief.data not in a:\n return render_template('add_departament.html',\n message=\"Такого пользователя не сущестсвует\",\n form=form)\n dep.chief = form.chief.data\n dep.title = form.title.data\n dep.members = form.members.data\n dep.email = form.email.data\n session.add(dep)\n session.commit()\n return redirect('/departaments')\n return render_template('add_departament.html', title='Добавление департамента', form=form)\n\n\n@app.route(\"/departament_delete/\")\n@login_required\ndef departament_delete(id):\n session = db_session.create_session()\n dep = session.query(Departament).filter(Departament.id == id, (Departament.chief == current_user.id) | (current_user.id == 1)).first()\n if dep:\n session.delete(dep)\n session.commit()\n else:\n abort(404)\n return redirect('/departaments')\n\n\n@app.route('/edit_departament/', methods=[\"POST\", \"GET\"])\n@login_required\ndef edit_departament(id):\n form = Add_Departament()\n if request.method == 'GET':\n session = db_session.create_session()\n dep = session.query(Departament).filter(Departament.id == id, (Departament.chief == current_user.id) | (current_user.id == 1)).first()\n if dep:\n form.title.data = dep.title\n form.chief.data = dep.chief\n form.members.data = dep.members\n form.email.data = dep.email\n else:\n abort(404)\n if form.validate_on_submit():\n session = db_session.create_session()\n dep = session.query(Departament).filter(Departament.id == id, (Departament.chief == current_user.id) | (current_user.id == 1)).first()\n if dep:\n dep.chief = form.chief.data\n dep.title = form.title.data\n dep.members = form.members.data\n dep.email = form.email.data\n session.commit()\n return redirect('/departaments')\n else:\n abort(404)\n return render_template('add_departament.html', title='Редактирование департамента', form=form)\n\n\nif __name__ == \"__main__\":\n db_session.global_init(\"db/users.sqlite\")\n session = db_session.create_session()\n app.run()", "repo_name": "danilakom/Test", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9975, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 16, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 21, "usage_type": "argument"}, {"api_name": "data.jobs.Jobs", "line_number": 25, "usage_type": "argument"}, {"api_name": "data.users.User", "line_number": 28, "usage_type": "argument"}, {"api_name": "data.users.User.id", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}, {"api_name": "login.LoginForm", "line_number": 38, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 40, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 40, "usage_type": "name"}, {"api_name": "data.users.User", "line_number": 41, "usage_type": "argument"}, {"api_name": "data.users.User.email", "line_number": 41, "usage_type": "attribute"}, {"api_name": "flask_login.login_user", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 
44, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 48, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 52, "usage_type": "name"}, {"api_name": "add_job.Add_Job", "line_number": 61, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 63, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 63, "usage_type": "name"}, {"api_name": "data.jobs.Jobs", "line_number": 64, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 65, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 59, "usage_type": "name"}, {"api_name": "register.RegisterForm", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 90, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 93, "usage_type": "argument"}, {"api_name": "data.users.User.email", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 94, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 108, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 109, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 115, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 115, "usage_type": "name"}, {"api_name": "data.jobs.Jobs", "line_number": 116, "usage_type": "argument"}, {"api_name": "data.jobs.Jobs.id", "line_number": 116, "usage_type": "attribute"}, {"api_name": "data.jobs.Jobs.team_leader", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.id", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 121, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 122, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 113, "usage_type": "name"}, {"api_name": "add_job.Add_Job", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 129, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 129, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 130, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 130, "usage_type": "name"}, {"api_name": "data.jobs.Jobs", "line_number": 131, "usage_type": "argument"}, {"api_name": "data.jobs.Jobs.id", "line_number": 131, "usage_type": "attribute"}, {"api_name": "data.jobs.Jobs.team_leader", "line_number": 131, "usage_type": 
"attribute"}, {"api_name": "flask_login.current_user.id", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 141, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 143, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 143, "usage_type": "name"}, {"api_name": "data.jobs.Jobs", "line_number": 144, "usage_type": "argument"}, {"api_name": "data.jobs.Jobs.id", "line_number": 144, "usage_type": "attribute"}, {"api_name": "data.jobs.Jobs.team_leader", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.id", "line_number": 144, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 152, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 152, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 155, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 155, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 157, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 160, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 165, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 170, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 171, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 126, "usage_type": "name"}, {"api_name": "data.departments.Departament", "line_number": 176, "usage_type": "argument"}, {"api_name": "data.users.User", "line_number": 179, "usage_type": "argument"}, {"api_name": "data.users.User.id", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 184, "usage_type": "call"}, {"api_name": "add_departament.Add_Departament", "line_number": 190, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 192, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 192, "usage_type": "name"}, {"api_name": "data.departments.Departament", "line_number": 193, "usage_type": "call"}, {"api_name": "data.users.User", "line_number": 194, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 206, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 188, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 212, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 212, "usage_type": "name"}, {"api_name": "data.departments.Departament", "line_number": 213, "usage_type": "argument"}, {"api_name": "data.departments.Departament.id", "line_number": 213, "usage_type": "attribute"}, {"api_name": "data.departments.Departament.chief", "line_number": 213, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.id", 
"line_number": 213, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 213, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 218, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 219, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 210, "usage_type": "name"}, {"api_name": "add_departament.Add_Departament", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 226, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 226, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 227, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 227, "usage_type": "name"}, {"api_name": "data.departments.Departament", "line_number": 228, "usage_type": "argument"}, {"api_name": "data.departments.Departament.id", "line_number": 228, "usage_type": "attribute"}, {"api_name": "data.departments.Departament.chief", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.id", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 228, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 235, "usage_type": "call"}, {"api_name": "data.db_session.create_session", "line_number": 237, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 237, "usage_type": "name"}, {"api_name": "data.departments.Departament", "line_number": 238, "usage_type": "argument"}, {"api_name": "data.departments.Departament.id", "line_number": 238, "usage_type": "attribute"}, {"api_name": "data.departments.Departament.chief", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask_login.current_user.id", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 248, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 223, "usage_type": "name"}, {"api_name": "data.db_session.global_init", "line_number": 252, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 252, "usage_type": "name"}, {"api_name": "data.db_session.create_session", "line_number": 253, "usage_type": "call"}, {"api_name": "data.db_session", "line_number": 253, "usage_type": "name"}]} +{"seq_id": "36624031264", "text": "import h5py\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nhf = h5py.File('../data/glove/glove-100-angular.hdf5', 'r')\r\nfeat_dim = 100\r\nbatch_size = 128\r\ntopk = 100\r\n\r\ntrain_x = np.array(hf.get('train')).astype(np.float32)\r\ntrain_x_ = tf.transpose(tf.linalg.normalize(train_x, axis=1)[0])\r\n\r\ntest_x = np.array(hf.get('test')).astype(np.float32)\r\ntest_x_ = tf.transpose(tf.linalg.normalize(test_x, axis=1)[0])\r\n\r\n\r\ndistances = np.array(hf.get('distances'))\r\nneighbors = np.array(hf.get('neighbors'))\r\n\r\nx = tf.placeholder(tf.float32,shape=[None,feat_dim])\r\nx_normalized = tf.linalg.normalize(x,axis=1)[0]\r\n\r\ndist = tf.matmul(x_normalized, train_x_)\r\n\r\nnns = tf.nn.top_k(dist, k=topk, sorted=True)\r\n\r\ntf_config = tf.ConfigProto()\r\ntf_config.gpu_options.allow_growth = True\r\nsess = tf.Session(config=tf_config)\r\n\r\n\r\ntrain_neighbors = 
np.zeros([train_x_.shape[1],topk],dtype=np.int32)\r\ntrain_distances = np.zeros([train_x_.shape[1],topk],dtype=np.float32)\r\n\r\n\r\nnum_batches = train_x_.shape[1]//batch_size\r\nif train_x_.shape[1]%batch_size!=0:\r\n num_batches += 1\r\n\r\nfor i in range(num_batches):\r\n start_idx = batch_size*i\r\n end_idx = min(start_idx+batch_size, train_x_.shape[1])\r\n temp = sess.run(nns, feed_dict={x: train_x[start_idx:end_idx]})\r\n train_neighbors[start_idx:end_idx] = temp[1]\r\n train_distances[start_idx:end_idx] = temp[0]\r\n\r\nfw = open('train.txt','w')\r\n\r\nfor i in range(train_neighbors.shape[0]):\r\n line = ','.join([str(train_neighbors[i][j])+':'+str(train_distances[i][j]) for j in range(topk)])+' '\r\n line += ' '.join([str(j)+':'+str(train_x[i][j]) for j in range(feat_dim)])\r\n nothing = fw.write(line+'\\n')\r\n\r\nfw.close()\r\n\r\n\r\nfw = open('test.txt','w')\r\n\r\nfor i in range(neighbors.shape[0]):\r\n line = ','.join([str(neighbors[i][j])+':'+str(distances[i][j]) for j in range(topk)])+' '\r\n line += ' '.join([str(j)+':'+str(test_x[i][j]) for j in range(feat_dim)])\r\n nothing = fw.write(line+'\\n')\r\n\r\nfw.close()\r\n\r\n\r\nfor n_cluster in n_cluster_l:\r\n print('n_cluster {}'.format(n_cluster))\r\n opt.n_clusters = n_cluster\r\n opt.n_class = n_cluster\r\n for height in height_l:\r\n run_kmkahip(height, opt, dataset, queryset, neighbors)", "repo_name": "Tharun24/IRLI", "sub_path": "src/extract_h5.py", "file_name": "extract_h5.py", "file_ext": "py", "file_size_in_byte": 2189, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "h5py.File", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.linalg.normalize", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tensorflow.transpose", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.linalg.normalize", "line_number": 14, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.linalg.normalize", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.linalg", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.nn.top_k", "line_number": 25, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 33, 
"usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 33, "usage_type": "attribute"}]} +{"seq_id": "22686355700", "text": "'''\nfilename:\nspeller_p300.py\n\ntype:\nscript\n\nDetect p300 potential for spelling purposes.\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils_p300 import _create_letter_target\n\n############################################\n# #\n# INIT SETTINGS #\n# #\n############################################\n\n# how many samples per minute is acquired\nsampling_rate = 240\nsampling_frequency = 1/float(sampling_rate)\n\n# which ranges to store (in milliseconds)\nstore_range = ((250, 500), (500, 750))\n# store_range = ((0, 400), (400, 800))\n\n# 6x6 organisation => 12 lines (6 rows, 6 cols)\nn_lines = 12\n\n# how many times line appear (is highlighted, etc.)\nline_i_times = 15\n\n# single line highlighting interval\n# line_interval = 0.015625\nline_interval = 0.100\n\n# pause (lag) between lines - ISI - inter stimulus interval\n# line_lag = 0.0625\nline_lag = 0.075\n\n# select the electrode\nchannel = 10\n\n\n############################################\n# #\n# GET DATA FROM FILE #\n# #\n############################################\n'''\n If it were real-time the data would come directly from eeg.\n For the simulation purpose it has to be read from the file.\n'''\n\nfrom scipy.io import loadmat\n\n\nmatfile = loadmat('data/AAS010R01.mat')\n\ndata = matfile['signal']\nrow_col = matfile['StimulusCode'].T[0].astype('int')-1\ntarget = matfile['StimulusType'].T[0].astype('bool')\n\nletter_target = _create_letter_target(\n row_col, sampling_rate, store_range[1][1]\n )\n\n# how many letters was presented during one session\nl_letters = np.unique(letter_target).shape[0]-1\n\n\n############################################\n# #\n# OBJECTS CREATION #\n# #\n############################################\n\nfrom p300_module import P300Manager\n\n# create brain for our speller\np300 = P300Manager(\n sampling_rate, store_range,\n l_letters, n_lines, line_i_times,\n line_interval, line_lag,\n detection_method='min'\n )\n\n\n############################################\n# #\n# DETECTING P300 ONLINE #\n# #\n############################################\n'''\nthis function iterates x time in this simulator\nas it does in the openbci\n'''\nfrom time import sleep\n\n\ndef detect_p300(sample):\n\n # get sample form the first channel (index '0')\n smp = sample.channel_data[channel]\n # smp = sample.channel_data.mean()\n\n # which letter did the subject look at\n letter = sample.aux_data[0]\n\n # which line (row or column) has been presented\n line = sample.aux_data[1]\n\n # put the sample to the storage\n p300.remember(smp, letter, line)\n\n\nfrom utils_p300 import OpenBCISample\n\n# iterate trough EEG data samples read form the file\nfor sample_num in range(len(data)):\n # create sample using openbci-like class\n sample = OpenBCISample(\n 0, data[sample_num],\n [letter_target[sample_num], row_col[sample_num], 0]\n )\n\n sleep(sampling_frequency/100.)\n\n # stoing p300 information, classifying letters\n detect_p300(sample)\n\n\nproper = [\n np.unique(row_col[np.logical_and(target == 1, letter_target == i)])\n for i in range(l_letters)\n ]\n\nprint('\\n\\nIt should be:')\nfor i in range(len(proper)):\n print(p300.letter_matrix[proper[i][1]-6][proper[i][0]])\n\n\nzzz = p300.storage\n\n\nprint(proper)\nprint('')\n\npredicted = np.zeros(np.shape(proper))\n\nfor l in range(l_letters):\n per = [\n zzz[l, i, ..., 0, :].mean() /\n zzz[l, i, ..., 1, :].mean()\n for i in 
range(12)\n ]\n min = [\n zzz[l, i, ..., 0, :].mean()-zzz[l, i, ..., 1, :].mean()\n for i in range(12)\n ]\n mean_0 = [zzz[l, i, ..., 0, :].mean() for i in range(12)]\n print('per: [%s, %s]' % (np.argmax(per[:6]), np.argmax(per[6:])+6))\n print('min: [%s, %s]' % (np.argmax(min[:6]), np.argmax(min[6:])+6))\n print('mean_0:[%s, %s]' % (np.argmax(mean_0[:6]), np.argmax(mean_0[6:])+6))\n print('')\n\n clf = min\n\n row_pred = np.argmax(clf[6:])+6\n col_pred = np.argmax(clf[:6])\n\n predicted[l][0] = col_pred\n predicted[l][1] = row_pred\n\naccuracy_line = (proper == predicted).sum()/float((proper == predicted).size)\naccuracy_letter = np.sum(\n [np.logical_and(i[0], i[1]) for i in (predicted == proper)]\n )/float((proper == predicted).shape[0])\nprint('----')\nprint('Line accuracy: %0.2f' % accuracy_line)\nprint('Letter accuracy: %0.2f' % accuracy_letter)\n\n# import ipdb\n# ipdb.set_trace()\n\n################################\n# PLOT P300 #\n################################\ncnt = 0\nfor i in range(l_letters):\n for j in range(12):\n plt.subplot(12, l_letters, j*l_letters+1+i)\n plt.plot(\n np.arange(\n store_range[0][0], store_range[1][1], 1000/float(sampling_rate)\n ),\n np.append(\n zzz[i, j, ..., 0, :].mean(axis=0),\n zzz[i, j, ..., 1, :].mean(axis=0)\n )\n )\n plt.axvline(x=300, c='r')\nplt.show()\n", "repo_name": "cereberus/chartula", "sub_path": "python/p300/speller_p300.py", "file_name": "speller_p300.py", "file_ext": "py", "file_size_in_byte": 5295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scipy.io.loadmat", "line_number": 61, "usage_type": "call"}, {"api_name": "utils_p300._create_letter_target", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 72, "usage_type": "call"}, {"api_name": "p300_module.P300Manager", "line_number": 84, "usage_type": "call"}, {"api_name": "utils_p300.OpenBCISample", "line_number": 125, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 206, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}]} +{"seq_id": "20421475231", "text": "import unittest\nfrom yocb.chain_struct.AcTransaction import AcTransaction\nfrom datetime import datetime\nfrom yocb.chain_struct.Block import Block\nimport json\n\n\nclass AcTest(unittest.TestCase):\n\n @unittest.skip('skip')\n def test_json(self):\n ac_transaction = AcTransaction.append_ac_transaction('0x121', str(datetime.now()), None, None, None, 5)\n self.assertEqual(ac_transaction.address, '0x121')\n p = json.dumps(obj=ac_transaction, default=AcTransaction.json_parse)\n print(p)\n l = json.loads(p, object_hook=AcTransaction.json_load)\n self.assertEqual(l.address, '0x121')\n print(l.__dict__)\n\n def test_obtain(self):\n ac_transaction = AcTransaction.append_ac_transaction('0x121', str(datetime.now()), None, None, None, 5)\n b = Block()\n b.ac_transactions.setdefault(ac_transaction.address,\n {ac_transaction.hash: json.dumps(ac_transaction, default=AcTransaction.json_parse)})\n status, output = AcTransaction.obtain_ac_transaction(ac_transaction.address, ac_transaction.hash, b)\n self.assertEqual(status, 200)\n print(output)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "LyAllence/YOCB", "sub_path": "unittest_yocb/chain_struct/unittest_yocb_ac_transaction.py", "file_name": "unittest_yocb_ac_transaction.py", "file_ext": "py", "file_size_in_byte": 1205, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.append_ac_transaction", "line_number": 12, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 12, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.json_parse", "line_number": 14, "usage_type": "attribute"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 14, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.json_load", "line_number": 16, "usage_type": "attribute"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 16, "usage_type": "name"}, {"api_name": "unittest.skip", "line_number": 10, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.append_ac_transaction", "line_number": 21, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "yocb.chain_struct.Block.Block", "line_number": 22, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.json_parse", "line_number": 24, "usage_type": "attribute"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 24, "usage_type": "name"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction.obtain_ac_transaction", "line_number": 25, 
"usage_type": "call"}, {"api_name": "yocb.chain_struct.AcTransaction.AcTransaction", "line_number": 25, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "15810844905", "text": "\"\"\"Add card orb and uses\n\nRevision ID: 5c488131a0ea\nRevises: ef9611d1b9a7\nCreate Date: 2022-05-25 21:33:54.966996\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5c488131a0ea'\ndown_revision = 'ef9611d1b9a7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('deck', sa.Column('card_orb', sa.Integer(), nullable=False, default=0))\n op.add_column('deck', sa.Column('card_num_uses', sa.Integer(), nullable=False, default=0))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('deck', 'card_num_uses')\n op.drop_column('deck', 'card_orb')\n # ### end Alembic commands ###\n", "repo_name": "scott-walker/roborally", "sub_path": "database/migrations/versions/5c488131a0ea_add_card_orb_and_uses.py", "file_name": "5c488131a0ea_add_card_orb_and_uses.py", "file_ext": "py", "file_size_in_byte": 811, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.add_column", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "38952678013", "text": "import pickle\nfrom datetime import date, datetime\n\nfrom arti import Artifact, View, read, write\nfrom arti.formats.pickle import Pickle\nfrom arti.internal.utils import named_temporary_file\nfrom arti.storage.local import LocalFile, LocalFilePartition\nfrom arti.views.python import Date, Datetime, Dict, Float, Int, Null, Str\n\n\ndef test_python_View() -> None:\n for val, view_class, python_type in [\n (\"\", Str, str),\n (1, Int, int),\n (1.0, Float, float),\n (None, Null, None),\n (date(1970, 1, 1), Date, date),\n (datetime(1970, 1, 1, 0), Datetime, datetime),\n ({\"a\": 1}, Dict, dict[str, int]),\n ]:\n view = View.from_annotation(python_type, mode=\"READWRITE\")\n assert isinstance(view, view_class)\n assert view.artifact_class is Artifact\n assert view.type == view.type_system.to_artigraph(python_type, hints={})\n\n test_format = Pickle()\n binary = pickle.dumps(val)\n with named_temporary_file(\"w+b\") as f:\n test_storage_partition = LocalFilePartition(path=f.name, storage=LocalFile())\n\n f.write(binary)\n f.seek(0)\n\n # read returns a list, matching the passed partitions\n data = read(\n type_=view.type,\n format=test_format,\n storage_partitions=(test_storage_partition,),\n 
view=view,\n )\n assert data == val\n\n f.truncate()\n write(\n data,\n type_=view.type,\n format=test_format,\n storage_partition=test_storage_partition,\n view=view,\n )\n assert f.read() == binary\n", "repo_name": "artigraph/artigraph", "sub_path": "tests/arti/views/test_python.py", "file_name": "test_python.py", "file_ext": "py", "file_size_in_byte": 1709, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 23, "dataset": "github-code", "pt": "52", "api": [{"api_name": "arti.views.python.Str", "line_number": 13, "usage_type": "name"}, {"api_name": "arti.views.python.Int", "line_number": 14, "usage_type": "name"}, {"api_name": "arti.views.python.Float", "line_number": 15, "usage_type": "name"}, {"api_name": "arti.views.python.Null", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 17, "usage_type": "call"}, {"api_name": "arti.views.python.Date", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "call"}, {"api_name": "arti.views.python.Datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "arti.views.python.Dict", "line_number": 19, "usage_type": "name"}, {"api_name": "arti.View.from_annotation", "line_number": 21, "usage_type": "call"}, {"api_name": "arti.View", "line_number": 21, "usage_type": "name"}, {"api_name": "arti.Artifact", "line_number": 23, "usage_type": "name"}, {"api_name": "arti.formats.pickle.Pickle", "line_number": 26, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 27, "usage_type": "call"}, {"api_name": "arti.internal.utils.named_temporary_file", "line_number": 28, "usage_type": "call"}, {"api_name": "arti.storage.local.LocalFilePartition", "line_number": 29, "usage_type": "call"}, {"api_name": "arti.storage.local.LocalFile", "line_number": 29, "usage_type": "call"}, {"api_name": "arti.read", "line_number": 35, "usage_type": "call"}, {"api_name": "arti.write", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "74800954403", "text": "#!/usr/bin/env python3\n\nimport tkinter as tk\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\nimport time\nimport threading\nimport logging\nimport webbrowser\n\nwith open('config.json', 'r') as json_file:\n config = json.load(json_file)\n\nUSER_AGENT = config['user_agent']\nSLEEP_INTERVAL = config['sleep_interval']\n\nLOG_EMOJIS = {'INFO': '➡️', 'ERROR': '❌', 'WARNING': '⚠️'}\n\nlogging.basicConfig(\n filename='news_scraper.log',\n level=logging.INFO,\n format='%(levelname)s: %(emoji)s [%(asctime)s] : %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n)\n\n\nclass Scraper:\n def __init__(self, urls, texts, sleep_interval=50):\n self.urls = urls\n self.texts = texts\n self.sleep_interval = sleep_interval\n self.data_list = []\n self.lock = threading.Lock()\n\n def scrape_website(self):\n while True:\n try:\n start_time = time.time()\n\n headers = {'user-agent': USER_AGENT}\n response = requests.get(self.urls, headers=headers)\n response.raise_for_status()\n\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n logging.info(\n f\"Connection successful. 
Time elapsed: {elapsed_time:.2f} seconds\",\n extra={'emoji': LOG_EMOJIS['INFO']}\n )\n\n soup = BeautifulSoup(response.content, 'html.parser')\n\n title_elements = soup.select(self.texts['title'])\n timestamp_elements = soup.select(self.texts['timestamp'])\n\n new_data_list = []\n for title_element, timestamp_element in zip(title_elements, timestamp_elements):\n info = title_element.text.strip()\n link_element = title_element.find('a')\n link = link_element['href'] if link_element and 'href' in link_element.attrs else \"No Link\"\n iso_timestamp = timestamp_element.get('data-est', '')\n timestamp = timestamp_element.text.strip() if timestamp_element else \"Unknown Timestamp\"\n new_data_list.append(\n {\"info\": info, \"link\": link, \"timestamp\": timestamp, \"iso_timestamp\": iso_timestamp})\n\n with self.lock:\n new_data_list.sort(key=lambda x: x['iso_timestamp'], reverse=True)\n self.data_list.clear()\n self.data_list.extend(new_data_list)\n logging.info(\n \"Data successfully scraped.\",\n extra={'emoji': LOG_EMOJIS['INFO']}\n )\n\n time.sleep(self.sleep_interval)\n\n except requests.exceptions.RequestException as request_error:\n logging.error(\n f'An error occurred during the request: {request_error}',\n exc_info=True,\n extra={'emoji': LOG_EMOJIS[\"ERROR\"]}\n )\n except Exception as error:\n logging.error(\n f'An unexpected error occurred: {error}',\n exc_info=True,\n extra={'emoji': LOG_EMOJIS[\"ERROR\"]}\n )\n\n def start_scraping(self):\n scraping_thread = threading.Thread(target=self.scrape_website)\n scraping_thread.daemon = True\n scraping_thread.start()\n\n\nclass UI:\n def __init__(self):\n self.root = tk.Tk()\n self.root.title(\"News Feed\")\n self.root.geometry(\"800x600\")\n self.root.resizable(True, True)\n\n self.text_widget = tk.Text(self.root, wrap=tk.WORD, font=(\"Helvetica\", 18))\n self.text_widget.pack(expand=True, fill=\"both\")\n\n self.scraper = None\n self.update_thread = None\n\n def start(self):\n self.scraper = Scraper(urls=config['urls']['url_1']['url'], texts=config['urls']['url_1']['texts'],\n sleep_interval=SLEEP_INTERVAL)\n self.scraper.start_scraping()\n\n self.update_thread = threading.Thread(target=self.update_news_feed)\n self.update_thread.daemon = True\n self.update_thread.start()\n\n self.root.mainloop()\n\n def update_news_feed(self):\n while True:\n with self.scraper.lock:\n self.text_widget.delete('1.0', tk.END)\n if self.scraper.data_list:\n for item in self.scraper.data_list:\n self.text_widget.insert(tk.END, f\"{item['info']}\\n{LOG_EMOJIS['INFO']} {item['timestamp']}\\n\\n\")\n else:\n logging.warning(\n \"No data available to display in the news feed.\",\n extra={'emoji': LOG_EMOJIS['WARNING']}\n )\n time.sleep(10)\n\n\nif __name__ == \"__main__\":\n ui = UI()\n ui.start()\n", "repo_name": "careerswitch/scrape_template", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4883, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 22, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 34, "usage_type": "call"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": 
"logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 72, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 77, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 86, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 100, "usage_type": "call"}, {"api_name": "tkinter.Text", "line_number": 105, "usage_type": "call"}, {"api_name": "tkinter.WORD", "line_number": 105, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 116, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 128, "usage_type": "attribute"}, {"api_name": "logging.warning", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "7132668006", "text": "# Use Selenium web-diver and Chrome to count how many times \"price target raised to $\" occurred.\nfrom selenium import webdriver\n\nsymbol = input(\"Enter your stock symbol: \")\nurl = \"https://thefly.com/news.php?symbol=\"+symbol\n# Setting \"options\" to suppress the DevTools debugging messages in the command prompt\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\ndriver = webdriver.Chrome(options=options)\ndriver.get(url)\n\nelements = driver.find_elements_by_partial_link_text(\"price target raised to\")\n\n# displaying the result in the command prompt\nprint(symbol + \" price target raised \" + str(len(elements)) + \" times \")\n\ndriver.close()\ndriver.quit()", "repo_name": "anush2072/PythonScripts", "sub_path": "SeleniumWordCounter.py", "file_name": "SeleniumWordCounter.py", "file_ext": "py", "file_size_in_byte": 705, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.ChromeOptions", "line_number": 7, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "70568666726", "text": "import matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport string\nimport time\n\nvocab = dict(zip(string.ascii_lowercase, range(1,27)))\nvocab[''] = 0\n\ntrain = open(\"words_random_train.txt\",\"r\",encoding = \"utf-8\").read().split(\"\\n\")\ntrain.remove(\"\")\ntrain_x = [i.split(\"\\t\")[1] for i in train]\ntrain_y = [i.split(\"\\t\")[0] for i in train]\n\ndev = open(\"words_random_dev.txt\",\"r\",encoding = \"utf-8\").read().split(\"\\n\")\ndev_x = [i.split(\"\\t\")[1] for i in dev]\ndev_y = [i.split(\"\\t\")[0] for i in dev]\n\nshort_line = ['á','é','í','ý','ú','ó']\nlittle_hook = ['ě','č','š','ž','ř']\ncircle = ['ů']\n\n\nlabel = []\n\nlittle_hook_cnt = 0\nshort_line_cnt = 0\ncircle_cnt = 0\nother_char_cnt = 0\n\nfor word in train_y:\n temp_lst = []\n word_lst = list(word)\n for char in word_lst:\n if char in short_line:\n temp_lst.append(1)\n short_line_cnt +=1\n elif char in little_hook:\n 
temp_lst.append(2)\n little_hook_cnt +=1\n elif char in circle:\n temp_lst.append(3)\n circle_cnt +=1\n else:\n temp_lst.append(0)\n other_char_cnt +=1\n \n label.append(temp_lst)\n \n## 0 = other characters; 1 = short_line ; 2 = little_hook; 3 = circle;##\n\n \ndev_label = []\n\nfor word in dev_y:\n temp_lst = []\n word_lst = list(word)\n for char in word_lst:\n if char in short_line:\n temp_lst.append(1)\n elif char in little_hook:\n temp_lst.append(2)\n elif char in circle:\n temp_lst.append(3)\n else:\n temp_lst.append(0)\n \n dev_label.append(temp_lst)\n \n\nfrom torch.utils.data import Dataset, DataLoader\n\nclass TaggingDataset(Dataset):\n def __init__(self, sentences, tag_sequences):\n self.sentences = sentences\n self.tag_sequences = tag_sequences\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, idx):\n sample = {\"char\": self.sentences[idx], \"class\": self.tag_sequences[idx]}\n return sample\n\ndef tagging_collate_fn(batch):\n tensors = []\n for instance in batch:\n sent_t = torch.tensor(instance[\"char\"])\n pos_t = torch.tensor(instance[\"class\"])\n tensors.append(torch.stack([sent_t, pos_t]))\n\n return torch.stack(tensors)\n\nrnn_x = []\nfor word in train_x:\n temp_lst = []\n word_lst = list(word)\n for char in word_lst:\n temp_lst.append(vocab[char])\n \n rnn_x.append(temp_lst)\n\ntrain_dataset = TaggingDataset(rnn_x, label)\ntrain_dataloader = DataLoader(train_dataset, batch_size=1, collate_fn=tagging_collate_fn)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nrnn_dev = []\nfor word in dev_x:\n temp_lst = []\n word_lst = list(word)\n for char in word_lst:\n temp_lst.append(vocab[char])\n \n rnn_dev.append(temp_lst)\n\ndev_dataset = TaggingDataset(rnn_dev, dev_label)\ndev_dataloader = DataLoader(dev_dataset, batch_size=1, collate_fn=tagging_collate_fn)\n\nfrom torch import nn\n\nclass LSTMModel(nn.Module):\n def __init__(self, hidden_dim=64, layers=2, dropout_val=0.1): \n super().__init__()\n self.embedding = nn.Embedding(27,10) #embedding size - 10\n self.lstm = nn.LSTM(10, hidden_dim, num_layers = layers, bidirectional = True)\n self.fc = nn.Linear(hidden_dim*2, hidden_dim)\n self.dropout = nn.Dropout(dropout_val)\n self.fc1 = nn.Linear(hidden_dim, 4)\n \n def forward(self, text):\n embedded = self.embedding(text)\n outputs, (hidden, cell) = self.lstm(embedded.view(len(text), 1, -1))\n predictions = self.fc(outputs.view(len(text), -1))\n predictions = self.dropout(predictions)\n predictions = self.fc1(predictions)\n return predictions\ndef accuracy(tag_scores,tag):\n _ , predicted_idx = torch.max(tag_scores, 1)\n return torch.sum(predicted_idx == tag).item()/len(tag)\n \ndef train(train_dataloader, loss, optimizer, model, device):\n model.train()\n train_total_acc = 0\n train_total_loss = 0\n \n for batch in train_dataloader: \n model.zero_grad()\n word = batch[0][0].to(device)\n label = batch [0][1].to(device)\n scores = model(word)\n \n loss_val = loss(scores, label)\n \n loss_val.backward()\n optimizer.step()\n\n train_total_loss += loss_val.item()\n \n train_total_acc += accuracy(scores, label)\n \n return train_total_loss, train_total_acc\n\ndef evaluate(dev_dataloader, loss, model, device):\n eval_total_loss = 0\n eval_total_acc = 0\n model.eval()\n for eval_batch in dev_dataloader:\n word = eval_batch[0][0].to(device)\n label = eval_batch [0][1].to(device)\n scores = model(word)\n\n loss_val_eval = loss(scores, label)\n eval_total_loss += loss_val_eval.item()\n \n eval_total_acc += accuracy(scores, label)\n 
\n return eval_total_loss, eval_total_acc \n\n\ndef rnnmodel():\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = LSTMModel().to(device)\n loss = nn.CrossEntropyLoss().to(device)\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n for epoch in range(10):\n print(\"Epoch : \",epoch+1)\n start_time = time.time()\n\n train_total_loss, train_total_acc = train(train_dataloader, loss, optimizer, model, device)\n eval_total_loss, eval_total_acc = evaluate(dev_dataloader, loss, model, device) \n\n print('Train : Total accuracy : ', train_total_acc/len(train_dataloader))\n print('Train : Loss : ',train_total_loss/len(train_dataloader))\n print('Dev : Total Accuracy : ',eval_total_acc/len(dev_dataloader))\n print('Dev : Total loss : ',eval_total_loss/len(dev_dataloader))\n print('Time taken per epoch : ',time.time() - start_time)\n print('________________________________________________________________________________________')\n \n return model", "repo_name": "gokulsg/Diacritization", "sub_path": "solution2.py", "file_name": "solution2.py", "file_ext": "py", "file_size_in_byte": 6000, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "string.ascii_lowercase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.utils.data.Dataset", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 183, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 186, 
"usage_type": "call"}, {"api_name": "torch.optim", "line_number": 186, "usage_type": "name"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "time.time", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "31766340408", "text": "import os\nimport re\nimport lxml\nimport lxml.html\nimport lxml.etree\nfrom cached_property import cached_property\nfrom .utils import flatten\nfrom .readers import JsonReader\nfrom .writers import JsonWriter\n\n\nclass Formats(object):\n\n '''\n Class for providing information on supported file formats.\n\n `TissueMAPS` supports most file formats supported by Bio-Formats.\n '''\n\n #: Some file formats require additional metadata files, which are not\n #: directly supported by Bio-Formats.\n #: For more information, please refer to\n #: :meth:`tmlib.metaconfig.default.configure_ome_metadata_from_additional_files`\n SUPPORT_FOR_ADDITIONAL_FILES = {'cellvoyager', 'visiview'}\n\n @property\n def _filename(self):\n location = os.path.dirname(os.path.abspath(__file__))\n self.__filename = os.path.join(location, 'formats',\n 'supported-formats.json')\n return self.__filename\n\n @cached_property\n def supported_formats(self):\n '''\n Returns\n -------\n Dict[str, List[str]]\n names and file extensions of supported formats as key-value pairs\n '''\n with JsonReader(self._filename) as f:\n supported_formats = f.read()\n supported_formats.update({u'Visiview': [u'.tiff']})\n supported_formats.update({u'Visiview (STK)': [u'.stk', u'.nd']})\n return supported_formats\n\n @property\n def supported_extensions(self):\n '''\n Returns\n -------\n Set[str]\n file extensions of supported formats\n '''\n all_extensions = flatten(self.supported_formats.values())\n return set(all_extensions)\n\n def extract_supported_formats(self, input_filename, support_level=0):\n '''\n Extract names and extensions of supported formats from XML or HTML file\n and save them as key-value pairs in a JSON file.\n\n The XML file can be generated via the Bio-Formats command line tool\n `formatlist `_::\n\n formatlist -xml > supported-formats.xml\n\n The HTML file can be downloaded from the Bio-Formats website, which lists\n `supported formats `_\n together with the level of support for each format::\n\n wget http://www.openmicroscopy.org/site/support/bio-formats5.1/supported-formats.html\n\n Parameters\n ----------\n input_filename: str\n absolute path to the XML or HTML file, that specifies the supported\n formats\n support_level: uint, optional\n minimum level of support for reading pixel and metadata,\n where 0 is no support, 1 is \"poor\" and 5 is \"outstanding\" support\n (Note: support information is only available for the HTML file)\n\n Raises\n ------\n OSError\n when `filename` does not exist\n '''\n if not os.path.exists(input_filename):\n raise OSError('File does not exist: %s' % input_filename)\n\n if input_filename.endswith('xml'):\n tree = lxml.etree.parse(input_filename)\n format_elements = tree.xpath('.//format')\n extensions = list()\n names = list()\n for fe in format_elements:\n names.append(fe.attrib['name'])\n children_elements = fe.getchildren()\n if children_elements:\n ext = [c.attrib['value'] for c in children_elements\n if c.attrib['name'] == 'extensions'\n and c.attrib['value']]\n if ext:\n ext = ext[0].split('|')\n ext = ['.%s' % e for e in ext]\n extensions.append(ext)\n\n elif input_filename.endswith('html'):\n tree = lxml.html.parse(input_filename)\n method_elements = tree.xpath('.//table/thead/tr/th/img/@alt')\n methods = 
[re.search(r'header-(\\w+).png', me).group(1)\n for me in method_elements]\n pixel_index = methods.index('pixels')\n metadata_index = methods.index('metadata')\n format_elements = tree.xpath('.//div[@id=\"supported-formats\"]/table/tbody/tr')\n extensions = list()\n names = list()\n for fe in format_elements:\n support_level_elements = fe.xpath('td/img/@alt')\n support_level = [int(re.search(r'^(\\d)', sle).group(1))\n if re.search(r'^(\\d)', sle) else 0\n for sle in support_level_elements]\n name_elements = fe.xpath('td/a/em/text()')\n pixel_support = support_level[pixel_index]\n metadata_support = support_level[metadata_index]\n if pixel_support >= 3 and metadata_support >= 3:\n extensions_element = fe.xpath('td/text()')\n if extensions_element:\n if len(extensions_element[0]) > 1:\n extensions.append(extensions_element[0].split(', '))\n names.append(name_elements[0])\n\n with JsonWriter() as writer:\n writer.write(self.filename, dict(zip(names, extensions)))\n", "repo_name": "TissueMAPS/TissueMAPS", "sub_path": "tmlibrary/tmlib/formats.py", "file_name": "formats.py", "file_ext": "py", "file_size_in_byte": 5433, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "readers.JsonReader", "line_number": 41, "usage_type": "call"}, {"api_name": "cached_property.cached_property", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.flatten", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "lxml.etree.parse", "line_number": 93, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 93, "usage_type": "attribute"}, {"api_name": "lxml.html.parse", "line_number": 110, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 110, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 112, "usage_type": "call"}, {"api_name": "re.search", "line_number": 122, "usage_type": "call"}, {"api_name": "re.search", "line_number": 121, "usage_type": "call"}, {"api_name": "writers.JsonWriter", "line_number": 134, "usage_type": "call"}]} +{"seq_id": "24056337732", "text": "import logging\n\nfrom storytime.character import Character\nfrom storytime.story import Story\nfrom storytime.time_period import TimePeriod\nfrom storytime.video import create_video\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n)\n\nlogger = logging.getLogger(__name__)\n\nstory_types = {\n \"fairy tale\": \"Fairy tales are short and interesting tales, featuring \"\n \"folkloric fantasy characters.\",\n \"science fiction comedy\": \"Science fiction comedy is a genre of \"\n \"science fiction that combines science fiction with comedy.\",\n}\n\n\ndef generate_story(\n story_type: str,\n with_images: bool,\n) -> Story:\n \"\"\"Generates a story from the archetype.\"\"\"\n if story_type == \"fairy tale\":\n time_period = TimePeriod(era=\"Medieval\")\n characters = {\n \"protagonist\": generate_character(\"princess\"),\n \"antagonist\": generate_character(\"witch\"),\n 
\"deuteragonist\": generate_character(\"prince\"),\n \"confidante\": generate_character(\"wizard\"),\n \"love interest\": generate_character(\"knight\"),\n \"foil\": generate_character(\"queen\"),\n \"tertiary 1\": generate_character(\"king\"),\n }\n story = Story(\n target_audience=\"children\",\n genre=\"fantasy\",\n themes=[\"friendship\", \"dreams\"],\n narrative_structure=\"Five-Act\",\n time_period=time_period,\n characters=characters,\n area=\"medieval\",\n with_images=with_images,\n medium=\"digital art\",\n style=\"pixar\",\n )\n elif story_type == \"science fiction comedy\":\n time_period = TimePeriod(era=\"Future\")\n characters = {\n \"protagonist\": generate_character(\"ensign\"),\n \"antagonist\": generate_character(\"captain\"),\n \"deuteragonist\": generate_character(\"engineer\"),\n \"confidante\": generate_character(\"chief medical officer\"),\n \"love interest\": generate_character(\"lieutenant\"),\n \"foil\": generate_character(\"technician\"),\n \"tertiary 1\": generate_character(\"crewman\"),\n }\n story = Story(\n target_audience=\"teenagers\",\n genre=\"science fiction comedy\",\n themes=[\"coming of age\", \"artificial intelligence\"],\n narrative_structure=\"Five-Act\",\n time_period=time_period,\n characters=characters,\n area=\"space\",\n with_images=with_images,\n medium=\"digital art\",\n style=\"photorealistic\",\n )\n else:\n logger.error(\n f\"Story type: {story_type} not found. Generating random \" f\"story.\"\n )\n story = Story()\n return story\n\n\ndef generate_character(character_type: str) -> Character:\n \"\"\"Generates a character from the archetype.\"\"\"\n if character_type == \"princess\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Female\",\n age=14,\n occupation=\"princess\",\n )\n elif character_type == \"prince\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Male\",\n age=10,\n occupation=\"prince\",\n )\n elif character_type == \"king\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Male\",\n age=40,\n occupation=\"king\",\n )\n elif character_type == \"queen\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Female\",\n age=35,\n occupation=\"queen\",\n )\n elif character_type == \"wizard\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Male\",\n age=62,\n occupation=\"wizard\",\n )\n elif character_type == \"witch\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Female\",\n age=66,\n occupation=\"witch\",\n )\n elif character_type == \"knight\":\n character = Character(\n era=\"Medieval\",\n ethnicity=\"old-english\",\n gender=\"Male\",\n age=25,\n occupation=\"knight\",\n )\n elif character_type == \"ensign\":\n character = Character(\n era=\"Future\",\n ethnicity=\"spanish\",\n gender=\"Male\",\n age=25,\n occupation=\"ensign\",\n )\n elif character_type == \"captain\":\n character = Character(\n era=\"Future\",\n ethnicity=\"german\",\n gender=\"Male\",\n age=35,\n occupation=\"captain\",\n )\n elif character_type == \"engineer\":\n character = Character(\n era=\"Future\",\n ethnicity=\"hindi\",\n gender=\"Female\",\n age=26,\n occupation=\"engineer\",\n )\n elif character_type == \"chief medical officer\":\n character = Character(\n era=\"Future\",\n ethnicity=\"korean\",\n gender=\"Female\",\n age=40,\n occupation=\"chief medical officer\",\n )\n elif character_type == \"lieutenant\":\n character = 
Character(\n era=\"Future\",\n ethnicity=\"swahili\",\n gender=\"Female\",\n age=27,\n occupation=\"lieutenant\",\n )\n elif character_type == \"technician\":\n character = Character(\n era=\"Future\",\n ethnicity=\"english\",\n gender=\"Male\",\n age=24,\n occupation=\"technician\",\n )\n elif character_type == \"crewman\":\n character = Character(\n era=\"Future\",\n ethnicity=\"japanese\",\n gender=\"Male\",\n age=32,\n occupation=\"crewman\",\n )\n else:\n logger.error(\n f\"Character type: {character_type} not found. Generating \"\n f\"random character.\"\n )\n character = Character()\n return character\n\n\nif __name__ == \"__main__\":\n fairy_tale = generate_story(\n \"science fiction comedy\",\n with_images=True,\n )\n fairy_tale.save_as_json()\n fairy_tale.download_image_set()\n fairy_tale.add_narration()\n create_video(fairy_tale)\n print(fairy_tale)\n", "repo_name": "myrontuttle/storytime", "sub_path": "storytime/archetypes.py", "file_name": "archetypes.py", "file_ext": "py", "file_size_in_byte": 6478, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "storytime.time_period.TimePeriod", "line_number": 29, "usage_type": "call"}, {"api_name": "storytime.story.Story", "line_number": 39, "usage_type": "call"}, {"api_name": "storytime.time_period.TimePeriod", "line_number": 52, "usage_type": "call"}, {"api_name": "storytime.story.Story", "line_number": 62, "usage_type": "call"}, {"api_name": "storytime.story.Story", "line_number": 78, "usage_type": "call"}, {"api_name": "storytime.story.Story", "line_number": 26, "usage_type": "name"}, {"api_name": "storytime.character.Character", "line_number": 85, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 93, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 101, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 109, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 117, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 125, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 133, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 141, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 149, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 157, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 165, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 173, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 181, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 189, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 201, "usage_type": "call"}, {"api_name": "storytime.character.Character", "line_number": 82, "usage_type": "name"}, {"api_name": "storytime.video.create_video", "line_number": 213, "usage_type": "call"}]} +{"seq_id": "33996885630", "text": "\"\"\"TDE classifiers - numba methods.\"\"\"\n\n__author__ = [\"MatthewMiddlehurst\"]\n\nfrom sktime.utils.numba.njit import 
njit\nfrom sktime.utils.validation._dependencies import _check_soft_dependencies\n\nif _check_soft_dependencies(\"numba\", severity=\"none\"):\n from numba import types\n\n\n@njit(fastmath=True, cache=True)\ndef _histogram_intersection_dict(first, second):\n sim = 0\n for word, val_a in first.items():\n val_b = second.get(word, types.uint32(0))\n sim += min(val_a, val_b)\n return sim\n", "repo_name": "sktime/sktime", "sub_path": "sktime/classification/dictionary_based/_tde_numba.py", "file_name": "_tde_numba.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7028, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sktime.utils.validation._dependencies._check_soft_dependencies", "line_number": 8, "usage_type": "call"}, {"api_name": "numba.types.uint32", "line_number": 16, "usage_type": "call"}, {"api_name": "numba.types", "line_number": 16, "usage_type": "name"}, {"api_name": "sktime.utils.numba.njit.njit", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "34653996318", "text": "import argparse\nimport mutagen.id3\nimport mutagen.mp3\nimport rainwave_tools.utils\n\n\ndef log(m):\n print(m)\n\n\nTAG_SPEC = {'album': 'TALB', 'apic': 'APIC', 'art': 'APIC', 'artist': 'TPE1', 'artist2': 'TPE2', 'bpm': 'TBPM',\n 'comm': 'COMM', 'comment': 'COMM', 'composer': 'TCOM', 'disc': 'TPOS', 'encoder': 'TSSE', 'genre': 'TCON',\n 'isrc': 'TSRC', 'lyric': 'USLT', 'popm': 'POPM', 'priv': 'PRIV', 'private': 'PRIV', 'rva2': 'RVA2',\n 'talb': 'TALB', 'tbpm': 'TBPM', 'tcmp': 'TCMP', 'tcom': 'TCOM', 'tcon': 'TCON', 'tcop': 'TCOP',\n 'tdrc': 'TDRC', 'tdrl': 'TDRL', 'tdtg': 'TDTG', 'tenc': 'TENC', 'text': 'TXXX', 'tflt': 'TFLT',\n 'tit1': 'TIT1', 'tit2': 'TIT2', 'tit3': 'TIT3', 'title': 'TIT2', 'tmed': 'TMED', 'toal': 'TOAL',\n 'tope': 'TOPE', 'tpe1': 'TPE1', 'tpe2': 'TPE2', 'tpos': 'TPOS', 'tpub': 'TPUB', 'track': 'TRCK',\n 'trck': 'TRCK', 'tsrc': 'TSRC', 'tsse': 'TSSE', 'tsst': 'TSST', 'txxx': 'TXXX', 'ufid': 'UFID',\n 'uslt': 'USLT', 'wcom': 'WCOM', 'woaf': 'WOAF', 'woar': 'WOAR', 'www': 'WXXX', 'wxxx': 'WXXX',\n 'year': 'TDRC'}\n\n\ndef tag_drop(args):\n for mp3 in rainwave_tools.utils.get_mp3s(args.path):\n try:\n _md = mutagen.id3.ID3(str(mp3))\n except mutagen.id3.ID3NoHeaderError:\n _md = mutagen.id3.ID3()\n _tag = TAG_SPEC.get(args.tag, args.tag)\n _md.delall(_tag)\n try:\n _md.save()\n except IOError as _ioe:\n log(f'ERROR : {_ioe}')\n continue\n log(f'{mp3} : dropped all tags of type {args.tag!r}')\n\n\ndef tag_dump(args):\n for mp3 in rainwave_tools.utils.get_mp3s(args.path):\n _md = mutagen.id3.ID3(str(mp3))\n log(_md.pprint())\n log('---------')\n\n\ndef tag_set(args):\n for mp3 in rainwave_tools.utils.get_mp3s(args.path):\n try:\n _md = mutagen.id3.ID3(str(mp3))\n except mutagen.id3.ID3NoHeaderError:\n _md = mutagen.id3.ID3()\n _tag = TAG_SPEC.get(args.tag, args.tag)\n if _tag in ['COMM', 'TALB', 'TCON', 'TDRC', 'TIT2', 'TPE1', 'TPOS', 'TRCK']:\n _md.delall(_tag)\n tag_class = getattr(mutagen.id3, _tag)\n _md.add(tag_class(encoding=3, text=[args.value]))\n elif _tag == 'WXXX':\n _md.delall(_tag)\n _md.add(mutagen.id3.WXXX(encoding=0, url=args.value))\n _md.save(str(mp3))\n log(f'{mp3}: {args.tag} set to {args.value!r}')\n\n\ndef tag_show(args):\n for mp3 in rainwave_tools.utils.get_mp3s(args.path):\n _audio = mutagen.mp3.MP3(str(mp3))\n log(f'file : {mp3}')\n log(f'length : {int(_audio.info.length)} seconds')\n _md = mutagen.id3.ID3(str(mp3))\n\n for _frame in _md.getall('TALB'):\n for _text in _frame.text:\n log(f'album 
: {_text}')\n\n for _frame in _md.getall('TIT2'):\n for _text in _frame:\n log(f'title : {_text}')\n\n for _frame in _md.getall('TPE1'):\n for _text in _frame.text:\n log(f'artist : {_text}')\n\n for _frame in _md.getall('TCON'):\n for _text in _frame:\n log(f'genre : {_text}')\n\n for _frame in _md.getall('TRCK'):\n for _text in _frame:\n log(f'track : {_text}')\n\n for _frame in _md.getall('TPOS'):\n for _text in _frame:\n log(f'disc : {_text}')\n\n for _frame in _md.getall('WXXX'):\n log(f'www : {_frame.url}')\n\n for _frame in _md.getall('COMM'):\n for _text in _frame:\n log(f'comment : {_text}')\n\n for _frame in _md.getall('TDRC'):\n for _text in _frame:\n log(f'year : {_text}')\n log('---------')\n\n\ndef parse_args():\n ap = argparse.ArgumentParser(description='View and edit ID3 tags on MP3 files.')\n\n sp_desc = 'Specify one of the following commands. To get help on an individual command, use \\'rwtag -h\\'.'\n sp = ap.add_subparsers(title='Available commands', description=sp_desc, dest='command')\n sp.required = True\n\n ps_drop = sp.add_parser('drop', description='Remove a tag from one or more MP3 files.', aliases=['remove', 'rm'])\n ps_drop.add_argument('tag', help='The name of the tag to remove.')\n ps_drop.add_argument('path', nargs='+', help=rainwave_tools.utils.path_help)\n ps_drop.set_defaults(func=tag_drop)\n\n ps_dump = sp.add_parser('dump', description='Show all tags on one or more MP3 files.')\n ps_dump.add_argument('path', nargs='+', help=rainwave_tools.utils.path_help)\n ps_dump.set_defaults(func=tag_dump)\n\n ps_set = sp.add_parser('set', description='Set a tag to a certain value on one or more MP3 files.')\n ps_set.add_argument('tag', help='The name of the tag to set.')\n ps_set.add_argument('value', help='The value to set for the tag.')\n ps_set.add_argument('path', nargs='+', help=rainwave_tools.utils.path_help)\n ps_set.set_defaults(func=tag_set)\n\n ps_show = sp.add_parser('show', description='Show only tags that Rainwave cares about on one or more MP3 files.')\n ps_show.add_argument('path', nargs='+', help=rainwave_tools.utils.path_help)\n ps_show.set_defaults(func=tag_show)\n\n return ap.parse_args()\n\n\ndef main():\n args = parse_args()\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "williamjacksn/rainwave-tools", "sub_path": "rainwave_tools/rwtag.py", "file_name": "rwtag.py", "file_ext": "py", "file_size_in_byte": 5387, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rainwave_tools.utils.utils.get_mp3s", "line_number": 24, "usage_type": "call"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 24, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 24, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 26, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 26, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 26, "usage_type": "name"}, {"api_name": "mutagen.id3.id3", "line_number": 27, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 27, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 28, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 28, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils.get_mp3s", "line_number": 40, "usage_type": "call"}, {"api_name": "rainwave_tools.utils.utils", 
"line_number": 40, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 40, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 41, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 41, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 41, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils.get_mp3s", "line_number": 47, "usage_type": "call"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 47, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 49, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 49, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 49, "usage_type": "name"}, {"api_name": "mutagen.id3.id3", "line_number": 50, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 50, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 51, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 51, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 51, "usage_type": "name"}, {"api_name": "mutagen.id3.id3", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 55, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.WXXX", "line_number": 59, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 59, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 59, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils.get_mp3s", "line_number": 65, "usage_type": "call"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 65, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 65, "usage_type": "name"}, {"api_name": "mutagen.id3.mp3.MP3", "line_number": 66, "usage_type": "call"}, {"api_name": "mutagen.id3.mp3", "line_number": 66, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 66, "usage_type": "name"}, {"api_name": "mutagen.id3.id3.ID3", "line_number": 69, "usage_type": "call"}, {"api_name": "mutagen.id3.id3", "line_number": 69, "usage_type": "attribute"}, {"api_name": "mutagen.id3", "line_number": 69, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 109, "usage_type": "call"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 117, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 117, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 121, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 121, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 127, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 127, "usage_type": "name"}, {"api_name": "rainwave_tools.utils.utils", "line_number": 131, "usage_type": "attribute"}, {"api_name": "rainwave_tools.utils", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "10837556764", "text": "import cv2, os\nimport dlib\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\n\n\nclass Query():\n\n def __init__(self, frame):\n self.frame = frame\n self.size = self.frame.shape\n\n def open(self):\n cv2.putText(self.frame, '----- Press \"s\" to start -----', (int(self.size[1]/2-200), self.size[0]-25), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.frame, '----- Press \"q\" 
to quit -----', (int(self.size[1]/2-200), self.size[0]-5), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n return self.frame\n\n def pose_estimation(self, detector, predictor, count=0, if_record=False):\n\n origin_frame = self.frame.copy()\n cv2.putText(self.frame, '----- Press \"v\" to record -----', (int(self.size[1]/2-200), self.size[0]-25), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.frame, '----- Press \"q\" to quit -----', (int(self.size[1]/2-200), self.size[0]-5), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n\n # Camera internals\n size = self.frame.shape\n focal_length = size[1]\n center = (size[1]/2, size[0]/2)\n camera_matrix = np.array(\n [[focal_length, 0, center[0]],\n [0, focal_length, center[1]],\n [0, 0, 1]], dtype = \"double\"\n )\n\n # Find 68 points from dlib\n ps = []\n rects = detector(self.frame, 0)\n for i in range(len(rects)):\n landmarks = np.matrix([[p.x, p.y] for p in predictor(self.frame,rects[i]).parts()])\n for idx, point in enumerate(landmarks):\n\n # 68 points\n pos = (point[0, 0], point[0, 1])\n ps.append(pos)\n\n # Capture one face\n if len(ps) == 68: \n\n # Find 2D face points\n image_points = np.array([\n ps[30], # Nose tip\n ps[8], # Chin\n ps[36], # Left eye left corner\n ps[45], # Right eye right corner\n ps[48], # Left mouth corner\n ps[54] # Right mouth corner\n ], dtype=\"double\")\n\n # 3D model points.\n model_points = np.array([(0.0, 0.0, 0.0),\n (0.0, -300.0, -65.0),\n (-150.0, 170.0, -135.0),\n (150.0, 170.0, -135.0),\n (-150.0, -150.0, -125.0),\n (150.0, -150.0, -125.0)])\n\n for p in image_points:\n cv2.circle(self.frame, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n\n dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion\n\n # Calculate rotation vector to euler angle\n (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs)#, flags=cv2.CV_ITERATIVE)\n rv = np.array((rotation_vector[0][0], rotation_vector[1][0], rotation_vector[2][0]))\n r = R.from_rotvec(rv)\n angle = r.as_euler('zyx', degrees=True) # calculate euler angle\n\n # Print text to image\n cv2.putText(self.frame, 'x: '+ str(round(angle[0],2)), (0, 20), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.frame, 'y: '+ str(round(angle[1],2)), (0, 40), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(self.frame, 'z: '+ str(round(angle[2],2)), (0, 60), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n\n # Exclude multi-face\n elif len(ps) > 69:\n cv2.putText(self.frame, 'Warning! 
There are too many people', (int(size[1]/2-200), int(size[0]/2)), cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 255), 1, cv2.LINE_AA)\n \n # Record image or not\n if if_record:\n\n cv2.putText(self.frame, '----- Recording -----', (int(self.size[1]/2-200), self.size[0]-80), cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 255), 1, cv2.LINE_AA)\n cv2.putText(self.frame, '----- Press \"p\" to pause -----', (int(self.size[1]/2-200), self.size[0]-60), cv2.FONT_HERSHEY_DUPLEX, 0.7, (0, 0, 255), 1, cv2.LINE_AA)\n\n if len(ps) == 68:\n for p in image_points:\n cv2.circle(origin_frame, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n\n # Print text to image\n cv2.putText(origin_frame, 'x: '+ str(round(angle[0],2)), (0, 20), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(origin_frame, 'y: '+ str(round(angle[1],2)), (0, 40), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n cv2.putText(origin_frame, 'z: '+ str(round(angle[2],2)), (0, 60), cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 1, cv2.LINE_AA)\n \n self.record(count, origin_frame)\n\n return self.frame\n\n\n def record(self, count, frame):\n os.mkdir('../demo') if not os.path.exists('../demo') else None\n k = '%03d' % count\n cv2.imwrite(f'../demo/{k}.jpg', frame)", "repo_name": "Rayhchs/pose-estimation-dlib", "sub_path": "src/Query.py", "file_name": "Query.py", "file_ext": "py", "file_size_in_byte": 5398, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.putText", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "cv2.solvePnP", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation.from_rotvec", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.spatial.transform.Rotation", "line_number": 74, "usage_type": "name"}, {"api_name": "cv2.putText", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 78, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 78, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 79, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 
79, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 79, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 80, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 90, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 97, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 97, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 97, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 98, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 99, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 99, "usage_type": "attribute"}, {"api_name": "cv2.LINE_AA", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 107, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "41382611676", "text": "from cv2 import cv2\nimport numpy as np\n\ndef read_preprogress(path):\n # read the picture and transform it to gray,and then return the array data of pic\n img = cv2.imread(path,1)\n img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n return img_gray\n\ndef strdata_progress(preprogressed):\n str_data = []\n img_y_len = len(preprogressed)\n img_x_len = len(preprogressed[0])\n str_y_len = int(img_y_len / 16)\n str_x_len = int(img_x_len / 4)\n str_data = np.zeros((str_y_len,str_x_len))\n for y in range(str_y_len):\n for x in range(str_x_len):\n data_sum = 0\n for pic_y in range(y * 16,y * 16 + 16):\n for pic_x in range(x * 4,x * 4 + 4):\n data_sum = data_sum + preprogressed[pic_y][pic_x]\n data_ave = int(data_sum / 64)\n str_data[y][x] = data_ave\n return str_data", "repo_name": "ZenithNUC/pic2str", "sub_path": "preprogress.py", "file_name": "preprogress.py", "file_ext": "py", "file_size_in_byte": 867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 6, "usage_type": "name"}, {"api_name": "cv2.cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.cv2", "line_number": 7, "usage_type": "name"}, {"api_name": "cv2.cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.zeros", 
"line_number": 16, "usage_type": "call"}]} +{"seq_id": "73985812004", "text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.shortcuts import HttpResponseRedirect\nfrom .models import Pelis\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\n\nimport re\nfrom .serializers import PelisSerializer\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\n\n\n# ------------------------------------------------------------------------------\n# BUSCADORES\n# ------------------------------------------------------------------------------\n\ndef buscador(request):\n\n\t'''\n\tPágina que muestra el formulario para introducir el actor\n\t'''\n\n\tlogger.debug(\"Accediendo al buscador de películas\")\n\n\treturn render(request,\"mainBuscador.html\")\n\n\n# ------------------------------------------------------------------------------\n# TAREA 4\n# ------------------------------------------------------------------------------\n\ndef mongoengine_year(request, entrada):\n\n\t'''\n\tMostrar las primeras películas a partir de la entrada de un actor\n\t(haciendo uso de mongoengine)\n\t'''\n\tregex = re.compile(entrada)\n\tpelis = Pelis.objects(actors = regex)\n\n\tcontext = {\n\t\t'lista': pelis,\n\t\t'entrada': True,\n\t}\n\n\treturn render(request, \"salida.html\", context)\n\n\n# ------------------------------------------------------------------------------\n# TAREA 5\n# ------------------------------------------------------------------------------\n\ndef pelis_que_sale(request, entrada):\n\n\t'''\n\tMostrar las primeras películas a partir de la entrada de un actor\n\t(haciendo uso de mongoengine)\n\t'''\n\tregex = re.compile(entrada)\n\tpelis = Pelis.objects(actors = regex)\n\n\tcontext = {\n\t\t'lista': pelis,\n\t\t'entrada': True,\n\t\t'resultados': pelis.count(),\n\t}\n\n\tlogger.debug('Películas protagonizadas por el actor %s' % entrada)\n\n\treturn render(request, \"salida1.html\", context)\n\n# ------------------------------------------------------------------------------\n\ndef formulario(request):\n\n\t'''\n\tPágina que muestra el formulario para introducir el actor\n\t'''\n\n\tlogger.debug(\"Accediendo al formulario para el actor\")\n\n\treturn render(request,\"formulario1.html\")\n\n# ------------------------------------------------------------------------------\n\ndef peliculas_actor(request):\n\n\t'''\n\tObtenemos las películas en donde dicho actor es protagonista\n\t'''\n\n\tactor = request.POST.get('actor')\n\n\tlogger.debug('Películas protagonizadas por el actor')\n\n\treturn HttpResponseRedirect(reverse('pelis_que_sale',args=[actor]))\n\n\n# ------------------------------------------------------------------------------\n# TAREA 6\n# ------------------------------------------------------------------------------\n\n@login_required\ndef informacion_pelicula(request, id):\n\n\t'''\n\tSe muestra información relevante de la película a partir de su ID\n\t'''\n\t# Buscamos la película solo por el ID\n\tpelicula = Pelis.objects(id=id)\n\n\tcontext = {\n\t\t'pelicula': pelicula[0],\n\t\t'id': True,\n\t}\n\n\tlogger.debug('Mostrada información de la película con id %s' % id)\n\n\treturn render(request,\"informacion_pelis.html\",context)\n\n# ------------------------------------------------------------------------------\n\ndef formulario_id(request):\n\n\t'''\n\tPágina que muestra el formulario para introducir el id\n\t'''\n\n\tlogger.debug(\"Accediendo al formulario para el 
identificador\")\n\n\treturn render(request,\"id.html\")\n\n# ------------------------------------------------------------------------------\n\ndef peliculas_id(request):\n\n\t'''\n\tObtenemos la películas de dicho ID\n\t'''\n\n\tid = request.POST.get('id')\n\n\tlogger.debug('Mostrada información de la película')\n\n\treturn HttpResponseRedirect(reverse('informacion_pelicula',args=[id]))\n\n\n# ------------------------------------------------------------------------------\n# TAREA 7\n# ------------------------------------------------------------------------------\n\n@login_required\ndef crud(request):\n\n\t'''\n\tObtemos la página de inicio del CRUD y visualizamos las 100 primeras\n\tpelículas ordenadas por actor\n\t'''\n\n\tpeliculas = Pelis.objects().all().order_by('actor')[:100]\n\tcontext = {\n\t\t'lista' : peliculas,\n\t\t'general': True,\n\t}\n\n\tlogger.debug(\"Accediendo al CRUD\")\n\n\treturn render(request, \"crud.html\", context)\n\n# ------------------------------------------------------------------------------\n\n@login_required\ndef crear_pelicula(request):\n\n\t'''\n\tFunción que permite crearnos una película, a partir de los campos:\n\ttitulo, año, director, actores, genero, puntuacion, duracion\n\t'''\n\n\t# Para crear una película se debe de hacer uso del método POST\n\tif(request.method == \"POST\"):\n\t\tparametros = request.POST\n\t\tactores = parametros['actores'].split(\", \")\n\t\tgenero = parametros['genero'].split(\", \")\n\n\t\t# Creamos la película\n\t\tpelicula = Pelis(title = parametros['titulo'],\n\t\t\t\t\t\t year = parametros['año'],\n\t\t\t\t\t\t director = parametros['director'],\n\t\t\t\t\t\t actors = actores,\n\t\t\t\t\t\t genres = genero,\n\t\t\t\t\t\t imdb = {'rating' : parametros['puntuacion']},\n\t\t\t\t\t\t runtime = parametros['duracion'])\n\n\t\t# Guardamos la pelicula\n\t\tpelicula.save()\n\n\tlogger.debug(\"Película creada correctamente\")\n\n\treturn HttpResponseRedirect(reverse('crud'))\n\n# ------------------------------------------------------------------------------\n\n@login_required\ndef borrar_pelicula(request,id):\n\n\t'''\n\tFunción que permite borrar una película, a partir de su identificador\n\t'''\n\n\tpelicula = Pelis.objects(id=id)\n\n\tif(pelicula.count()==1):\n\t\tpelicula.delete()\n\n\tlogger.debug(\"Película borrada correctamente\")\n\n\treturn HttpResponseRedirect(reverse('crud'))\n\n# ------------------------------------------------------------------------------\n\ndef editar_pelicula(request,id):\n\n\t'''\n\tFunción que permite editar una película a partir de su identificador, y\n\tlos campos: titulo, año, director, actores, genero, puntuacion, duracion\n\t'''\n\tpelicula = Pelis.objects(id=id)\n\n\t# Se da la opción de que el usuario cambie solo datos concretos\n\tif(request.method == \"POST\"):\n\t\tparametros = request.POST\n\n\t\tif(parametros.get('titulo')!= ''):\n\t\t\tpelicula.update_one(title=parametros.get('titulo'))\n\n\t\tif (parametros.get('año') != ''):\n\t\t\tpelicula.update_one(year=parametros.get('año'))\n\n\t\tif (parametros.get('director') != ''):\n\t\t\tpelicula.update_one(director=parametros.get('director'))\n\n\t\tif (parametros.get('actores') != ''):\n\t\t\tactores = parametros.get('actores').split(\", \")\n\t\t\tpelicula.update_one(actors=actores)\n\n\t\tif (parametros.get('genero') != ''):\n\t\t\tgenero = parametros.get('genero').split(\", \")\n\t\t\tpelicula.update_one(actors=genero)\n\n\t\tif (parametros.get('puntuacion') != 
''):\n\t\t\tpelicula.update_one(imdb__rating=parametros.get('puntuacion'))\n\n\t\tif (parametros.get('duracion') != ''):\n\t\t\tpelicula.update_one(runtime=parametros.get('duracion'))\n\n\tlogger.debug(\"Película editada correctamente\")\n\n\treturn HttpResponseRedirect(reverse('crud'))\n\n\n# ------------------------------------------------------------------------------\n# TAREA 12\n# ------------------------------------------------------------------------------\n\ndef api_pelis(request):\n\n\t'''\n\tFunción para la API que lista todas las películas (GET) y\n\tpermite añadir (POST)\n\t'''\n\n\t# Método para listar\n\tif request.method == 'GET':\n\t\tpelis = Pelis.objects.all()[:10]\n\t\tserializer = PelisSerializer(pelis, many=True)\n\n\t\treturn JsonResponse(serializer.data, safe=False)\n\n\t# Método para añadir\n\tif request.method == 'POST':\n\t\tdata = JSONParser().parse(request)\n\t\tserializer = PelisSerializer(data=data)\n\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\n\t\t\treturn JsonResponse(serializer.data, status=201)\n\n\tlogger.debug('Error')\n\n\treturn JsonResponse(serializers.errors, status=400)\n\n# ------------------------------------------------------------------------------\n\ndef api_peli(request, id):\n\n\t'''\n\tFunción para la API que permite listar todas las películas (GET),\n\tmodificarlas (PUT) y/o borrarlas (DELETE)\n\t'''\n\n\ttry:\n\t\tpeli = Pelis.objects().get(id=id)\n\texcept:\n\t\treturn HttpResponse(status=404) # No encontrado\n\n\t# Método para listar\n\tif request.method == 'GET':\n\t\tserializer = PelisSerializer(peli)\n\t\treturn JsonResponse(serializer.data)\n\n\t# Método para modificar\n\tif request.method == 'PUT':\n\t\tdata = JSONParser().parse(request)\n\t\tserializer = PelisSerializer(data=data)\n\n\t\tpeli.title \t\t = data.title\n\t\tpeli.title = data.title\n\t\tpeli.year = data.year\n\t\tpeli.rated = data.rated\n\t\tpeli.runtime = data.runtime\n\t\tpeli.countries = data.countries\n\t\tpeli.genres = data.genres\n\t\tpeli.director = data.director\n\t\tpeli.writers = data.writers\n\t\tpeli.actors = data.actors\n\t\tpeli.plot = data.plot\n\t\tpeli.poster = data.poster\n\t\tpeli.imdb = data.imdb\n\t\tpeli.tomato = data.tomato\n\t\tpeli.metacritic = data.metacritic\n\t\tpeli.awards = data.awards\n\t\tpeli.type = data.type\n\n\t\tpeli.save()\n\t\treturn JsonResponse(serializer.data, status=200)\n\n\t# Método para borrar\n\tif request.method == 'DELETE':\n\t\tpeli.delete()\n\t\treturn HttpResponse(status=200) # No encontrado\n", "repo_name": "Gecofer/MII_SSBW_1819", "sub_path": "codigo/pelis/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8909, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "models.Pelis.objects", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 43, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Pelis.objects", "line_number": 64, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, 
{"api_name": "django.shortcuts.render", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 100, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Pelis.objects", "line_number": 114, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 114, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 123, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 107, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 149, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 149, "usage_type": "call"}, {"api_name": "models.Pelis.objects", "line_number": 164, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 164, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 172, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 156, "usage_type": "name"}, {"api_name": "models.Pelis", "line_number": 191, "usage_type": "call"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 204, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 204, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 176, "usage_type": "name"}, {"api_name": "models.Pelis.objects", "line_number": 215, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 215, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 222, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 222, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 208, "usage_type": "name"}, {"api_name": "models.Pelis.objects", "line_number": 232, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 232, "usage_type": "name"}, {"api_name": "django.shortcuts.HttpResponseRedirect", "line_number": 263, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 263, "usage_type": "call"}, {"api_name": "models.Pelis.objects.all", "line_number": 279, "usage_type": "call"}, {"api_name": "models.Pelis.objects", "line_number": 279, "usage_type": "attribute"}, {"api_name": "models.Pelis", "line_number": 279, "usage_type": "name"}, {"api_name": "serializers.PelisSerializer", "line_number": 280, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 282, "usage_type": "call"}, {"api_name": "serializers.PelisSerializer", "line_number": 287, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 292, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 296, "usage_type": "call"}, {"api_name": "serializers.errors", "line_number": 296, "usage_type": "attribute"}, {"api_name": "models.Pelis.objects", "line_number": 308, "usage_type": "call"}, {"api_name": "models.Pelis", "line_number": 308, "usage_type": "name"}, {"api_name": "serializers.PelisSerializer", "line_number": 314, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 315, "usage_type": "call"}, {"api_name": "serializers.PelisSerializer", "line_number": 320, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 341, "usage_type": 
"call"}]} +{"seq_id": "15138285549", "text": "\"\"\"\nthis application loads the task.csv data to the table Taskdata in\npostgres DB\n\"\"\"\nimport os\nimport sys\nimport csv\nimport inspect\n\n\napp_path = inspect.getfile(inspect.currentframe()) # gets the current file directory\nsub_dir = os.path.realpath(os.path.dirname(app_path)) # gets the file preceding directory\nmain_dir = os.path.dirname(sub_dir) # gets the main directory\n\ncsv_file = os.path.join(main_dir, \"task/task_data.csv\") # gets the directory of the task.csv file\n\nproject_home = main_dir\n\nsys.path.append(project_home)\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"data-extraction-service.settings\") # get django setting properties\nos.environ['DJANGO_SETTINGS_MODULE'] = 'data-extraction-service.settings'\n\nfrom django.core.wsgi import get_wsgi_application # noqa: E402\napplication = get_wsgi_application() # get the django web application server.\n\nfrom showdata.models import Taskdata # noqa: E402\n\nwith open(csv_file, 'r') as csvfile:\n data_loader = csv.reader(csvfile) # reads the task.csv data\n next(data_loader) # skips the header names on the csv data\n\n # create a data object and inputs the data of each columns in the table,\n # this will store all data into the DB table.\n for ind in data_loader:\n Taskdata.objects.create(\n id=ind[0],\n timestamp=ind[1],\n temperature=ind[2],\n duration=ind[3])\n", "repo_name": "olahsymbo/data-extraction-service", "sub_path": "upload_data/stack_data.py", "file_name": "stack_data.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "inspect.getfile", "line_number": 11, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.core.wsgi.get_wsgi_application", "line_number": 25, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 30, "usage_type": "call"}, {"api_name": "showdata.models.Taskdata.objects.create", "line_number": 36, "usage_type": "call"}, {"api_name": "showdata.models.Taskdata.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "showdata.models.Taskdata", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "9956006022", "text": "import numpy as np\nimport pandas as pd\n#import sklearn.linear_model.base\nfrom flask import Flask, request, jsonify, render_template\n#import pickle\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home(): \n return render_template('index.html')\n@app.route('/read-tuple')\ndef read_tuple():\n '''\n For rendering results on HTML GUI\n '''\n 
headings = ('sepal-length','sepal-width','petal-length','petal-width','class')\n data = (\n ('5.1','3.5','1.4','0.2','Iris-setosa'),\n ('4.9','3.0','1.4','0.2','Iris-setosa')\n )\n \n return render_template('tuple.html', headings = headings, data = data)#, prediction_text=f'The species is Iris:{output}',data=prediction)#.format(output))\n\n@app.route('/read-dict')\ndef read_dict():\n\n df = pd.read_csv('dataset\\iris.data')\n\n #print(type(df))\n\n data_dict = df.to_dict()\n for key,value in data_dict.items():\n print()\n l = len(data_dict[key])\n # print(data_dict)\n \n return render_template('dict.html', data_dict = data_dict, l= l)\n\n@app.route('/read-df')\ndef read_df():\n\n df = pd.read_csv('dataset\\iris.data')\n\n #print(type(df))\n headings = list(df[:0])\n data = list(df.to_records(index=False))\n\n\n\n # print(data_dict)\n\n return render_template('tuple.html', headings=headings, data=data)\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)", "repo_name": "hrs19/flask_table", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1348, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "72800286564", "text": "\"\"\"\nUtils files for handling optuna studies\n\"\"\"\nimport os\nimport json\nimport optuna\nimport torch\n\nfrom CONFIG import OPTUNA\nfrom lib.logger import print_, log_info\nimport lib.utils as utils\n\n\ndef create_optuna_values_file(exp_path):\n \"\"\" Creating optuna value file \"\"\"\n exp_config = os.path.join(exp_path, \"optuna_values.json\")\n with open(exp_config, \"w\") as file:\n json.dump(OPTUNA, file)\n return\n\n\ndef load_optuna_values_file(exp_path):\n \"\"\" Creating optuna value file \"\"\"\n exp_config = os.path.join(exp_path, \"optuna_values.json\")\n with open(exp_config) as file:\n data = json.load(file)\n return data\n\n\ndef load_optuna_study(exp_path, study_name=None):\n \"\"\" Loading and unpickeling an Optuna study \"\"\"\n study_file = os.path.join(exp_path, \"optuna.db\")\n study = optuna.load_study(study_name=study_name, storage=f\"sqlite:///{study_file}\")\n return study\n\n\ndef suggest_values(trial, exp_path, exp_params, verbose=False):\n \"\"\"\n Suggesting values for several hyper-parameters to optimize, and updatiing the\n experiment parameters dictionary\n \"\"\"\n values = load_optuna_values_file(exp_path)\n\n for key, values in values.items():\n # selecting value\n if values[\"val\"][0] == values[\"val\"][1]:\n val = values[\"val\"][0]\n elif values[\"type\"] == \"int\":\n val = trial.suggest_int(\n name=key,\n low=values[\"val\"][0],\n high=values[\"val\"][1],\n log=values[\"log\"]\n )\n elif values[\"type\"] == \"float\":\n val = trial.suggest_float(\n name=key,\n low=values[\"val\"][0],\n high=values[\"val\"][1],\n log=values[\"log\"]\n )\n elif values[\"type\"] == \"cat_or_log\":\n val = get_cat_or_log(\n trial=trial,\n min_val=values[\"val\"][0],\n max_val=values[\"val\"][1],\n 
categorical=values[\"categorical\"],\n name=key\n )\n else:\n raise NotImplementedError(\"ERROR\")\n\n # logging\n func = print_ if verbose else log_info\n func(f\" --> Setting param {key}: {values['path']} with value {val}\")\n\n # updating exp_params\n utils.set_in_dict(params=exp_params, key_list=values[\"path\"], value=val)\n\n return exp_params\n\n\ndef get_cat_or_log(trial, min_val, max_val, categorical, name):\n \"\"\"\n Sampling a value from a categorical distribution with values logarithmically distributed,\n or directly sampling from a log-distribution\n \"\"\"\n if min_val == 0 and max_val == 0:\n value = 0\n elif categorical:\n min_ = torch.log10(torch.tensor(min_val))\n max_ = torch.log10(torch.tensor(max_val))\n steps = (max_ - min_ + 1).int().item()\n val_list = torch.logspace(min_, max_, steps=steps).tolist()\n val_list = val_list + [0] # adding value 0\n value = trial.suggest_categorical(name, val_list)\n else:\n value = trial.suggest_float(name, min_val, max_val, log=True)\n return value\n\n\ndef log_optuna_stats(study):\n \"\"\" \"\"\"\n complete_trials = study.get_trials(deepcopy=False, states=[optuna.trial.TrialState.COMPLETE])\n\n print_(\"Study statistics: \")\n print_(f\" Number of finished trials: {len(study.trials)}\")\n print_(f\" Number of complete trials: {len(complete_trials)}\")\n print_(\"\")\n\n trial = study.best_trial\n print_(f\"Best trial: Trial #{trial.number}\")\n print_(f\" Value: {trial.value}\")\n print_(\" Params: \")\n for key, value in trial.params.items():\n print_(f\" {key}: {value}\")\n\n return\n\n#\n", "repo_name": "angelvillar96/TemplaTorch", "sub_path": "src/lib/optuna.py", "file_name": "optuna.py", "file_ext": "py", "file_size_in_byte": 3732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 18, "usage_type": "call"}, {"api_name": "CONFIG.OPTUNA", "line_number": 18, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "optuna.load_study", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 74, "usage_type": "name"}, {"api_name": "lib.logger.log_info", "line_number": 74, "usage_type": "name"}, {"api_name": "lib.utils.set_in_dict", "line_number": 78, "usage_type": "call"}, {"api_name": "lib.utils", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.log10", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.log10", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.logspace", "line_number": 94, "usage_type": "call"}, {"api_name": "optuna.trial", "line_number": 104, "usage_type": "attribute"}, {"api_name": "lib.logger.print_", "line_number": 106, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 107, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 108, "usage_type": "call"}, {"api_name": "lib.logger.print_", 
"line_number": 109, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 112, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 113, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 114, "usage_type": "call"}, {"api_name": "lib.logger.print_", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "10724845677", "text": "import pyglet.sprite\nimport pymunk.vec2d\nfrom load_config import ConfigSectionMap as load_cfg\n\n\nclass Movable(object):\n\n def __init__(self):\n self.active = True\n self.direction = \"right\"\n\n\nclass JumpObject(object):\n\n def __init__(self):\n self.cd = 0.3\n self.cd_timer = 0.\n\n\nclass InputObject(object):\n\n def __init__(self, input_type):\n self.input_type = input_type\n if input_type == \"kb\":\n self.mapping = load_cfg(\"KeyboardMap\")\n # print(self.mapping)\n\n\nclass DirectionalSprite(object):\n\n def __init__(self, world, name):\n gt = world.get_texture\n self.textures = dict(\n left=gt(\"{0}_l\".format(name)),\n right=gt(\"{0}_r\".format(name)),\n up=gt(\"{0}_u\".format(name)),\n error=gt(\"debug\")\n )\n\n def get(self, direction):\n try:\n return self.textures[direction]\n except KeyError:\n return self.textures[\"error\"]\n\n\nclass SpriteObject(object):\n\n def __init__(self, img, x, y, w=None, h=None, batch=None):\n self.sprite = pyglet.sprite.Sprite(\n img, x, y, subpixel=False\n )\n hratio = 1\n if w:\n sw = self.sprite.width\n hratio = w / sw\n vratio = 1\n if h:\n sh = self.sprite.height\n vratio = h / sh\n self.sprite.scale = min(vratio, hratio)\n self.batch = batch\n\n\nclass ButtonIcon(object):\n\n def __init__(self, img, x, y, w=None, h=None, batch=None):\n self.sprite = pyglet.sprite.Sprite(\n img, x, y, batch=batch, subpixel=False\n )\n hratio = 1\n if w:\n sw = self.sprite.width\n hratio = w / sw\n vratio = 1\n if h:\n sh = self.sprite.height\n vratio = h / sh\n self.sprite.scale = min(vratio, hratio)\n\n\nclass FloatingSprite(object):\n\n def __init__(self, x, y):\n self.x, self.y = x, y\n\n\nclass ParallaxObject(object):\n def __init__(self, ratio=2):\n self.ratio = ratio\n\n\nclass PhysicsBody(object):\n\n def __init__(self, shape):\n self.active = False\n self.shape = shape\n self.body = shape.body\n # self.shape.collision_type = 1\n self.shape.friction = 1.\n self.shape.elasticity = 0\n self.shape.group = 0\n\n\nclass GroundingObject(object):\n\n def __init__(self):\n self.grounding = {\n 'normal': pymunk.vec2d.Vec2d.zero(),\n 'penetration': pymunk.vec2d.Vec2d.zero(),\n 'impulse': pymunk.vec2d.Vec2d.zero(),\n 'position': pymunk.vec2d.Vec2d.zero(),\n 'body': None\n }\n self.well_grounded = False\n\n\nclass StaticPhysicsBody(object):\n\n def __init__(self, shape, x, y):\n self.x, self.y = x, y\n self.shape = shape\n # self.shape.collision_type = 1\n self.shape.friction = 1.\n self.shape.elasticity = 0\n self.shape.group = 1\n\n\nclass ActionBinding(object):\n \"\"\"\n Binds input/event signals to functions.\n \"\"\"\n\n def __init__(self, action, params):\n self.action = action\n self.params = params\n\n def get(self):\n if self.params:\n # print(\"Calling with params {0}\".format(self.params))\n self.action(self.params)\n else:\n # print(\"Calling without params\")\n self.action()\n\n\nclass MouseControlled(object):\n def __init__(self, area, action=None, btn=\"left\"):\n self.area = area\n self.action = action\n self.btn = btn\n\n\nclass MouseScreenControlled(object):\n def __init__(self, action=None, params=None, btn=\"left\"):\n self.action = action\n self.params 
= params\n self.btn = btn\n\n\nclass MouseListen(object):\n def __init__(self, btn=None, event_type=\"click\"):\n self.btn = btn\n self.event_type = event_type\n\n\nclass KeyboardListen(object):\n def __init__(self, btn=None):\n self.btn = btn\n\n\nclass KeyboardControlled(object):\n def __init__(self, action=None, params=None, btn=None):\n self.action = action\n self.params = params\n self.btn = btn\n\n\nclass MouseClicked(object):\n def __init__(self, x, y, btn):\n self.x, self.y, self.btn = x, y, btn\n self.handled = False\n\n\nclass KeyPressed(object):\n def __init__(self, btn):\n self.btn = btn\n self.handled = False\n\n\nclass MouseBoundObject(object):\n def __init__(self, offset=(0, 0)):\n self.offset = offset\n\n\nclass SFXObject(object):\n\n def __init__(self, sound):\n self.sound = sound\n\n\nclass SoundEmitter(object):\n\n def __init__(self):\n self.sound = None\n", "repo_name": "NiclasEriksen/pyse", "sub_path": "components.py", "file_name": "components.py", "file_ext": "py", "file_size_in_byte": 4668, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "load_config.ConfigSectionMap", "line_number": 25, "usage_type": "call"}, {"api_name": "pyglet.sprite.sprite.Sprite", "line_number": 50, "usage_type": "call"}, {"api_name": "pyglet.sprite.sprite", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pyglet.sprite", "line_number": 50, "usage_type": "name"}, {"api_name": "pyglet.sprite.sprite.Sprite", "line_number": 68, "usage_type": "call"}, {"api_name": "pyglet.sprite.sprite", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pyglet.sprite", "line_number": 68, "usage_type": "name"}, {"api_name": "pymunk.vec2d.vec2d.Vec2d.zero", "line_number": 109, "usage_type": "call"}, {"api_name": "pymunk.vec2d.vec2d", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pymunk.vec2d", "line_number": 109, "usage_type": "name"}, {"api_name": "pymunk.vec2d.vec2d.Vec2d.zero", "line_number": 110, "usage_type": "call"}, {"api_name": "pymunk.vec2d.vec2d", "line_number": 110, "usage_type": "attribute"}, {"api_name": "pymunk.vec2d", "line_number": 110, "usage_type": "name"}, {"api_name": "pymunk.vec2d.vec2d.Vec2d.zero", "line_number": 111, "usage_type": "call"}, {"api_name": "pymunk.vec2d.vec2d", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pymunk.vec2d", "line_number": 111, "usage_type": "name"}, {"api_name": "pymunk.vec2d.vec2d.Vec2d.zero", "line_number": 112, "usage_type": "call"}, {"api_name": "pymunk.vec2d.vec2d", "line_number": 112, "usage_type": "attribute"}, {"api_name": "pymunk.vec2d", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "17340462296", "text": "from module import kelas\nfrom lib import wa, reply, message, numbers\nimport os, config, pandas\n\ndef auth(data):\n if kelas.getKodeDosen(data[0]) == '':\n ret = False\n else:\n ret = True\n return ret\n\ndef replymsg(driver, data):\n wmsg = reply.getWaitingMessage(os.path.basename(__file__).split('.')[0])\n wa.typeAndSendMessage(driver, wmsg)\n num = numbers.normalize(data[0])\n kodeDosen = kelas.getKodeDosen(num)\n tahun_id = '20192'\n try:\n npm = [npm for npm in data[3].split(' ') if npm.isdigit() and len(npm) == 7][0]\n msg = data[3].split(\" id \", 1)[1]\n id = [id for id in msg.split(' ') if npm.isdigit()][0]\n revisi = msg.lstrip(id).strip()\n print(npm, revisi, id)\n if checkRevisi(npm, kodeDosen, id, tahun_id):\n revisiSidang(npm, kodeDosen, revisi, tahun_id, id)\n msgreply = \"Sudah 
update...\\n\\n\"+listRevisi(npm, kodeDosen, tahun_id)\n else:\n msgreply = \"Salah id ato gak ada akses\"\n except Exception as e: \n msgreply = f\"Error {str(e)}\"\n \n return msgreply\n\n\ndef checkRevisi(npm, penguji, id, tahun_id):\n db=kelas.dbConnect()\n sql=f'select revisi from revisi_data where npm=\"{npm}\" and penguji=\"{penguji}\" and tahun_id=\"{tahun_id}\" and id=\"{id}\"'\n with db:\n cur=db.cursor()\n cur.execute(sql)\n row=cur.fetchone()\n if row:\n return True\n else:\n return False\n\ndef revisiSidang(npm, penguji, revisi, tahun_id, id):\n db=kelas.dbConnect()\n sql=f'UPDATE revisi_data SET revisi=\"{revisi}\" WHERE npm=\"{npm}\" and penguji=\"{penguji}\" and tahun_id=\"{tahun_id}\" and id=\"{id}\"'\n with db:\n cur=db.cursor()\n cur.execute(sql)\n \ndef listRevisi(npm, penguji, tahun_id):\n db=kelas.dbConnect()\n sql=f'select revisi, id from revisi_data where npm=\"{npm}\" and penguji=\"{penguji}\" and tahun_id=\"{tahun_id}\"'\n with db:\n cur=db.cursor()\n cur.execute(sql)\n rows=cur.fetchall()\n if rows: \n msg = f\"Revisi untuk {npm} dari {penguji}\"\n for i, row in enumerate(rows):\n msg += f\"\\n{(i+1)}. {row[0]} ({row[1]})\"\n return msg\n else:\n return False", "repo_name": "riandakarizal/ITeung", "sub_path": "module/update_revisi_sidang.py", "file_name": "update_revisi_sidang.py", "file_ext": "py", "file_size_in_byte": 2255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "module.kelas.getKodeDosen", "line_number": 6, "usage_type": "call"}, {"api_name": "module.kelas", "line_number": 6, "usage_type": "name"}, {"api_name": "lib.reply.getWaitingMessage", "line_number": 13, "usage_type": "call"}, {"api_name": "lib.reply", "line_number": 13, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "lib.wa.typeAndSendMessage", "line_number": 14, "usage_type": "call"}, {"api_name": "lib.wa", "line_number": 14, "usage_type": "name"}, {"api_name": "lib.numbers.normalize", "line_number": 15, "usage_type": "call"}, {"api_name": "lib.numbers", "line_number": 15, "usage_type": "name"}, {"api_name": "module.kelas.getKodeDosen", "line_number": 16, "usage_type": "call"}, {"api_name": "module.kelas", "line_number": 16, "usage_type": "name"}, {"api_name": "module.kelas.dbConnect", "line_number": 36, "usage_type": "call"}, {"api_name": "module.kelas", "line_number": 36, "usage_type": "name"}, {"api_name": "module.kelas.dbConnect", "line_number": 48, "usage_type": "call"}, {"api_name": "module.kelas", "line_number": 48, "usage_type": "name"}, {"api_name": "module.kelas.dbConnect", "line_number": 55, "usage_type": "call"}, {"api_name": "module.kelas", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "8468097166", "text": "from copy import deepcopy\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\nimport math\nfrom PIL import Image, ImageFilter, ImageOps\nimport random\nimport pdb\n\nclass GaussianBlur(object):\n \"\"\"\n Apply Gaussian Blur to the PIL image.\n \"\"\"\n def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):\n self.prob = p\n self.radius_min = radius_min\n self.radius_max = radius_max\n\n def __call__(self, img):\n do_it = random.random() <= self.prob\n if not do_it:\n return img\n\n return img.filter(\n ImageFilter.GaussianBlur(\n radius=random.uniform(self.radius_min, 
self.radius_max)\n )\n )\n\n\nclass Solarization(object):\n \"\"\"\n Apply Solarization to the PIL image.\n \"\"\"\n def __init__(self, p):\n self.p = p\n\n def __call__(self, img):\n if random.random() < self.p:\n return ImageOps.solarize(img)\n else:\n return img\n\n\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n # Cut & paste from PyTorch official master until it's in a few official releases - RW\n # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n def norm_cdf(x):\n # Computes standard normal cumulative distribution function\n return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n if (mean < a - 2 * std) or (mean > b + 2 * std):\n warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n \"The distribution of values may be incorrect.\",\n stacklevel=2)\n\n with torch.no_grad():\n # Values are generated by using a truncated uniform distribution and\n # then using the inverse CDF for the normal distribution.\n # Get upper and lower cdf values\n l = norm_cdf((a - mean) / std)\n u = norm_cdf((b - mean) / std)\n\n # Uniformly fill tensor with values from [l, u], then translate to\n # [2l-1, 2u-1].\n tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n # Use inverse cdf transform for normal distribution to get truncated\n # standard normal\n tensor.erfinv_()\n\n # Transform to proper mean, std\n tensor.mul_(std * math.sqrt(2.))\n tensor.add_(mean)\n\n # Clamp to ensure it's in the proper range\n tensor.clamp_(min=a, max=b)\n return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\nclass DINOHead(nn.Module):\n def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):\n super().__init__()\n \n self.nlayers = nlayers\n assert nlayers >= 0\n if nlayers == 0:\n self.mlp = nn.Identity()\n \n elif nlayers == 1:\n self.mlp = nn.Linear(in_dim, bottleneck_dim)\n else:\n layers = [nn.Linear(in_dim, hidden_dim)]\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n for _ in range(nlayers - 2):\n layers.append(nn.Linear(hidden_dim, hidden_dim))\n if use_bn:\n layers.append(nn.BatchNorm1d(hidden_dim))\n layers.append(nn.GELU())\n layers.append(nn.Linear(hidden_dim, bottleneck_dim))\n self.mlp = nn.Sequential(*layers)\n\n if nlayers > 0:\n self.apply(self._init_weights)\n self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))\n self.last_layer.weight_g.data.fill_(1)\n if norm_last_layer:\n self.last_layer.weight_g.requires_grad = False\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n def forward(self, x):\n x = self.mlp(x)\n x = nn.functional.normalize(x, dim=-1, p=2)\n if self.nlayers > 0:\n x = self.last_layer(x)\n return x\n\n\nclass DataAugmentationDINO(object):\n def __init__(self, global_crops_scale, local_crops_scale, local_crops_number, return_test=False):\n flip_and_color_jitter = transforms.Compose([\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply(\n [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)],\n p=0.8\n ),\n transforms.RandomGrayscale(p=0.2),\n ])\n normalize = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ])\n \n self.return_test 
= return_test\n self.test_transfo = transforms.Compose([\n transforms.Resize([256, 256]),\n transforms.CenterCrop(224),\n normalize,\n ])\n\n\n # weak aug\n self.weak_transfo = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n normalize,\n ])\n \n # first global crop\n self.global_transfo1 = transforms.Compose([\n transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n GaussianBlur(1.0),\n normalize,\n ])\n\n # second global crop\n self.global_transfo2 = transforms.Compose([\n transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n GaussianBlur(0.1),\n Solarization(0.2),\n normalize,\n ])\n \n self.global_transfo = transforms.Compose([\n #transforms.RandomAffine(degrees=(-15, 15)),\n transforms.RandomResizedCrop(224, scale=global_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n #Solarization(0.2),\n GaussianBlur(0.5),\n normalize,\n ])\n\n\n # transformation for the local small crops\n self.local_crops_number = local_crops_number\n self.local_transfo = transforms.Compose([\n transforms.RandomResizedCrop(96, scale=local_crops_scale, interpolation=Image.BICUBIC),\n flip_and_color_jitter,\n GaussianBlur(p=0.5),\n normalize,\n ])\n def __call__(self, image):\n crops = []\n crops.append(self.weak_transfo(image))\n crops.append(self.global_transfo(image))\n crops.append(self.global_transfo(image))\n for _ in range(self.local_crops_number):\n crops.append(self.local_transfo(image))\n\n if self.return_test:\n crops.append(self.test_transfo(image))\n\n return crops\n\n\nclass DePTDino(nn.Module):\n \"\"\"\n Build a Dino model with: a student, a teacher and centering\n https://arxiv.org/pdf/2104.14294.pdf\n \"\"\"\n\n def __init__(\n self,\n student,\n teacher,\n m=0.99,\n nlayer_head=0,\n dino_out_dim=65536,\n consistency_type='cls',\n hierarchy=False,\n norm_last_layer=True,\n checkpoint_path=None,\n ):\n \"\"\"\n m: moco momentum of updating key encoder (default: 0.999)\n \"\"\"\n super().__init__()\n \n self.consistency_type = consistency_type\n self.hierarchy = hierarchy\n # create the encoders\n self.student = student\n self.teacher = teacher\n self.m = m\n self.dino_out_dim = dino_out_dim\n self.norm_last_layer = norm_last_layer\n \n \n # create DINO head \n feature_dim = student.output_dim\n if not self.hierarchy:\n self.student_dinohead = DINOHead(feature_dim, dino_out_dim, norm_last_layer=norm_last_layer,\n nlayers=nlayer_head, hidden_dim=2048, bottleneck_dim=256)\n self.teacher_dinohead = DINOHead(feature_dim, dino_out_dim, norm_last_layer=norm_last_layer,\n nlayers=nlayer_head, hidden_dim=2048, bottleneck_dim=256)\n else:\n self.student_dinohead = nn.ModuleList()\n self.teacher_dinohead = nn.ModuleList()\n for i in range(len(self.student.encoder_list)):\n self.student_dinohead.append(DINOHead(feature_dim, dino_out_dim, norm_last_layer=norm_last_layer,\n nlayers=nlayer_head, hidden_dim=2048, bottleneck_dim=256))\n self.teacher_dinohead.append(DINOHead(feature_dim, dino_out_dim, norm_last_layer=norm_last_layer,\n nlayers=nlayer_head, hidden_dim=2048, bottleneck_dim=256))\n \n \n \n # freeze teacher model\n self.teacher.requires_grad_(False)\n self.teacher_dinohead.requires_grad_(False)\n\n\n if checkpoint_path:\n self.load_from_checkpoint(checkpoint_path)\n \n def set_m(self, m):\n self.m = m\n print(\"set momentum\", m)\n\n def get_full_params(self, fix_ss_head=False):\n 
backbone_params, extra_params = self.student.get_full_params()\n\n if not fix_ss_head:\n extra_params.extend(self.student_dinohead.parameters())\n else:\n self.student_dinohead.requires_grad_(False)\n\n return backbone_params, extra_params\n\n def get_prompt_params(self, fix_ss_head=False):\n backbone_params, extra_params = self.student.get_prompt_params()\n\n if not fix_ss_head:\n extra_params.extend(self.student_dinohead.parameters())\n else:\n self.student_dinohead.requires_grad_(False)\n return backbone_params, extra_params\n\n def get_ln_params(self, fix_ss_head=False):\n backbone_params, extra_params = self.student.get_ln_params()\n\n if not fix_ss_head:\n extra_params.extend(self.student_dinohead.parameters())\n else:\n self.student_dinohead.requires_grad_(False)\n\n return backbone_params, extra_params\n\n\n def load_from_checkpoint(self, checkpoint_path, same_student_teacher=False):\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n state_dict = dict()\n student_dict = dict()\n student_dinohead_dict = dict()\n for name, param in checkpoint[\"state_dict\"].items():\n # get rid of 'module.' prefix brought by DDP\n name = name[len(\"module.\") :] if name.startswith(\"module.\") else name\n state_dict[name] = param\n\n if name.startswith(\"student.\"):\n student_dict[name[len(\"student.\") :]] = param\n if name.startswith(\"student_dinohead.\"):\n student_dinohead_dict[name[len(\"student_dinohead.\") :]] = param\n\n if not same_student_teacher:\n msg = self.load_state_dict(state_dict, strict=True)\n logging.info(\n f\"Loaded from {checkpoint_path}; missing params: {msg.missing_keys}\"\n )\n else:\n self.student.load_state_dict(student_dict, strict=True)\n self.teacher.load_state_dict(student_dict, strict=True)\n self.student_dinohead.load_state_dict(student_dinohead_dict, strict=True)\n self.teacher_dinohead.load_state_dict(student_dinohead_dict, strict=True)\n\n logging.info(f\"Loaded from {checkpoint_path}; Student and Teacher use the same weights\")\n\n @torch.no_grad()\n def _momentum_update_teacher(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n # encoder_q -> encoder_k\n for param_q, param_k in zip(\n self.student.parameters(), self.teacher.parameters()\n ):\n param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)\n for param_q, param_k in zip(\n self.student_dinohead.parameters(), self.teacher_dinohead.parameters()\n ):\n param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)\n\n def multi_crop_forward(self, x, mode='student'):\n \n if mode == 'student':\n dino_head = self.student_dinohead\n model = self.student\n else:\n dino_head = self.teacher_dinohead\n model = self.teacher\n\n #feat_out, logits_out, prompt_out = model(torch.cat(x), return_feats=True)\n feat_out, logits_out, prompt_out = model(x, return_feats=True)\n\n\n \n if self.consistency_type == 'cls':\n # only add DINO consistency on CLS token\n return dino_head(feat_out), logits_out, None\n\n else:\n \n if not self.hierarchy:\n B, PN, D = prompt_out[-1].shape\n if 'cls' in self.consistency_type:\n return dino_head(feat_out), logits_out, dino_head(prompt_out[-1].reshape(B*PN, D)).reshape(B, PN, self.dino_out_dim)\n else:\n return None, logits_out, dino_head(prompt_out[-1].reshape(B*PN, D)).reshape(B, PN, self.dino_out_dim)\n\n else:\n stages = len(prompt_out)\n prompt_out_list = []\n for i in range(stages):\n B, PN, D = prompt_out[i].shape\n prompt_out_list.append(dino_head[i](prompt_out[i].reshape(B*PN, D)).reshape(B, PN, self.dino_out_dim))\n \n if 'cls' 
in self.consistency_type:\n return dino_head[-1](feat_out), logits_out, prompt_out_list\n else:\n return None, logits_out, prompt_out_list\n\n def forward(self, im_q, st_type='student', cls_only=False):\n \"\"\"\n Input:\n im_q: a batch of query images\n im_k: a batch of key images\n Output:\n feats_q: query image features before normalization\n logits_q: logits for class prediction from queries\n logits_ins: logits for instance prediction\n k: contrastive keys\n \"\"\"\n\n if cls_only:\n # compute query features\n if st_type == 'student':\n feats_q, logits_q, _ = self.student(im_q, return_feats=True)\n else:\n feats_q, logits_q, _ = self.teacher(im_q, return_feats=True)\n return feats_q, logits_q\n \n else:\n # compute key features\n self._momentum_update_teacher() # update the key encoder\n \n with torch.no_grad():\n teacher_output_cls, _, teacher_output_prompt = self.multi_crop_forward(im_q[0], mode='teacher')\n student_output_cls, student_logits, student_output_prompt = self.multi_crop_forward(im_q[1], mode='student')\n return teacher_output_cls, student_output_cls, student_logits, teacher_output_prompt, student_output_prompt\n\n\n", "repo_name": "yhygao/DePT", "sub_path": "model/dept_dino.py", "file_name": "dept_dino.py", "file_ext": "py", "file_size_in_byte": 15082, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "random.random", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.ImageFilter.GaussianBlur", "line_number": 28, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 28, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 29, "usage_type": "call"}, {"api_name": "random.random", "line_number": 42, "usage_type": "call"}, {"api_name": "PIL.ImageOps.solarize", "line_number": 43, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 43, "usage_type": "name"}, {"api_name": "math.erf", "line_number": 54, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 61, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Identity", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.GELU", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.GELU", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.nn.Linear", 
"line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.nn.utils.weight_norm", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.utils", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 123, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.functional.normalize", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 135, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 135, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 136, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 136, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomApply", "line_number": 137, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 137, "usage_type": "name"}, {"api_name": "torchvision.transforms.ColorJitter", "line_number": 138, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 138, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomGrayscale", "line_number": 141, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 141, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 143, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 143, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 144, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 144, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 145, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 145, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 149, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 149, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 150, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 150, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 151, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 151, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 157, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 157, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 158, "usage_type": "call"}, {"api_name": 
"torchvision.transforms", "line_number": 158, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomCrop", "line_number": 159, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 159, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 160, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 160, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 165, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 165, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 166, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 166, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 166, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 166, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 173, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 173, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 174, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 174, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 174, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 174, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 181, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 181, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 183, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 183, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 183, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 183, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 193, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 193, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 194, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 194, "usage_type": "name"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 194, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 213, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 254, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 254, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 255, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 307, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 323, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 332, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 334, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 412, "usage_type": "call"}]} +{"seq_id": "36164291346", "text": "import json\r\n\r\nfrom kubernetes import client, config\r\nfrom kubernetes.client import V1PodList, V1Pod, V1ObjectMeta, V1Job, \\\r\n V1OwnerReference, V1ConfigMap\r\n\r\nconfig.load_kube_config(config_file='config')\r\nconfiguration = 
client.Configuration()\r\n\r\ncore_v1_api = client.CoreV1Api(client.ApiClient(configuration))\r\nbatch_v1_api = client.BatchV1Api(client.ApiClient(configuration))\r\n\r\nmount_path = \"/etc/build/data/\"\r\n\r\n\r\ndef create_job(name, configmap_name, container_name, container_image,\r\n container_command, namespace=\"default\", env_vars={}):\r\n \"\"\"\r\n Create a k8 Job Object\r\n Args:\r\n name:\r\n configmap_name:\r\n container_name:\r\n container_image:\r\n container_command:list类型,执行程序的命令,例如:['python','/home/test.py']\r\n namespace:\r\n env_vars: 环境变量\r\n\r\n Returns:\r\n\r\n \"\"\"\r\n try:\r\n # Body是对象体\r\n body = client.V1Job(api_version=\"batch/v1\", kind=\"Job\")\r\n\r\n # 对象需要 Metadata,每个JOB必须有一个不同的名称!\r\n body.metadata = client.V1ObjectMeta(namespace=namespace, name=name)\r\n\r\n # 添加 Status\r\n body.status = client.V1JobStatus()\r\n\r\n # 开始 Template...\r\n template = client.V1PodTemplate()\r\n template.template = client.V1PodTemplateSpec()\r\n\r\n # 在Env中传递Arguments:\r\n env_list = []\r\n for env_name, env_value in env_vars.items():\r\n env_list.append(client.V1EnvVar(name=env_name, value=env_value))\r\n\r\n container = client.V1Container(name=container_name,\r\n image=container_image, env=env_list)\r\n\r\n container.command = container_command\r\n container.image_pull_policy = \"IfNotPresent\"\r\n volume_mount = client.V1VolumeMount(name=\"config-volume\",\r\n mount_path=mount_path)\r\n container.volume_mounts = [volume_mount]\r\n\r\n config_map = client.V1ConfigMapVolumeSource(name=configmap_name)\r\n\r\n volumes = client.V1Volume(name=\"config-volume\", config_map=config_map)\r\n\r\n template.template.spec = client.V1PodSpec(containers=[container],\r\n restart_policy='Never',\r\n volumes=[volumes],\r\n node_selector={'gpu': 'true'})\r\n # volumes = [volumes])\r\n\r\n # 最后,创建V1JobSpec\r\n body.spec = client.V1JobSpec(ttl_seconds_after_finished=600,\r\n template=template.template)\r\n response = batch_v1_api.create_namespaced_job(namespace, body,\r\n pretty=True)\r\n return True, response\r\n except Exception as ex:\r\n print(ex)\r\n return False, \"k8 Job Object creates Failed!\"\r\n\r\n\r\ndef create_configmap(config_map):\r\n \"\"\"\r\n Create a k8 ConfigMap Object\r\n Args:\r\n config_map: json类型\r\n\r\n Returns:\r\n\r\n \"\"\"\r\n try:\r\n namespace = config_map['namespace']\r\n name = config_map['name']\r\n data = config_map['data']\r\n\r\n metadata = client.V1ObjectMeta(name=name)\r\n body = client.V1ConfigMap(data=data, metadata=metadata)\r\n\r\n if body:\r\n core_v1_api.create_namespaced_config_map(namespace=namespace,\r\n body=body, pretty=True)\r\n\r\n return True\r\n except Exception as ex:\r\n print(ex)\r\n return False\r\n\r\n\r\ndef delete_job_by_name(job_name, namespace):\r\n try:\r\n if search_job_by_name(job_name, namespace):\r\n batch_v1_api.delete_namespaced_job(name=job_name,\r\n namespace=namespace)\r\n return True\r\n return False\r\n\r\n except Exception as ex:\r\n print(ex)\r\n return False\r\n\r\n\r\ndef search_job_by_name(job_name, namespace):\r\n jobs = batch_v1_api.list_namespaced_job(namespace=namespace)\r\n for job in jobs.items:\r\n if job_name == job.metadata.name:\r\n return True\r\n\r\n return False\r\n\r\n\r\n# 先删除pod再删除job\r\ndef delete_pod_by_job(job_name, namespace):\r\n try:\r\n # 获取job的uid\r\n job_uid = batch_v1_api.read_namespaced_job(name=job_name,\r\n namespace=namespace).metadata.uid\r\n print(job_uid)\r\n # 获取所有的pod\r\n pods = core_v1_api.list_namespaced_pod(namespace=namespace)\r\n for pod in pods.items:\r\n # 得到pod的owner 
list\r\n owner_list = pod.metadata.owner_references\r\n if owner_list:\r\n uid_list = [owner.uid for owner in owner_list]\r\n if job_uid in uid_list:\r\n core_v1_api.delete_namespaced_pod(name=pod.metadata.name,\r\n namespace=namespace)\r\n except Exception as ex:\r\n print(ex)\r\n return False\r\n\r\n\r\ndef delete_config_map_by_name(config_name, namespace):\r\n try:\r\n config_maps = core_v1_api.list_namespaced_config_map(\r\n namespace=namespace)\r\n\r\n for config_map in config_maps.items:\r\n if config_name == config_map.metadata.name:\r\n core_v1_api.delete_namespaced_config_map(name=config_name,\r\n namespace=namespace)\r\n except Exception as ex:\r\n print(ex)\r\n return False\r\n\r\n\r\nif __name__ == '__main__':\r\n import requests\r\n requests.get()\r\n # data = {'path.json': \"{'test':1}\"}\r\n # config_map = {\"namespace\": \"default\", \"name\": \"config-map-python-zyh\",\r\n # \"data\": data}\r\n # if create_configmap(config_map):\r\n # create_job(name=\"kube-job-zyh\", configmap_name=\"config-map-python-zyh\",\r\n # container_name=\"kube-job-zyh\",\r\n # container_image=\"job-test:v0.1\",\r\n # container_command=['python3', '/usr/src/app/test.py'],\r\n # namespace=\"default\", env_vars={} )\r\n # print(delete_job_by_name('cronjob-sample-1571714760', 'default'))\r\n # delete_pod_by_job('hello-crd-1571822280', 'default')\r\n # search_job_by_name(job_name=None, namespace='default')\r\n delete_config_map_by_name(config_name='config-map-5d905d54bf8922c856561428-2019-09-08', namespace='illegal-building-inspection')\r\n", "repo_name": "ThinkBlue1991/tools", "sub_path": "k8s/create_job.py", "file_name": "create_job.py", "file_ext": "py", "file_size_in_byte": 6444, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "kubernetes.config.load_kube_config", "line_number": 7, "usage_type": "call"}, {"api_name": "kubernetes.config", "line_number": 7, "usage_type": "name"}, {"api_name": "kubernetes.client.Configuration", "line_number": 8, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 8, "usage_type": "name"}, {"api_name": "kubernetes.client.CoreV1Api", "line_number": 10, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 10, "usage_type": "name"}, {"api_name": "kubernetes.client.ApiClient", "line_number": 10, "usage_type": "call"}, {"api_name": "kubernetes.client.BatchV1Api", "line_number": 11, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 11, "usage_type": "name"}, {"api_name": "kubernetes.client.ApiClient", "line_number": 11, "usage_type": "call"}, {"api_name": "kubernetes.client.V1Job", "line_number": 34, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 34, "usage_type": "name"}, {"api_name": "kubernetes.client.V1ObjectMeta", "line_number": 37, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 37, "usage_type": "name"}, {"api_name": "kubernetes.client.V1JobStatus", "line_number": 40, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 40, "usage_type": "name"}, {"api_name": "kubernetes.client.V1PodTemplate", "line_number": 43, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 43, "usage_type": "name"}, {"api_name": "kubernetes.client.V1PodTemplateSpec", "line_number": 44, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 44, "usage_type": "name"}, {"api_name": "kubernetes.client.V1EnvVar", "line_number": 49, "usage_type": "call"}, 
{"api_name": "kubernetes.client", "line_number": 49, "usage_type": "name"}, {"api_name": "kubernetes.client.V1Container", "line_number": 51, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 51, "usage_type": "name"}, {"api_name": "kubernetes.client.V1VolumeMount", "line_number": 56, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 56, "usage_type": "name"}, {"api_name": "kubernetes.client.V1ConfigMapVolumeSource", "line_number": 60, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 60, "usage_type": "name"}, {"api_name": "kubernetes.client.V1Volume", "line_number": 62, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 62, "usage_type": "name"}, {"api_name": "kubernetes.client.V1PodSpec", "line_number": 64, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 64, "usage_type": "name"}, {"api_name": "kubernetes.client.V1JobSpec", "line_number": 71, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 71, "usage_type": "name"}, {"api_name": "kubernetes.client.V1ObjectMeta", "line_number": 95, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 95, "usage_type": "name"}, {"api_name": "kubernetes.client.V1ConfigMap", "line_number": 96, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 96, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "998018190", "text": "from core.models import Aluno, AlunoSchema, Participante\nimport numpy as np\nfrom numpy.linalg import norm\nfrom flask import abort\nimport io\nfrom PIL import Image\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nimport torch\nimport torchvision.transforms as transforms\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom flask_jwt_extended import get_jwt_identity, verify_jwt_in_request\nfrom functools import wraps\n\ndef token_required():\n def wrapper(fn):\n @wraps(fn)\n def decorator(*args, **kwargs):\n verify_jwt_in_request()\n user_id = get_jwt_identity()\n return fn(*args, **kwargs)\n return decorator\n return wrapper\n\n\nif torch.cuda.is_available():\n current_device = torch.device('cuda')\nelse:\n current_device = torch.device('cpu')\n\n\nface_detector = MTCNN(keep_all=True, device=current_device)\nfeature_extractor = InceptionResnetV1(pretrained='vggface2').eval()\n\n\ndef timestamp_to_datetime_object(timestamp):\n return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')\n\n\ndef find_faces(img_bytes):\n \n face_list = []\n \n img = Image.open(io.BytesIO(img_bytes))\n\n # Detect faces\n boxes, _ = face_detector.detect(img)\n \n # Check if any face was found\n found_faces = not (str(type(boxes)) == \"\")\n\n if found_faces:\n for box in boxes:\n cropped_face = img.crop(box.tolist()).resize((160,160), resample=Image.BICUBIC)\n face_list.append(cropped_face)\n\n return face_list\n\n\ndef from_img_dir_to_bytes(directory):\n img = Image.open(directory)\n imgByteArr = io.BytesIO()\n img.save(imgByteArr, format='JPEG')\n return imgByteArr.getvalue()\n\n\ndef from_array_to_bytes(array):\n imgByteArr = io.BytesIO()\n np.save(imgByteArr, array)\n imgByteArr.seek(0)\n return imgByteArr.read()\n\n\ndef resize_img_bytes(img_bytes):\n imgByteArr = io.BytesIO()\n img = Image.open(io.BytesIO(img_bytes)).resize((512, 256), resample=Image.BICUBIC)\n img.save(imgByteArr, format='JPEG')\n return imgByteArr.getvalue()\n\n\ndef obter_threshold(lista_de_faces_da_turma):\n \n similaridade_maxima = 0\n \n 
for face1 in lista_de_faces_da_turma:\n for face2 in lista_de_faces_da_turma:\n similaridade = cos_sim(face1[0], face2[0])\n if similaridade > similaridade_maxima and similaridade != 1.0:\n similaridade_maxima = similaridade\n \n return similaridade_maxima\n\n\ndef obter_presenca(matriculas, embedding_participantes, embeddings_do_dia, threshold=0.49):\n \n embeddings_restantes = deepcopy(embedding_participantes)\n matriculas_restantes = deepcopy(matriculas)\n status_presenca = {aluno: False for aluno in matriculas}\n\n for index, pessoa_na_aula in enumerate(embeddings_do_dia):\n \n similaridades = []\n \n for feature_aluno, matricula_aluno in zip(embeddings_restantes, matriculas_restantes):\n similaridades.append(cos_sim(feature_aluno, pessoa_na_aula))\n #print(f'Similaridade da pessoa {index+1} com {matricula_aluno} foi de: {cos_sim(feature_aluno, pessoa_na_aula)}')\n \n #print('\\n')\n\n maxima_similaridade = max(similaridades)\n index_aluno_mais_parecido = similaridades.index(maxima_similaridade)\n \n if maxima_similaridade > threshold and maxima_similaridade != 1.0:\n # Muda o status do aluno para presente\n status_presenca[matriculas_restantes[index_aluno_mais_parecido]] = True\n #print(f'Aluno {matriculas_restantes[index_aluno_mais_parecido]} esta presente')\n \n # Remove esse aluno da lista de participantes aptos a serem reconhecidos\n del matriculas_restantes[index_aluno_mais_parecido]\n del embeddings_restantes[index_aluno_mais_parecido]\n \n if len(matriculas_restantes) == 0: # Se todas as matriculas já estiverem sido analisadas, break\n break\n\n return status_presenca\n\n\ndef get_face_features(face_list):\n \n face_embeddings = []\n\n for face in face_list:\n face_as_tensor = transforms.ToTensor()(face).unsqueeze(0)\n face_embeddings.append(feature_extractor(face_as_tensor).squeeze(0).detach().numpy())\n\n return face_embeddings\n\n\ndef process_faces(img_bytes):\n faces_found = find_faces(img_bytes)\n features = get_face_features(faces_found)\n return features\n\n\ndef cos_sim(a,b): \n return np.dot(a, b)/(norm(a)*norm(b))\n\n\ndef obter_aluno_pela_matricula(lista_matricula):\n lista_de_alunos = [Aluno.query.filter_by(matricula=matricula).all()[0] for matricula in lista_matricula]\n return AlunoSchema(many=True, only=('nome', 'matricula', 'curso')).dump(lista_de_alunos)\n\n\ndef checar_presenca_da_turma(turma_codigo, img_turma):\n \n face_embeddings_do_dia = np.array(process_faces(img_turma))\n\n if len(face_embeddings_do_dia) < 1:\n abort(400, 'Não foram detectadas faces na imagem enviada.')\n\n # Obtenção da matricula dos participantes\n alunos_participantes = Participante.query.filter_by(turma_codigo=turma_codigo)\n matricula_participantes = [aluno.matricula for aluno in alunos_participantes.all()]\n\n # Obtencao dos embeddings dos participantes\n embedding_participantes = [Aluno.query.filter_by(matricula=matricula).all()[0].embedding for matricula in matricula_participantes]\n embedding_participantes = [np.load(io.BytesIO(data)) for data in embedding_participantes]\n \n # Similaridade tem que ser acima de 49%\n return obter_presenca(matricula_participantes, embedding_participantes, face_embeddings_do_dia, threshold=0.49)\n\ndef clear_parser(dictionary):\n '''\n Limpa o dicionário de chaves com valores 'None'\n '''\n return {k: v for k, v in dictionary.items() if v is not None}\n\n", "repo_name": "angelomenezes/face-attendance-api", "sub_path": "core/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 5791, "program_lang": "python", "lang": "pt", 
"doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask_jwt_extended.verify_jwt_in_request", "line_number": 19, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 29, "usage_type": "call"}, {"api_name": "facenet_pytorch.MTCNN", "line_number": 32, "usage_type": "call"}, {"api_name": "facenet_pytorch.InceptionResnetV1", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 44, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 44, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 54, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 54, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 61, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 62, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 69, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 75, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 76, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 76, "usage_type": "call"}, {"api_name": "PIL.Image.BICUBIC", "line_number": 76, "usage_type": "attribute"}, {"api_name": "copy.deepcopy", "line_number": 96, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 97, "usage_type": "call"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 133, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.dot", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 146, "usage_type": "call"}, {"api_name": "core.models.Aluno.query.filter_by", "line_number": 150, "usage_type": "call"}, {"api_name": "core.models.Aluno.query", "line_number": 150, "usage_type": "attribute"}, {"api_name": "core.models.Aluno", "line_number": 150, "usage_type": "name"}, {"api_name": "core.models.AlunoSchema", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 159, "usage_type": "call"}, {"api_name": "core.models.Participante.query.filter_by", "line_number": 162, "usage_type": "call"}, {"api_name": "core.models.Participante.query", "line_number": 162, "usage_type": "attribute"}, {"api_name": "core.models.Participante", "line_number": 162, "usage_type": "name"}, {"api_name": "core.models.Aluno.query.filter_by", "line_number": 166, "usage_type": "call"}, {"api_name": "core.models.Aluno.query", "line_number": 166, "usage_type": "attribute"}, {"api_name": "core.models.Aluno", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.load", 
"line_number": 167, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "21435346989", "text": "import pandas as pd\nimport argparse\n\nif __name__ == '__main__':\n \n # Accept Input\n parser = argparse.ArgumentParser(description='Combine ratings and titles into a single CSV file through joining MovieID.')\n parser.add_argument('-ratings', required=True, type=str, help='movieRatings file')\n parser.add_argument('-titles', required=True, type=str, help='movieTitles file')\n parser.add_argument('-output', required=True, type=str, help='Output CSV file')\n args = parser.parse_args()\n\n # Call function\n ratings_df = pd.read_csv(args.ratings)[['UserID', 'Rating', 'RatingDate', 'MovieID']]\n titles_df = pd.read_csv(args.titles)[['MovieID', 'ReleaseYear', 'MovieTitle']]\n\n # Merge dataframe\n movies_df = pd.merge(ratings_df, titles_df, how=\"left\", on=\"MovieID\")\n\n # Remove duplicates\n movies_df.drop_duplicates(inplace=True)\n\n # Data slicing\n f = ['count','mean']\n df_movie_summary = movies_df.groupby('MovieID')['Rating'].agg(f)\n df_movie_summary.index = df_movie_summary.index.map(int)\n movie_benchmark = round(df_movie_summary['count'].quantile(0.7),0)\n drop_movie_list = df_movie_summary[df_movie_summary['count'] < movie_benchmark].index\n\n df_cust_summary = movies_df.groupby('UserID')['Rating'].agg(f)\n df_cust_summary.index = df_cust_summary.index.map(int)\n cust_benchmark = round(df_cust_summary['count'].quantile(0.7),0)\n drop_cust_list = df_cust_summary[df_cust_summary['count'] < cust_benchmark].index\n \n movies_df = movies_df[~movies_df['MovieID'].isin(drop_movie_list)]\n movies_df = movies_df[~movies_df['UserID'].isin(drop_movie_list)]\n\n # Save output\n movies_df = movies_df[['MovieTitle','UserID','Rating']]\n movies_df.to_csv(args.output, index=False, header=False)\n", "repo_name": "Grg0rry/MapReduce-Recommendation-System", "sub_path": "preprocessing/CombineMovieTitles.py", "file_name": "CombineMovieTitles.py", "file_ext": "py", "file_size_in_byte": 1766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "14322943945", "text": "import logging\nfrom typing import Dict, List, Union\n\nimport requests\nfrom django.conf import settings as django_settings\nfrom requests.exceptions import RequestException, HTTPError, URLRequired\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Auth0ClientException(Exception):\n ...\n\n\nclass Auth0Client:\n\n audience: str\n\n base_url: str\n\n client_id: str\n\n client_secret: str\n\n domain: str\n\n def __init__(self):\n self.domain = django_settings.SOCIAL_AUTH_AUTH0_DOMAIN\n self.audience = f'https://{self.domain}/api/v2/'\n self.client_id = django_settings.SOCIAL_AUTH_AUTH0_KEY\n self.client_secret = django_settings.SOCIAL_AUTH_AUTH0_SECRET\n self.base_url = f\"https://{self.domain}\"\n\n def request(self, method: str, path: str, **kwargs) -> Union[Dict, List]:\n access_token = self.get_access_token()\n headers = {\n 'Authorization': f'Bearer {access_token}',\n 'Content-Type': 'application/json'\n }\n\n try:\n response = requests.request(method, f'{self.base_url}{path}', headers=headers, **kwargs)\n return 
response.json()\n except (HTTPError, URLRequired, RequestException) as exc:\n raise Auth0ClientException(str(exc))\n\n def get_access_token(self) -> str:\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'audience': self.audience\n }\n response = requests.post(f'{self.base_url}/oauth/token', data=payload)\n oauth = response.json()\n return oauth.get('access_token')\n\n\nauth0_client = Auth0Client()\n", "repo_name": "bboogaard/auth0_sso", "sub_path": "auth0_sso/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.settings.SOCIAL_AUTH_AUTH0_DOMAIN", "line_number": 29, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "django.conf.settings.SOCIAL_AUTH_AUTH0_KEY", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "django.conf.settings.SOCIAL_AUTH_AUTH0_SECRET", "line_number": 32, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 32, "usage_type": "name"}, {"api_name": "requests.request", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.exceptions.HTTPError", "line_number": 45, "usage_type": "name"}, {"api_name": "requests.exceptions.URLRequired", "line_number": 45, "usage_type": "name"}, {"api_name": "requests.exceptions.RequestException", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "7764051356", "text": "import pandas as pd\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import KFold\n\n\nimport tensorflow as tf\nfrom tensorflow.keras import Model, Input, backend as K\nfrom tensorflow.keras.initializers import Constant\nfrom tensorflow.keras.layers import Dense, Embedding, Bidirectional, LSTM, Dropout\nfrom tensorflow.keras.layers.experimental.preprocessing import TextVectorization\nfrom tensorflow.keras.metrics import RootMeanSquaredError\nfrom tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.optimizers import Adam\nfrom transformers import TFBertModel, BertConfig, BertTokenizerFast\n\n\nmodel_name = 'bert_v13'\n\ndata_dir = Path('/commonlitreadabilityprize')\ntrain_file = data_dir / 'train.csv'\ntest_file = data_dir / 'test.csv'\nsample_file = data_dir / 'sample_submission.csv'\n\nbuild_dir = Path('./build/')\noutput_dir = build_dir / model_name\ntrn_encoded_file = output_dir / 'trn.enc.joblib'\nval_predict_file = output_dir / f'{model_name}.val.txt'\nsubmission_file = 'submission.csv'\n\npretrained_dir = '/tfbert-large-uncased'\n\nid_col = 'id'\ntarget_col = 'target'\ntext_col = 'excerpt'\n\nmax_len = 205\nn_fold = 5\nn_est = 9\nn_stop = 2\nbatch_size = 8\nseed = 42\n\n\ntrn = pd.read_csv(train_file, index_col=id_col)\ntst = pd.read_csv(test_file, index_col=id_col)\ny = trn[target_col].values\nprint(trn.shape, y.shape, tst.shape)\ntrn.head()\n\n\n\ndef 
load_tokenizer():\n if not os.path.exists(pretrained_dir + '/vocab.txt'):\n Path(pretrained_dir).mkdir(parents=True, exist_ok=True)\n tokenizer = BertTokenizerFast.from_pretrained(\"bert-large-uncased\")\n tokenizer.save_pretrained(pretrained_dir)\n else:\n print('loading the saved pretrained tokenizer')\n tokenizer = BertTokenizerFast.from_pretrained(pretrained_dir)\n \n model_config = BertConfig.from_pretrained(pretrained_dir)\n model_config.output_hidden_states = True\n return tokenizer, model_config\n\ndef load_bert(config):\n if not os.path.exists(pretrained_dir + '/tf_model.h5'):\n Path(pretrained_dir).mkdir(parents=True, exist_ok=True)\n bert_model = TFBertModel.from_pretrained(\"bert-large-uncased\", config=config)\n bert_model.save_pretrained(pretrained_dir)\n else:\n print('loading the saved pretrained model')\n bert_model = TFBertModel.from_pretrained(pretrained_dir, config=config)\n return bert_model\n\n\n\ndef bert_encode(texts, tokenizer, max_len=max_len):\n input_ids = []\n token_type_ids = []\n attention_mask = []\n \n for text in texts:\n token = tokenizer(text, max_length=max_len, truncation=True, padding='max_length',\n add_special_tokens=True)\n input_ids.append(token['input_ids'])\n token_type_ids.append(token['token_type_ids'])\n attention_mask.append(token['attention_mask'])\n \n return np.array(input_ids), np.array(token_type_ids), np.array(attention_mask)\n\n\n\ntokenizer, bert_config = load_tokenizer()\n\nX = bert_encode(trn[text_col].values, tokenizer, max_len=max_len)\nX_tst = bert_encode(tst[text_col].values, tokenizer, max_len=max_len)\ny = trn[target_col].values\nprint(X[0].shape, X_tst[0].shape, y.shape)\n\n\n\ndef build_model(bert_model, max_len=max_len): \n input_ids = Input(shape=(max_len,), dtype=tf.int32, name=\"input_ids\")\n token_type_ids = Input(shape=(max_len,), dtype=tf.int32, name=\"token_type_ids\")\n attention_mask = Input(shape=(max_len,), dtype=tf.int32, name=\"attention_mask\")\n\n sequence_output = bert_model(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)[0]\n clf_output = sequence_output[:, 0, :]\n clf_output = Dropout(.1)(clf_output)\n out = Dense(1, activation='linear')(clf_output)\n \n model = Model(inputs=[input_ids, token_type_ids, attention_mask], outputs=out)\n model.compile(Adam(lr=1e-5), loss='mean_squared_error', metrics=[RootMeanSquaredError()])\n \n return model\n\n\n\ndef scheduler(epoch, lr, warmup=5, decay_start=10):\n if epoch <= warmup:\n return lr / (warmup - epoch + 1)\n elif warmup < epoch <= decay_start:\n return lr\n else:\n return lr * tf.math.exp(-.1)\n\nls = LearningRateScheduler(scheduler)\nes = EarlyStopping(patience=n_stop, restore_best_weights=True)\n\ncv = KFold(n_splits=n_fold, shuffle=True, random_state=seed)\n\np = np.zeros_like(y, dtype=float)\np_tst = np.zeros((X_tst[0].shape[0], ), dtype=float)\nfor i, (i_trn, i_val) in enumerate(cv.split(X[0]), 1):\n print(f'training CV #{i}:')\n tf.random.set_seed(seed + i)\n\n bert_model = load_bert(bert_config)\n clf = build_model(bert_model, max_len=max_len)\n if i == 1:\n print(clf.summary())\n history = clf.fit([x[i_trn] for x in X], y[i_trn],\n validation_data=([x[i_val] for x in X], y[i_val]),\n epochs=n_est,\n batch_size=batch_size,\n callbacks=[ls])\n clf.save_weights(f'{model_name}_cv{i}.h5')\n\n p[i_val] = clf.predict([x[i_val] for x in X]).flatten()\n p_tst += clf.predict(X_tst).flatten() / n_fold\n \n K.clear_session()\n del clf, bert_model\n gc.collect()\n\n\n\nprint(f'CV RMSE: {mean_squared_error(y, p, 
squared=False):.6f}')\nnp.savetxt(val_predict_file, p, fmt='%.6f')\n\n\n\nsub = pd.read_csv(sample_file, index_col=id_col)\nsub[target_col] = p_tst\nsub.to_csv(submission_file)\nsub.head()\n", "repo_name": "Bhargav6031/readability_score", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5418, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 46, "usage_type": "call"}, {"api_name": "transformers.BertTokenizerFast.from_pretrained", "line_number": 56, "usage_type": "call"}, {"api_name": "transformers.BertTokenizerFast", "line_number": 56, "usage_type": "name"}, {"api_name": "transformers.BertTokenizerFast.from_pretrained", "line_number": 60, "usage_type": "call"}, {"api_name": "transformers.BertTokenizerFast", "line_number": 60, "usage_type": "name"}, {"api_name": "transformers.BertConfig.from_pretrained", "line_number": 62, "usage_type": "call"}, {"api_name": "transformers.BertConfig", "line_number": 62, "usage_type": "name"}, {"api_name": "transformers.TFBertModel.from_pretrained", "line_number": 69, "usage_type": "call"}, {"api_name": "transformers.TFBertModel", "line_number": 69, "usage_type": "name"}, {"api_name": "transformers.TFBertModel.from_pretrained", "line_number": 73, "usage_type": "call"}, {"api_name": "transformers.TFBertModel", "line_number": 73, "usage_type": "name"}, {"api_name": "tensorflow.keras.Input", "line_number": 104, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 104, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Input", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Input", "line_number": 106, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 111, "usage_type": "call"}, {"api_name": "tensorflow.keras.Model", "line_number": 113, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.keras.metrics.RootMeanSquaredError", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.math.exp", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.math", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.callbacks.LearningRateScheduler", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.EarlyStopping", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 131, "usage_type": "call"}, {"api_name": "tensorflow.random.set_seed", "line_number": 137, "usage_type": "call"}, {"api_name": "tensorflow.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.backend.clear_session", "line_number": 153, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend", "line_number": 153, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 159, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 164, "usage_type": "call"}]} +{"seq_id": "755541590", "text": "import os\nimport sys\nimport argparse\nfrom utils_ner import 
read_from_path, \\\n iob2, \\\n iob_iobes, \\\n iobes_iob, \\\n update_tag_scheme\n\n\nparser = argparse.ArgumentParser(description='Convert conll iob1 dataset to iob2 dataset.')\nparser.add_argument(\"--files\", \n nargs='*',\n default=[\"./ner_data/en/eng.train\", \"./ner_data/en/eng.testa\", \"./ner_data/en/eng.testb\"], \n help=\"Address of the files. Value-type: list(str)\")\nparser.add_argument(\"--encoding\", \n default=\"utf-8\", \n type=str, \n help=\"The encoding method that will be used to read the texts. Value-type: (str)\")\nparser.add_argument(\"--lang_dict_address\", \n default=\"./lang_dict.txt\", \n type=str, \n help=\"Exclude the seed value from the experiment. Value-type: (str)\")\nparser.add_argument(\"--rename\", \n action='store_true',\n help=\"Rename the language two char short form with standard two char short form. Value-type: (bool)\")\nparams = parser.parse_args()\n\n\ndef get_lang_dict(lang_dict_address):\n _dict = {}\n with open(lang_dict_address, \"r\") as filePtr:\n for line in filePtr:\n lang = line.strip().split()\n assert len(lang) == 2\n _dict[lang[0]] = lang[1] \n return _dict\n\n\n# lang_dict = get_lang_dict(params.lang_dict_address)\ndatasets = params.files\nfor _file in datasets:\n sentences = read_from_path(_file, params.encoding)\n update_tag_scheme(sentences, 'iob')\n \n # prepare the new file name.\n new_file = _file+\".iob2\"\n if params.rename:\n for k, v in lang_dict.items():\n if k in new_file:\n new_file = new_file.replace(k, v)\n\n flag = 0\n with open(new_file, \"w\", encoding=params.encoding) as filePtr:\n for words in sentences:\n if flag:\n filePtr.write(\"\\n\")\n for word in words:\n filePtr.write(word[0]+\" \"+word[-1]+\"\\n\")\n flag = 1", "repo_name": "sbmaruf/UXLA", "sub_path": "convert_iob1_to_iob2.py", "file_name": "convert_iob1_to_iob2.py", "file_ext": "py", "file_size_in_byte": 2197, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "utils_ner.read_from_path", "line_number": 43, "usage_type": "call"}, {"api_name": "utils_ner.update_tag_scheme", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "6803484814", "text": "from django.urls import path\r\n\r\nfrom Arts_App.club_views import IndexView, Add_Events, Add_Album, View_events, View_Participant, View_Scheduledtime\r\n\r\nurlpatterns = [\r\n\r\n path('',IndexView.as_view()),\r\n path('Add_Events', Add_Events.as_view()),\r\n path('Add_Album',Add_Album.as_view()),\r\n path('View_events',View_events.as_view()),\r\n path('participant',View_Participant.as_view()),\r\n path('setime',View_Scheduledtime.as_view())\r\n\r\n]\r\ndef urls():\r\n return urlpatterns, 'club','club'", "repo_name": "Sreelakshmi30/Artifestives", "sub_path": "club_urls.py", "file_name": "club_urls.py", "file_ext": "py", "file_size_in_byte": 501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "Arts_App.club_views.IndexView.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "Arts_App.club_views.IndexView", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "Arts_App.club_views.Add_Events.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "Arts_App.club_views.Add_Events", "line_number": 8, 
"usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "Arts_App.club_views.Add_Album.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "Arts_App.club_views.Add_Album", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_events.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_events", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_Participant.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_Participant", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_Scheduledtime.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "Arts_App.club_views.View_Scheduledtime", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "40687728102", "text": "import json\nimport datetime\nimport urllib\nimport urllib.request\nimport urllib.error\nimport requests\nimport pandas as pd\nfrom config import betfair_configuration as configuration\nfrom pandas.io.json._normalize import nested_to_record \nfrom datetime import datetime\n\n#I now only have the ssoid being generated once in it init func and the variable created is now being called where the func once was\n#This should fix the request errors \n\nclass betfair_api:\n\n bet_url : str = configuration['api']['bet_url']\n password : str = configuration['auth']['password']\n username : str = configuration['auth']['username']\n app_key : str = configuration['auth']['app_key']\n \n\n def __init__(self):\n self.ssoid = self.generate_ssoid()\n \n def generate_ssoid(self) -> str :\n headers : dict = {'X-Application': self.app_key, 'Content-Type': 'application/x-www-form-urlencoded'}\n payload : str = f'username={self.username}&password={self.password}'\n resp = requests.post('https://identitysso-cert.betfair.com/api/certlogin',data=payload,cert=('betfair.crt','betfair.pem'),headers=headers)\n json_resp : dict = resp.json()\n try:\n SSOID : str = json_resp['sessionToken']\n except KeyError:\n raise Exception('Data limit hit')\n return SSOID\n \n \n def event_req(self):\n \"\"\"use this to concat the event_req str vars are:\n the method bit and params. Params can be filters, max results, market projection. 
Filters can be\n event ids, market types, market times etc etc etc\"\"\"\n pass\n\n def callApi(self,event_req) -> dict :\n headers : dict = {'X-Application': self.app_key, 'X-Authentication': self.ssoid, 'content-type': 'application/json'}\n try:\n req = requests.post(self.bet_url, data=event_req.encode('utf-8'), headers=headers) \n response : dict = req.json()\n return response['result']\n except Exception as error:\n print(f'Error occured: {error}')\n return response['error']\n \n def event_type_id_mapping(self):\n \"\"\" used to populate EventType_ID_Mapping table\"\"\"\n event_req : str = '{\"jsonrpc\": \"2.0\", \"method\": \"SportsAPING/v1.0/listEventTypes\", \"params\": {\"filter\":{ }}, \"id\": 1}'\n data : dict = self.callApi(event_req)\n \n mapping : list = [element['eventType'] for element in data]\n \n return pd.DataFrame(mapping)\n \n def events(self,event_type_ids,time_from=0,time_to=0) -> pd.DataFrame :\n '''maybe let it take event type name as arg and sort mapping in func\n this defaults to inplay but if you give datetime args for time_from and time_to it will give \n events in that timeframe'''\n \n inplay='true'\n event_type_ids : str = str(event_type_ids).replace(\"'\",'\"')\n \n if time_from !=0:\n time_from : str = time_from.strftime('%Y-%m-%dT%H:%M:%SZ')\n time_to : str = time_to.strftime('%Y-%m-%dT%H:%M:%SZ')\n \n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/listEvents\",\n \"params\": {\n \"filter\": {\n \"eventTypeIds\": '''+event_type_ids+''',\n \"marketStartTime\": {\n \"from\": \"'''+time_from+'''\",\n \"to\": \"'''+time_to+'''\"\n }\n }\n },\n \"id\": 1\n }'''\n else:\n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/listEvents\",\n \"params\": {\n \"filter\": {\n \"eventTypeIds\": '''+event_type_ids+''',\n \"inPlayOnly\":'''+inplay+'''\n \n }\n }\n },\n \"id\": 1\n }'''\n \n return pd.DataFrame(nested_to_record(self.callApi(req), sep='_'))\n \n \n def market_info(self,event_ids) -> dict :\n '''this can be filtered in the json for specific market types or you could do this yourelf'''\n \n event_ids : str = str(event_ids).replace(\"'\",'\"')\n \n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/listMarketCatalogue\",\n \"params\": {\n \"filter\": {\n \"eventIds\": '''+event_ids+'''\n },\n \"maxResults\": \"200\",\n \"marketProjection\": [\n \"COMPETITION\",\n \"EVENT\",\n \"EVENT_TYPE\",\n \"RUNNER_DESCRIPTION\",\n \"RUNNER_METADATA\",\n \"MARKET_START_TIME\"\n ]\n },\n \"id\": 1\n }'''\n \n return self.callApi(req)\n \n def market_types(self,event_ids) -> pd.DataFrame :\n \n event_ids : str = str(event_ids).replace(\"'\",'\"')\n \n req : str = '''{\"jsonrpc\": \"2.0\", \n \"method\": \"SportsAPING/v1.0/listMarketTypes\", \n \"params\": {\n \"filter\":{\n \"eventIds\":'''+event_ids+'''}},\n \"id\": 1\n }'''\n \n return pd.DataFrame(nested_to_record(self.callApi(req), sep='_'))\n \n \n def competitions(self,event_type_ids) -> dict :\n '''this can be filtered in the json for specific market types or you could do this yourelf'''\n \n event_type_ids : str = str(event_type_ids).replace(\"'\",'\"')\n \n req : str = '''{\n \"params\": {\n \"filter\": {\n \"eventTypeIds\": '''+event_type_ids+'''\n }\n },\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/listCompetitions\",\n \"id\": 1\n }'''\n \n return self.callApi(req)\n \n def price_data(self,market_ids) -> dict :\n \n market_ids : str = str(market_ids).replace(\"'\",'\"')\n \n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": 
\"SportsAPING/v1.0/listMarketBook\",\n \"params\": {\n \"marketIds\": '''+market_ids+''',\n \"priceProjection\": {\n \"priceData\": [\"EX_BEST_OFFERS\", \"EX_TRADED\"],\n \"virtualise\": \"true\"\n }\n },\n \"id\": 1\n }'''\n return self.callApi(req)\n \n def place_order(self,market_id,selection_id,side,size,price = 'Last',handicap = 0) -> str :\n \n market_id : str = str(market_id)\n selection_id : str = str(selection_id)\n handicap : str = str(handicap)\n side : str = str(side)\n if side not in (\"BACK\",\"LAY\"):\n print('Invalid side, BACK/LAY')\n size : str = str(size)\n price : str = str(price)\n \n if price == 'Last': \n market_prices = self.price_data([market_id])[0]\n price = str([runner['ex'][f'availableTo{side.lower().capitalize()}'][0]['price'] \\\n for runner in market_prices['runners'] if str(runner['selectionId']) == selection_id][0])\n \n \n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/placeOrders\",\n \"params\": {\n \"marketId\": \"'''+market_id+'''\",\n \"instructions\": [\n {\n \"selectionId\": \"'''+selection_id+'''\",\n \"handicap\": \"'''+handicap+'''\",\n \"side\": \"'''+side+'''\",\n \"orderType\": \"LIMIT\",\n \"limitOrder\": {\n \"size\": \"'''+size+'''\",\n \"price\": \"'''+price+'''\",\n \"persistenceType\": \"LAPSE\"\n }\n }\n ]\n },\n \"id\": 1\n }'''\n \n return self.callApi(req)\n \n def current_orders(self):\n \n event_req : str = '{\"jsonrpc\": \"2.0\", \"method\": \"SportsAPING/v1.0/listCurrentOrders\", \"params\": {\"filter\":{ }}, \"id\": 1}'\n data : dict = self.callApi(event_req)\n \n \n return data\n \n \n def todays_settled_pnl(self):\n \n today = datetime.now().strftime('%Y-%m-%d')\n \n\n req = '''{\n \"jsonrpc\": \"2.0\", \n \"method\": \"SportsAPING/v1.0/listClearedOrders\", \n \"params\": {\"betStatus\":\"SETTLED\",\"settledDateRange\":{\"from\":\"'''+today+'''T00:00:00Z\"}}, \"id\": 1}\n '''\n \n return self.callApi(req)\n \n \n def market_ids(self,event_ids,market_name,matched_lower_bound) -> pd.DataFrame :\n '''this can be filtered in the json for specific market types or you could do this yourelf'''\n \n event_ids : str = str(event_ids).replace(\"'\",'\"')\n \n req : str = '''{\n \"jsonrpc\": \"2.0\",\n \"method\": \"SportsAPING/v1.0/listMarketCatalogue\",\n \"params\": {\n \"filter\": {\n \"eventIds\": '''+event_ids+'''\n },\n \"maxResults\": \"200\",\n \"marketProjection\": [\n \"EVENT\"\n \n ]\n },\n \"id\": 1\n }'''\n \n data : pd.DataFrame = pd.DataFrame(nested_to_record(self.callApi(req), sep='_'))\n \n data_filtered : pd.DataFrame = data.loc[(data['marketName']==market_name)&(data['totalMatched']>matched_lower_bound)][['event_name','marketId','event_id']]\n data_filtered.columns=['event_name','Betfair_market_id','Betfair_event_id']\n return data_filtered\n \n \n def cancel_order(self,market_id = 'None'):\n \n if market_id == 'None':\n req = '''{\n \"jsonrpc\": \"2.0\", \n \"method\": \"SportsAPING/v1.0/cancelOrders\", \n \"params\": {}, \"id\": 1\n }'''\n \n else:\n req = '''\n {\"jsonrpc\": \"2.0\", \n \"method\": \"SportsAPING/v1.0/cancelOrders\", \n \"params\": {\"marketId\":\"'''+market_id+'''\"}, \"id\": 1\n }'''\n \n return self.callApi(req)\n \n \n \n def selection_id_player_name(self,market_ids : list) -> pd.DataFrame:\n \n market_ids_ : str = str(market_ids).replace(\"'\",'\"')\n \n \n req : str = '''{\n \"jsonrpc\": \"2.0\", \n \"method\": \"SportsAPING/v1.0/listMarketCatalogue\", \n \"params\": {\n \"filter\":{\n \"marketIds\":'''+market_ids_+'''},\n 
\"maxResults\":\"'''+str(len(market_ids))+'''\",\n \"marketProjection\":[\"RUNNER_DESCRIPTION\"]}, \n \"id\": 1}\n '''\n \n data : dict = self.callApi(req)\n \n runner_name_data : list = []\n for i in range(len(data)):\n market_id : str = data[i]['marketId']\n runners :list = data[i]['runners']\n for j in range(len(runners)):\n selection_id : int = int(runners[j]['selectionId'])\n runner_name : str = runners[j]['runnerName']\n runner_name_data.append([market_id,selection_id,runner_name])\n \n return pd.DataFrame(runner_name_data,columns = ['market_id','selection_id','name'])\n \n \n \n \n \n \n \n ", "repo_name": "betting-betting/public-odds-data", "sub_path": "betfair_client.py", "file_name": "betfair_client.py", "file_ext": "py", "file_size_in_byte": 10816, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.betfair_configuration", "line_number": 17, "usage_type": "name"}, {"api_name": "config.betfair_configuration", "line_number": 18, "usage_type": "name"}, {"api_name": "config.betfair_configuration", "line_number": 19, "usage_type": "name"}, {"api_name": "config.betfair_configuration", "line_number": 20, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.io.json._normalize.nested_to_record", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 63, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.io.json._normalize.nested_to_record", "line_number": 146, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 134, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 237, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 270, "usage_type": "attribute"}, {"api_name": "pandas.io.json._normalize.nested_to_record", "line_number": 270, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 249, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 324, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 297, "usage_type": "attribute"}]} +{"seq_id": "20975841104", "text": "import sqlite3 as db\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndatabase = \"/home/sachin/Thesis/data/RWO_0004_Ventilatoren_00.sqlite\"\n\ncon = db.connect(database)\ndf = pd.read_sql_query(f\"SELECT time, value FROM Value WHERE sensor_id=1\",\n con)\ndf.index = df['time']\ndf.drop('time', axis=1, inplace=True)\ncon.close()\nprint(df)\n\nax = df.plot(figsize=(10, 6))\nax.legend(['Vibration Values'])\nplt.title('Time Series Vibration Dataset')\nplt.xlabel('Timesteps (s)')\nplt.ylabel('Acceleration (g)')\nplt.savefig('plots/full_data_unixds.eps', format='eps', dpi=1200)\nplt.savefig('plots/full_data_unixds.jpg', format='jpg', dpi=1200)\nplt.show()\n", "repo_name": "sach1n1/Thesis_PoC", "sub_path": "DataAnalysis/full_plot_UNIX_ds.py", "file_name": "full_plot_UNIX_ds.py", "file_ext": "py", "file_size_in_byte": 664, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "15225925925", "text": "#!/usr/bin/python3\nfrom os import path\nfrom glob import glob\nimport textstat\nimport ebooklib\nfrom ebooklib import epub\nfrom bs4 import BeautifulSoup\n\n\ndef iter_paragraphs(chapter):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n for p in soup.find_all(\"p\"):\n yield p.get_text()\n\n\ndef iter_chapters(target):\n book = epub.read_epub(target)\n\n for x in book.get_items_of_type(ebooklib.ITEM_DOCUMENT):\n if x.is_chapter():\n yield \"\\n\".join(iter_paragraphs(x))\n\n\ndef score(text):\n text = \"\\n\".join(iter_chapters(target))\n return textstat.flesch_reading_ease(text), textstat.smog_index(text), textstat.dale_chall_readability_score(text)\n\n\nif __name__ == \"__main__\":\n\n with open(\"targets.txt\", \"r\") as f:\n sources = list(map(str.strip, f.readlines()))\n\n print(\"flesch\\tsmog\\tdale-chall\\tbook\")\n for src in sources:\n for target in glob(path.join(src, \"**\", \"*.epub\"), recursive=True):\n s = score(target)\n title = path.basename(target).replace(\".epub\", \"\")\n print(f\"{s[0]}\\t{s[1]}\\t{s[2]}\\t{title}\")", "repo_name": "jeffgreenca/ebook-complexity-score", "sub_path": "ebook-analysis.py", "file_name": "ebook-analysis.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 11, "usage_type": "call"}, {"api_name": "ebooklib.epub.read_epub", "line_number": 17, "usage_type": "call"}, {"api_name": "ebooklib.epub", "line_number": 17, "usage_type": "name"}, {"api_name": "ebooklib.ITEM_DOCUMENT", "line_number": 19, "usage_type": "attribute"}, {"api_name": "textstat.flesch_reading_ease", "line_number": 26, "usage_type": "call"}, {"api_name": "textstat.smog_index", "line_number": 26, "usage_type": "call"}, {"api_name": "textstat.dale_chall_readability_score", "line_number": 26, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "74668010405", "text": "import 
praw\nimport random\nfrom random import randint\nimport requests\nimport shutil\nimport os\nimport time\n\n\n\n\nreddit = praw.Reddit(client_id = \"\",\n client_secret= \"\",\n username=\"DaT1dUdE05\",\n password= \"\",\n user_agent=\"pythonmeme\",\n check_for_async=False\n )\n\n\n\nsubreddit= reddit.subreddit(\"AskReddit\")\n\n\nall_subs= []\n\n\n\ncount = 1\nfor i in range(1):\n\n\n top = subreddit.hot(limit = 100)\n\n for submission in top:\n all_subs.append(submission)\n\n random_sub = random.choice(all_subs)\n\n\n res = requests.get(random_sub.url, stream = True)\n\n if res.status_code == 200:\n with open(\"C:\\Stories\\story\"+str(count),'wb') as f:\n shutil.copyfileobj(res.raw, f)\n\n count+=1\n\n print(random_sub.url)\n print(random_sub.title)\n print(\"Done.\")\n\nurl = random_sub.url\nsubmission = reddit.submission(url = url)\n\ncomments = []\n\nnum = 5\nsubmission.comments.replace_more(limit=None)\nfor top_level_comment in submission.comments:\n if num>0:\n print(top_level_comment.body)\n comments.append(top_level_comment.body)\n print()\n num-=1\n\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\nfrom selenium.webdriver.chrome.service import Service\n\n\n\n#Finding and taking screenshot of question \npath=Service(\"C:\\chromedriver_win32 (1)\\chromedriver.exe\")\ndriver = webdriver.Chrome(service=path)\n\ndriver.get(url)\n\ndriver.maximize_window()\n\n\nimport pyautogui\n\nmyscreen = pyautogui.screenshot(region=(1000,600,1200,350))\n\nmyscreen.save('C:\\Stories\\q.png')\nprint(\"image saved\")\n\ndriver.execute_script(\"window.open('about:blank', 'secondtab');\")\ndriver.switch_to.window(\"secondtab\")\ndriver.get('https://translate.google.com/')\n\n#Finding Text Area to Type in\n#for i in comments:\ndriver.find_element(By.XPATH,'//*[@id=\"yDmH0d\"]/c-wiz/div/div[2]/c-wiz/div[2]/c-wiz/div[1]/div[2]/div[3]/c-wiz[1]/span/span/div/textarea').send_keys(random_sub.title)\ndriver.find_element(By.XPATH,'//*[@id=\"i8\"]/span[3]').click()\ntime.sleep(5)\n#driver.find_element(By.XPATH,'//*[@id=\"ow87\"]/div/span/button/div[3]').click()\n#time.sleep(2)\n \n\n\nfrom pywinauto.application import Application\n\napp = Application(backend='uia').start('C:\\Program Files\\Audacity\\Audacity.exe').connect(title = 'Audacity', timeout = 5)\n\n#app.Audacity.print_control_identifiers()\n\nrec = app.Audacity.child_window(title=\"Record\", auto_id=\"11005\", control_type=\"Button\").wrapper_object()\nrec.click()\n\n\n\n\n\n\n\n", "repo_name": "aravindanew/YoutubeBot", "sub_path": "rstories.py", "file_name": "rstories.py", "file_ext": "py", "file_size_in_byte": 2592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "praw.Reddit", "line_number": 12, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 78, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 79, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 79, "usage_type": "name"}, {"api_name": "pyautogui.screenshot", "line_number": 88, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 99, "usage_type": "attribute"}, {"api_name": 
"selenium.webdriver.common.by.By", "line_number": 99, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 100, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 100, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 101, "usage_type": "call"}, {"api_name": "pywinauto.application.Application", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "41072917358", "text": "import collections\nfrom result import Result\n\n\nclass BruteForce:\n \"\"\"\n Brute Force algorithm associated simply comparing every\n substring char and pattern char until match\n \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n\n def search(self, text):\n found_indexes = []\n collisions = 0\n for i in range(len(text) - len(self.pattern) + 1):\n logic = True\n for j in range(len(self.pattern)):\n if text[i + j] != self.pattern[j]:\n logic = False\n collisions += 1\n break\n if logic:\n found_indexes.append(i)\n return Result(found_indexes, collisions, BruteForce.__name__)\n\n\nclass Hash:\n def __init__(self, pattern, hash_method):\n self.pattern = pattern\n self.hash_method = hash_method\n\n def search(self, text):\n found_indexes = []\n sample_hash_sum = \\\n self.hash_method.get_hash(self.pattern, len(self.pattern))\n subtext_hash_sum = self.hash_method.get_hash(text, len(self.pattern))\n collisions = 0\n\n for i in range(len(text) - len(self.pattern) + 1):\n full_match = True\n if i != 0:\n subtext_hash_sum = self.hash_method.hash_shift(\n i, len(self.pattern), subtext_hash_sum, text)\n\n if sample_hash_sum == subtext_hash_sum:\n for j in range(len(self.pattern)):\n if text[i + j] != self.pattern[j]:\n full_match = False\n break\n if full_match:\n found_indexes.append(i)\n else:\n collisions += 1\n return Result(\n found_indexes, collisions, self.hash_method.__name__)\n\n\nclass Linear:\n \"\"\"\n Hashing algorithm based on summarizing chars ordinals\n to simplify strings comparison\n \"\"\"\n @staticmethod\n def hash_shift(i, sample_length, current_hash, text):\n result = current_hash\n result -= ord(text[i - 1])\n result += ord(text[i + sample_length - 1])\n return result\n\n @staticmethod\n def get_hash(text, length):\n hash_sum = 0\n for i in range(length):\n if i < len(text):\n hash_sum += ord(text[i])\n return hash_sum\n\n\nclass Quad:\n \"\"\"\n Hashing algorithm based on summarizing chars ordinals in square\n to simplify strings comparison\n \"\"\"\n @staticmethod\n def hash_shift(i, sample_length, current_hash, text):\n result = current_hash\n result -= ord(text[i - 1]) ** 2\n result += ord(text[i + sample_length - 1]) ** 2\n return result\n\n @staticmethod\n def get_hash(text, length):\n hash_sum = 0\n for i in range(length):\n if i < len(text):\n hash_sum += ord(text[i]) ** 2\n return hash_sum\n\n\nclass RabinKarph:\n \"\"\"\n Hashing algorithm based on summarizing chars ordinals with special\n Rabin Karph formula to simplify strings comparison\n \"\"\"\n @staticmethod\n def hash_shift(i, length, current_hash, text):\n result = current_hash\n result -= ord(text[i - 1]) * (2 ** (length - 1))\n result *= 2\n result += ord(text[i + length - 1])\n return result\n\n @staticmethod\n def get_hash(text, length):\n hash_sum = 0\n for i in range(length):\n if i < len(text):\n hash_sum += ord(text[i]) * (2 ** (length - i - 1))\n return hash_sum\n\n\nclass Automate:\n \"\"\"\n Automate algorithm has a preprocessing component it builds a table of\n shifts out of pattern which is used during method 
execution\n \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n self.table = self.get_table()\n\n def get_table(self):\n length = len(self.pattern)\n alphabet = []\n table = collections.defaultdict(collections.defaultdict)\n\n for i in range(length):\n alphabet.append(self.pattern[i])\n for i in alphabet:\n table[0][i] = 0\n\n for j in range(length):\n prev = table[j][self.pattern[j]]\n table[j][self.pattern[j]] = j + 1\n for i in alphabet:\n table[j + 1][i] = table[prev][i]\n return table\n\n def search(self, text):\n found_indexes = []\n collisions = 0\n current_state = 0\n sample_length = len(self.pattern)\n for i in range(len(text)):\n if text[i] not in self.table[current_state]:\n collisions += 1\n current_state = 0\n continue\n current_state = self.table[current_state][text[i]]\n if current_state == sample_length:\n found_indexes.append(i - sample_length + 1)\n\n return Result(found_indexes, collisions, 'Automate')\n\n\nclass BoyerMoore:\n \"\"\"\n Boyer Moore algorithm preprocess a pattern and builds shift tables for\n \"bad char\" anb \"good suffix\" heuristics\n \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n self.tx = ('*' * len(self.pattern)) + self.pattern\n self.bc_table = self.get_table_of_last_char_appearance()\n self.rpr = self.get_rpr_table()\n self.gs_table = self.get_shift_table()\n\n def get_table_of_last_char_appearance(self):\n m = len(self.pattern) - 1\n return {c: (m - i) for (i, c) in enumerate(self.pattern)}\n\n def is_equal(self, a, b, m):\n for k in range(a, b):\n if self.tx[k] == '*':\n m += 1\n continue\n if self.tx[k] != self.pattern[m]:\n return False\n else:\n m += 1\n return True\n\n def get_rpr_table(self):\n m = len(self.pattern)\n rpr = {}\n for p in range(m + 1):\n for k in range(m - p + 1, -m, -1):\n is_bad_suffix = self.is_equal(k + m - 1, k + m + p - 1, m - p)\n if (is_bad_suffix and ((k - 2 >= 0 and self.pattern[k - 2]\n != self.pattern[m - p - 1])\n or k - 2 < 0) and (p != m or k != 1)):\n rpr[p] = k\n break\n return rpr\n\n def get_shift_table(self):\n m = len(self.pattern)\n shift = {}\n for l in range(m + 1):\n shift[l] = m - self.rpr[l] - l + 1\n return shift\n\n def search(self, text):\n collisions = 0\n m = len(self.pattern)\n i = 0\n match_streak = 0\n execute = True\n indexes = []\n while execute:\n if self.pattern == '':\n for s in range(text.length):\n indexes.append(s)\n execute = False\n if i + m > len(text):\n break\n for j in range(i + m - 1, i - 1, -1):\n if text[j] == self.pattern[j - i]:\n match_streak += 1\n if match_streak == m:\n indexes.append(i)\n i += self.gs_table[match_streak]\n match_streak = 0\n break\n else:\n if match_streak == 0:\n if text[j] not in self.bc_table.keys():\n i += m\n else:\n i += self.bc_table[text[j]]\n else:\n collisions += 1\n i += self.gs_table[match_streak]\n match_streak = 0\n break\n return Result(indexes, collisions, 'Boyer moore')\n\n\nclass KMP:\n \"\"\"\n KMP or Knuth-Morris-Pratt's algorithm builds shift table with least common\n subsequence method then uses it during search\n \"\"\"\n\n def __init__(self, pattern):\n self.pattern = pattern\n self.partial = self.partial()\n\n def partial(self):\n table = [0]\n for i in range(1, len(self.pattern)):\n j = table[i - 1]\n while j > 0 and self.pattern[j] != self.pattern[i]:\n j = table[j - 1]\n if self.pattern[j] == self.pattern[i]:\n table.append(j + 1)\n else:\n table.append(j)\n return table\n\n def search(self, text):\n indexes = []\n collisions = 0\n j = 0\n\n for i in range(len(text)):\n while j > 0 and 
text[i] != self.pattern[j]:\n collisions += 1\n j = self.partial[j - 1]\n if text[i] == self.pattern[j]:\n j += 1\n if j == len(self.pattern):\n indexes.append(i - (j - 1))\n j = self.partial[j - 1]\n\n return Result(indexes, collisions, \"KMP\")\n\n\nclass SuffixArray:\n \"\"\"\n Suffix Array algorithm preprocess a text string with making a suffix\n array then uses a binary search to find left and right answer borders\n \"\"\"\n def __init__(self, pattern):\n self.pattern = pattern\n\n @staticmethod\n def build_suffix_array(text):\n suffixes = []\n for i in range(len(text)):\n suffix = text[-(i + 1):]\n suffixes.append((suffix, len(text) - i - 1))\n suffixes.sort(key=lambda tup: tup[0])\n return suffixes\n\n def left_border(self, sarray):\n left = -1\n right = len(sarray)\n while left < right:\n m = int(left + (right - left) / 2)\n if left + 1 == right:\n break\n if sarray[m][0] >= self.pattern:\n right = m\n if sarray[m][0] < self.pattern:\n left = m\n return left\n\n def right_border(self, sarray):\n left = -1\n right = len(sarray)\n while left < right:\n m = int(left + (right - left) / 2)\n if left + 1 == right:\n break\n if sarray[m][0] >= self.pattern:\n if sarray[m][0].startswith(self.pattern):\n left = m\n else:\n right = m\n else:\n left = m\n\n return right\n\n def search(self, text):\n sarray = self.build_suffix_array(text)\n left = self.left_border(sarray)\n right = self.right_border(sarray)\n\n return Result([sarray[i][1] for i in range(right - 1, left, -1)],\n 0, \"Suffix Array\")\n", "repo_name": "irusland/find_str", "sub_path": "findstr/finder.py", "file_name": "finder.py", "file_ext": "py", "file_size_in_byte": 10466, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "result.Result", "line_number": 26, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 56, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 137, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 165, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 249, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 289, "usage_type": "call"}, {"api_name": "result.Result", "line_number": 344, "usage_type": "call"}]} +{"seq_id": "23724466961", "text": "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import transforms\n\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nfrom data_loading import *\nfrom utils import *\nfrom model import *\nimport scipy.io\n\nlabel_path = \"./\" \nimage_path = \"../Top_tracheal_images\" \ntest_image_path = \"../Top_tracheal_healthy_images\"\n\nnew_size = 512\ninitial_epoch = 0\n\nnfold = 1\ntrain_fold = set(list(np.arange(1,6)))-set([nfold])\n\nEPOCHS = 400\nBATCH_SIZE = 16\nPATH = './resnet50_transformer_DA_ep%d_fold%d'%(EPOCHS,nfold)\n\ndevice = 0\nif device == 0:\n num_workers, prefetch_factor= 6,4\nelse:\n num_workers, prefetch_factor= 0,2\n\nimage_transform = transforms.Compose(\n [\n ToTensor(),\n RandomAugment(new_size),\n ]\n)\nvalid_transform = transforms.Compose(\n [ToTensor(),\n ValidTransform(new_size),\n ]\n)\ntrainset = topTrachealDataset(csv_file=os.path.join(label_path, 'train_labels_folds.csv'),\n nfold=list(train_fold),\n transform=image_transform,\n root_dir=image_path)\ntrainloader = DataLoader(trainset,batch_size=BATCH_SIZE,shuffle=True,num_workers=num_workers,prefetch_factor=prefetch_factor,)\n\nvalidset = 
topTrachealDataset(csv_file=os.path.join(label_path, 'train_labels_folds.csv'),\n nfold=[nfold],\n transform=valid_transform,\n root_dir=image_path)\n\nvalidloader = DataLoader(validset,batch_size=BATCH_SIZE,shuffle=False,num_workers=num_workers,prefetch_factor=prefetch_factor,)\n\nmodel = lDETR()\n\nparams_to_update = []\nfor param in model.parameters():\n if param.requires_grad == True:\n params_to_update.append(param)\nmodel.to(device)\n\nimport torch.optim as optim\ncriterion = nn.MSELoss()\noptimizer = optim.Adam(params_to_update,lr=0.0001)\n\ntrain_loss = []\ntest_loss = []\nif not os.path.exists(PATH):\n os.mkdir(PATH)\n\nprint('Ready to train\\n')\n\nmodel.train()\nfor epoch in range(1+initial_epoch,EPOCHS+1):\n\n ep_train_loss = 0.0\n ep_test_loss = 0.0\n print('.........training %d epoch...........'%epoch)\n\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs = data['image'].to(device)\n labels = data['landmarks'].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n ep_train_loss += loss.item()\n \n \n train_loss.append(ep_train_loss / len(trainloader))\n\n model.eval()\n with torch.no_grad():\n for data in validloader:\n inputs = data['image'].to(device)\n labels = data['landmarks'].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n ep_test_loss += loss.item()\n test_loss.append(ep_test_loss/len(validloader))\n model.train()\n\n if epoch%1 == 0:\n print('[%d] loss: %4f' %(epoch,ep_train_loss / len(trainloader)))\n\n if epoch %40 == 0:\n plt.figure()\n ax = plt.subplot(111)\n ax.plot(train_loss,'r',label='train')\n ax.plot(test_loss,'b',label='test')\n plt.yscale(\"log\")\n plt.legend()\n plt.savefig(os.path.join(PATH,'losses.png'))\n plt.close()\n\n scipy.io.savemat(os.path.join(PATH,'losses.mat'),{'train_loss':train_loss,'test_loss':test_loss})\n\n model.eval()\n with torch.no_grad():\n for data in validloader:\n inputs = data['image'].to(device)\n labels = data['landmarks'].to(device)\n outputs = model(inputs)\n \n plt.figure(figsize=(12,9)) \n for i in range(4):\n error = new_size*mean_distance_error(labels[i,:],outputs[i,:])\n \n ax = plt.subplot(1, 4, i + 1)\n plt.tight_layout()\n ax.set_title('MDE %.4f'%(error))\n show_landmarks(inputs[i,:].detach().to('cpu'),new_size*labels[i,:].detach().to('cpu'),new_size*outputs[i,:].detach().to('cpu'))\n \n plt.savefig(os.path.join(PATH,'test_ep%d.png'%epoch))\n plt.close()\n break\n model.train()\n\n if epoch%40 == 0:\n torch.save(model.state_dict(), os.path.join(PATH,'model_ep%d.pth'%epoch))\n torch.save(optimizer.state_dict(), os.path.join(PATH,'optimizer_ep%d'%epoch))\n model.eval()\n ep_train_loss = 0.0\n ep_test_loss = 0.0\n with torch.no_grad():\n for data in trainloader:\n inputs = data['image'].to(device)\n labels = data['landmarks'].to(device)\n outputs = model(inputs)\n ep_train_loss += mean_distance_error(labels,outputs)\n for data in validloader:\n inputs = data['image'].to(device)\n labels = data['landmarks'].to(device)\n outputs = model(inputs)\n ep_test_loss += mean_distance_error(labels,outputs)\n\n with open(os.path.join(PATH,'metrics.txt'), 'a') as f:\n f.write(\"Epoch :%d\"%epoch)\n f.write(\"Training set distance: %.4f \\n\"%(ep_train_loss/len(trainloader)*new_size))\n f.write(\"Testing set distance: %.4f \\n\\n\"%(ep_test_loss/len(validloader)*new_size))\n\n 
model.train()\nprint('Finished Training \\n')", "repo_name": "kshu0414/lDETR_laryngeal_tracking", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5630, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 37, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 37, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "model.parameters", "line_number": 64, "usage_type": "call"}, {"api_name": "model.to", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 71, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 76, "usage_type": "call"}, {"api_name": "model.train", "line_number": 80, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 108, "usage_type": "call"}, {"api_name": "model.train", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "scipy.io.io.savemat", "line_number": 131, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 131, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 131, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "model.eval", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "model.train", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 155, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path", "line_number": 156, "usage_type": "attribute"}, {"api_name": "model.eval", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 172, "usage_type": "attribute"}, {"api_name": "model.train", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "16144283680", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport librosa\nimport numpy as np\nclass Bpgan_VGGExtractor(nn.Module):\n def __init__(self,d):\n super(Bpgan_VGGExtractor, self).__init__()\n self.in_channel = int(d/40)\n self.freq_dim = 40\n self.out_dim = 1280\n\n self.conv1 = nn.Conv2d(self.in_channel, 64, 3, stride=1, padding=1)\n self.conv2 = nn.Conv2d( 64, 64, 3, stride=1, padding=1)\n self.pool1 = nn.MaxPool2d(2, stride=2) # Half-time dimension\n self.conv3 = nn.Conv2d( 64,128, 3, stride=1, padding=1)\n self.conv4 = nn.Conv2d( 128,128, 3, stride=1, padding=1)\n self.pool2 = nn.MaxPool2d(2, stride=2) # Half-time dimension\n def view_input(self,feature,xlen):\n # drop time\n xlen = [x//4 for x in xlen]\n if feature.shape[1]%4 != 0:\n feature = feature[:,:-(feature.shape[1]%4),:].contiguous()\n bs,ts,ds = feature.shape\n # reshape\n feature = feature.view(bs,ts,self.in_channel,self.freq_dim)\n feature = feature.transpose(1,2)\n\n return feature,xlen\n\n def forward(self,feature,xlen):\n # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)\n feature,xlen = self.view_input(feature,xlen)\n feature = F.relu(self.conv1(feature))\n feature = F.relu(self.conv2(feature))\n feature = self.pool1(feature) # BSx64xT/2xD/2\n feature = F.relu(self.conv3(feature))\n feature = F.relu(self.conv4(feature))\n feature = self.pool2(feature) # BSx128xT/4xD/4\n # BSx128xT/4xD/4 -> BSxT/4x128xD/4\n feature = feature.transpose(1,2)\n # BS x T/4 x 128 x D/4 -> BS x T/4 x 32D\n feature = feature.contiguous().view(feature.shape[0],feature.shape[1],self.out_dim)\n return feature,xlen\n\n def feature_map(self,feature):\n feature,xlen = self.view_input(feature,[128])\n feature = F.relu(self.conv1(feature))\n feature = F.relu(self.conv2(feature))\n out_feature1 = self.pool1(feature)\n feature = F.relu(self.conv3(out_feature1))\n feature = F.relu(self.conv4(feature))\n out_feature2 = self.pool2(feature)\n return out_feature1,out_feature2\n def load_param(self,path):\n self.load_state_dict(torch.load(path))\n\nclass Bpgan_VGGLoss(nn.Module):\n def __init__(self, d=40,sampling_ratio = 16000, n_fft=512, n_mels = 128,path = None):\n super(Bpgan_VGGLoss, self).__init__()\n self.vgg = Bpgan_VGGExtractor(d=d).cuda()\n self.criterion = nn.L1Loss()\n self.weights = [1.0/2, 1.0]\n A = librosa.filters.mel(sr=sampling_ratio,n_fft=n_fft,n_mels=d)\n B = librosa.filters.mel(sr=sampling_ratio,n_fft=n_fft,n_mels=n_mels)\n C = A.dot(np.linalg.pinv(B))\n self.Transform_tensor = torch.Tensor(C).cuda()\n if path != None:\n self.vgg.load_param(path)\n else:\n if sampling_ratio == 16000:\n self.vgg.load_param(\"./models/VGG_Extractor_16k.pt\")\n elif sampling_ratio == 8000:\n self.vgg.load_param(\"./models/VGG_Extractor_8k.pt\")\n else:\n raise NotImplementedError\n\n def forward(self, x, y):\n x_img = torch.einsum(\"mj,idjk->idmk\", [self.Transform_tensor, x])\n x_img = x_img[:, 0, :, :]\n x_img = x_img.transpose(1, 2)\n y_img = torch.einsum(\"mj,idjk->idmk\", [self.Transform_tensor, y])\n y_img = y_img[:,0,:,:]\n y_img = y_img.transpose(1,2)\n x_vgg, y_vgg = self.vgg.feature_map(x_img), self.vgg.feature_map(y_img)\n loss = 0\n for i in range(len(x_vgg)):\n loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())\n return loss\n def load_param(self,path):\n self.vgg.load_param(path)\n", "repo_name": 
"BowenL0218/BPGAN-Signal-Compression", "sub_path": "models/Bpgan_VGG_Extractor.py", "file_name": "Bpgan_VGG_Extractor.py", "file_ext": "py", "file_size_in_byte": 3829, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 6, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "librosa.filters.mel", "line_number": 64, "usage_type": "call"}, {"api_name": "librosa.filters", "line_number": 64, "usage_type": "attribute"}, {"api_name": "librosa.filters.mel", "line_number": 65, "usage_type": "call"}, {"api_name": "librosa.filters", "line_number": 65, "usage_type": "attribute"}, {"api_name": "numpy.linalg.pinv", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 66, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.einsum", "line_number": 79, "usage_type": "call"}, {"api_name": 
"torch.einsum", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "38488206644", "text": "import pytest\nfrom brownie import exceptions\nfrom scripts.helpful_modules.network_check import isNetworkLocal\nfrom scripts.helpful_modules.get_account import get_account\nfrom scripts.helpful_modules.deploy_token_yield_and_vistula_token import (\n deploy_token_yield_and_vistula_token_contracts,\n)\n\n\ndef test_add_allowed_tokens():\n # Arrange\n isNetworkLocal()\n account = get_account()\n non_owner = get_account(index=1)\n yield_token, vistula_token = deploy_token_yield_and_vistula_token_contracts()\n # Act\n yield_token.addAllowedTokens(vistula_token.address, {\"from\": account})\n # Assert\n assert yield_token.allowedTokens(0) == vistula_token.address\n with pytest.raises(exceptions.VirtualMachineError):\n yield_token.addAllowedTokens(vistula_token.address, {\"from\": non_owner})\n\n\ndef test_token_is_allowed():\n # Arrange\n isNetworkLocal()\n account = get_account()\n yield_token, vistula_token = deploy_token_yield_and_vistula_token_contracts()\n # Act\n yield_token.tokenIsAllowed(vistula_token.address, {\"from\": account})\n # Assert\n assert yield_token.tokenIsAllowed(vistula_token.address) == True\n", "repo_name": "pawlovskiii/stake-yield-defi-protocol", "sub_path": "tests/unit_testing/test_allowed_tokens.py", "file_name": "test_allowed_tokens.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "scripts.helpful_modules.network_check.isNetworkLocal", "line_number": 12, "usage_type": "call"}, {"api_name": "scripts.helpful_modules.get_account.get_account", "line_number": 13, "usage_type": "call"}, {"api_name": "scripts.helpful_modules.get_account.get_account", "line_number": 14, "usage_type": "call"}, {"api_name": "scripts.helpful_modules.deploy_token_yield_and_vistula_token.deploy_token_yield_and_vistula_token_contracts", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 20, "usage_type": "call"}, {"api_name": "brownie.exceptions.VirtualMachineError", "line_number": 20, "usage_type": "attribute"}, {"api_name": "brownie.exceptions", "line_number": 20, "usage_type": "name"}, {"api_name": "scripts.helpful_modules.network_check.isNetworkLocal", "line_number": 26, "usage_type": "call"}, {"api_name": "scripts.helpful_modules.get_account.get_account", "line_number": 27, "usage_type": "call"}, {"api_name": "scripts.helpful_modules.deploy_token_yield_and_vistula_token.deploy_token_yield_and_vistula_token_contracts", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "28249294619", "text": "# internal imports\nimport logger\n\n# external imports\nimport os\nimport sys\nimport importlib\nimport tqdm\nimport time\n\ndeps = importlib.import_module(\"dependency_list\").deps\npath = sys.executable\n\n\ndef save_stamp(number):\n \"\"\"\n save_file\n\n Save the file.\n\n number: The number to save.\n\n return: None\n \"\"\"\n os.environ.update({\"VORTEX_DEPENDENCY_STAMP\": str(number)})\n\n\ndef load_stamp():\n \"\"\"\n load_file\n\n Load the file.\n\n return: None\n \"\"\"\n return os.environ.get(\"VORTEX_DEPENDENCY_STAMP\", \"0\")\n\n\ndef update_deps():\n if os.path.isfile(\"last_update.pickle\"):\n last_update = load_stamp()\n if last_update < time.time() - 3600 * 12:\n logger.log(\"VortexDependencyManager\",\n \"Last update was more than 10 hours ago. 
Updating dependencies...\")\n for dep in tqdm.tqdm(deps):\n os.system(f'{path} -m pip install --upgrade {dep}')\n save_stamp(time.time())\n else:\n logger.log(\"VortexDependencyManager\",\n \"Dependencies are up to date.\")\n else:\n for dep in tqdm.tqdm(deps):\n os.system(f'{path} -m pip install --upgrade {dep}')\n save_stamp(time.time())\n\n\nif __name__ == '__main__':\n update_deps()\n", "repo_name": "N3RDIUM/Vortex", "sub_path": "Vortex/dependency_updater.py", "file_name": "dependency_updater.py", "file_ext": "py", "file_size_in_byte": 1274, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "importlib.import_module", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.executable", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.environ.update", "line_number": 25, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 36, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "logger.log", "line_number": 43, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 45, "usage_type": "call"}, {"api_name": "os.system", "line_number": 46, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "logger.log", "line_number": 49, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 52, "usage_type": "call"}, {"api_name": "os.system", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "15490177984", "text": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom torch import optim\nfrom PIL import Image\nimport cv2\n\nimport dataset\nimport model\n\ndef load(model, device='cpu', reset = False, load_path = None):\n model = model\n\n if reset == False : \n if load_path is None :\n print('give path for load model')\n if load_path is not None:\n if device == 'cpu':\n sate = torch.load(load_path,map_location=torch.device('cpu'))\n else :\n sate = torch.load(load_path)\n \n model.load_state_dict(sate['state_dict'])\n \n return model\n\ndef accuracy(model, data_loader):\n acc_list = []\n for img, age,_ in data_loader:\n age_p = model(img)\n _,perd = age_p.max(1)\n correct = perd.eq(age).sum().item() / img.shape[0]\n acc_list.append(correct)\n \n return sum(acc_list) / len(acc_list)\n\nfrom torchvision import transforms as T\n\nRGB_MEAN = [ 0.485, 0.456, 0.406 ]\nRGB_STD = [ 0.229, 0.224, 0.225 ]\n\ntransform = T.Compose([\n T.Resize((128,128)), \n T.ToTensor(),\n T.Normalize(mean = RGB_MEAN, std = RGB_STD),\n ])\n\n\n\nif __name__=='__main__':\n mae = nn.L1Loss()\n data_loader = dataset.CACD(train=False)(batch_size=284)#dataset.UTKFace(train=False)(batch_size=164)\n\n\n load_path = './model/' + 'new_dataset_model_loss0.07474' + \".pth\"\n\n model = model.Resnet(120, reset=False)\n model = load(model, device='cpu', load_path = load_path)\n model.eval()\n # model = torch.load()\n\n imr = cv2.imread('./14_Aaron_Johnson_0001.jpg')\n # im = cv2.cvtColor(imr, cv2.COLOR_RGB2BGR)\n im = Image.fromarray(cv2.cvtColor(imr, cv2.COLOR_BGR2RGB))\n # 
print(transform(torch.FloatTensor(im).permute(-1, 1, 0)).unsqueeze(0).shape)\n # print(model(transform(torch.FloatTensor(im).permute(-1, 1, 0)).unsqueeze(0)).max(1))\n\n # im = transform(Image.open('./14_Aaron_Johnson_0001.jpg'))\n im = transform(im)\n print(model(im.unsqueeze(0)).max(1))\n\n img, age = next(iter(data_loader))\n # print(age[0].shape, img[0].shape)\n print(img.shape)\n\n age_p = model(img)\n _,perd = age_p.max(1)\n print(f'MAE : {mae(perd.float(), age.float())}')\n correct = perd.eq(age).sum().item()\n print(correct)\n # print(f'Accuracyy : {accuracy(model, data_loader)}')\n\n print(f'Target : {age[10]} -- Periction : {age_p.max(1)[1][10]}')#age_p.argmax(dim=1).item()}')\n plt.imshow(img[10].permute(1,-1,0))\n # plt.imshow(im)#permute(1,-1,0)\n plt.show()", "repo_name": "Se2007/Age-Estimation", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2598, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.load", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 22, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 24, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 45, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 45, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 46, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.L1Loss", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "dataset.CACD", "line_number": 53, "usage_type": "call"}, {"api_name": "model.Resnet", "line_number": 58, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 63, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 65, "usage_type": "name"}, {"api_name": "cv2.cvtColor", "line_number": 65, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 65, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "29834971607", "text": "#!/usr/bin/python\n#Working as of 11/19\n\nimport csv, requests, time, os\nimport json, collections\n#import re\nimport mimetypes\nimport pprint\n\n#working_path = '/path/to/the/folder/housing/the/bulk_assign_avatars/script/' # Base working path of this script, where everything will be housed\nworking_path = './' # Base working path of this script, where everything will be housed\n\ncsv_filename = 'sample.csv' # Example: 'API_Testing/users_provisioning.csv', relative to\n # 
working_path. This file contains the three columns needed\n # by this script. Those columns are \n # - user_id \n # This is the SIS user_id.\n # - image_filename\n # the name of the image file, relative to the working path folder\n # - image_filetype \n # the filetype of the image. This will be something like\n # jpeg, png, gif. This field is needed right now because\n # the script does not auto-detect the filetype\n\"\"\"\nuser_id,image_filename,image_filetype\n4098275,DogHouse1.jpg,jpeg\n\"\"\"\n \nimages_path = 'images/' # This is the name of the folder than houses the images. It\n # should be relative to the working_path\n # script output\n # relative to working_path\n\ndomain = 'domain.instructure.com' #Replace domain\naccess_token = os.environ['ACCESS_TOKEN']\n#access_token = \"\"\n\n##############################################################################\n##############################################################################\n################ Don't edit anything past here unless you know what you are doing.\n################ NOTE: No offense, you probably do know what you're doing. This is for\n################ those that do not. \n\n\nheader = {'Authorization' : 'Bearer {0}'.format(access_token)}\nvalid_mimetypes = ('image/jpeg','image/png','image/gif')\n\nwith open(f\"{working_path}/{csv_filename}\") as csv_file:\n read_csv = csv.DictReader(csv_file)\n for row in read_csv:\n # Step 1: Start upload file to user's file storage in Canvas\n inform_api_url = f\"https://{domain}/api/v1/users/self/files\"\n image_path = f\"{working_path}{images_path}{row['image_filename']}\"\n if not os.path.isfile(image_path):\n print(image_path, \"does not exist, skipping to next record\")\n continue\n mime_type = mimetypes.guess_type(image_path)\n inform_parameters = {\n 'name': row['image_filename'],\n 'content_type': mime_type,\n 'size': os.path.getsize(image_path),\n 'parent_folder_path': 'profile pictures', \n 'as_user_id': f\"sis_user_id:{row['user_id']}\"\n }\n res = requests.post(inform_api_url,headers=header,data=inform_parameters)\n print(\"Done prepping Canvas for upload, now sending the data...\")\n data = res.json()\n # Step 2: Upload data\n files = {'file':open(image_path,'rb').read()}\n upload_params = data.get('upload_params')\n upload_url = data.get('upload_url')\n upload_file_res = requests.post(upload_url, data=upload_params, files=files, allow_redirects=False)\n # Step 3: Confirm upload\n confirmation_url = upload_file_res.headers['location']\n confirmation = requests.post(confirmation_url,headers=header)\n if 'id' in confirmation.json():\n file_id = confirmation.json()['id']\n else:\n print('no id here')\n params = {'as_user_id': f\"sis_user_id:{row['user_id']}\"}\n # Step 4: Find recently uploaded image and get the user token\n avatar_options = requests.get(f\"https://{domain}/api/v1/users/sis_user_id:{row['user_id']}/avatars\", headers=header, params=params)\n \n for ao in avatar_options.json():\n if ao.get('display_name') == row['image_filename']:\n print('found')\n token = ao.get('token')\n params['user[avatar][token]'] = token\n set_avatar_user = requests.put(f\"https://{domain}/api/v1/users/sis_user_id:{row['user_id']}\", headers=header, params=params)\n print(set_avatar_user)\n if set_avatar_user.status_code == 200:\n print(f'Profile image set for user - {row[\"user_id\"]}')\n else:\n print('Failed to set profile image for user - {row[\"user_id\"]}')", "repo_name": "unsupported/canvas", "sub_path": "api/bulk_assign_avatars/python/bulk_assign_avatars.py", 
"file_name": "bulk_assign_avatars.py", "file_ext": "py", "file_size_in_byte": 4641, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 103, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "csv.DictReader", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "mimetypes.guess_type", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 65, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 75, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "17803395069", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom collections import OrderedDict\nfrom sklearn.ensemble import RandomForestClassifier#,ExtraTreesClassifier\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n#from sklearn.metrics import accuracy_score\n#import copy\nimport warnings\nwarnings.filterwarnings('ignore')\n#import random\n\ntitanic = sns.load_dataset('titanic')\ndf = pd.read_csv('train.csv')\ndf_test = pd.read_csv('test.csv')\n\n#データの前処理:単一代入法によって、欠損値を補完する\ndf = df[[\"Survived\",\"Pclass\",\"Sex\",\"Age\",\"SibSp\",\"Parch\",\"Fare\",\"Cabin\",\"Embarked\"]]\nX = df[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare','Cabin','Embarked']].values\nY = df['Survived'].values\n\ntrain,test=train_test_split(df,test_size=0.4,random_state=0)\n\n#Sexで男性を1、女性を0に置換する\ndf[\"Sex\"][df[\"Sex\"] == \"male\"] = 0\ndf[\"Sex\"][df[\"Sex\"] == \"female\"] = 1\n#Embarkedの欠損値をSで補完する\ndf[\"Embarked\"] = df[\"Embarked\"].fillna(\"S\")\n#EmbarkedをSを0、Cを1、Qを2に置換する\ndf[\"Embarked\"][df[\"Embarked\"] == \"S\" ] = 0\ndf[\"Embarked\"][df[\"Embarked\"] == \"C\" ] = 1\ndf[\"Embarked\"][df[\"Embarked\"] == \"Q\" ] = 2\n\n#訓練用データと検証用データのAgeを平均値で補完する\ntrain_mean=train.copy()\ntrain_mean[\"Age\"] = train_mean[\"Age\"].fillna(train_mean[\"Age\"].mean())\ntest_mean=test.copy()\ntest_mean[\"Age\"] = test_mean[\"Age\"].fillna(test_mean[\"Age\"].mean())\n \n#RandomForestClassifier:決定木のアンサンブル学習で補完したデータを使って学習させる\nRANDOM_STATE=123\nX,y=make_classification(n_samples=500,n_features=25,n_informative=15,n_clusters_per_class=1,random_state=RANDOM_STATE)\nensemble_clfs=[\n (\"RandomForestClassifier,max_features='sqrt'\",RandomForestClassifier(warm_start=True,oob_score=True,max_features='sqrt',random_state=RANDOM_STATE)),(\"RandomForestClassifier,max_features='log2'\",RandomForestClassifier(warm_start=True,max_features='log2',oob_score=True,random_state=RANDOM_STATE)),(\"RandomForestClassifier,max_features=None\",RandomForestClassifier(warm_start=True,max_features='None',oob_score=True,random_state=RANDOM_STATE))\n ]\nerror_rate=OrderedDict((label,[])for label,_ in ensemble_clfs)\nmin_estimates=15\nmax_estimates=175\n\n\nfor label, clf in ensemble_clfs:\n for i in range(min_estimates,max_estimates+1):\n clf.set_params(n_estimators=i)\n clf.fit(X,y)\n \n 
oob_error=1-clf.oob_score_\n error_rate[label].append((i,oob_error))\n \nfor label,clf in ensemble_clfs:\n for i in range(min_estimates,max_estimates+1):\n clf.set_params(n_estimators=i)\n clf.fit(X,y)\n oob_error=1-clf.oob_score_\n error_rate[label].append((i,oob_error))\n \nforest=RandomForestClassifier(criterion='entropy',n_estimators=10,random_state=1,n_jobs=2)\nforest.fit(x_train,y_train) \n\n#交差検定で精度を確認する\n\n#提出用の形式に変換する\n \n \n", "repo_name": "Ma-chan/Python", "sub_path": "titanic_challenge.py", "file_name": "titanic_challenge.py", "file_ext": "py", "file_size_in_byte": 3081, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 18, "usage_type": "call"}, {"api_name": "seaborn.load_dataset", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 52, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "17464151097", "text": "from django.utils import timezone\n#\nfrom django.shortcuts import render\n#\nfrom rest_framework.generics import (\n ListAPIView, CreateAPIView\n \n)\nfrom rest_framework.response import Response\n\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom .models import Sale,SaleDetail\n\nfrom applications.producto.models import Product\n\nfrom .serializers import (\n VentaReporteSerializers,\n ProcesoVentaSerializer\n )\n# Create your views here.\n\n\n# 256 Json dentro de Json\n# realizar un reporte de ventas y los detalles de cada venta\nclass ReporteVentasList(ListAPIView):\n serializer_class=VentaReporteSerializers\n\n def get_queryset(self):\n \n return Sale.objects.all()\n\n# 258 Serializador sobre serializadores\n# Registrar una venta\n\nclass RegistroVentaApiViews(CreateAPIView):\n authentication_classes=(TokenAuthentication,)\n permission_classes=[IsAuthenticated]\n\n serializer_class=ProcesoVentaSerializer\n\n #sobreescribiendo la funcion create dado que los serializadores no estan conectados\n def create(self, request, *args, **kwargs):\n des_serializer=ProcesoVentaSerializer(data=request.data) #obteniendo los datos\n # importante validar los datos\n des_serializer.is_valid(raise_exception=True)\n # confirmar datos (opcional)\n respuesta={\n \"tipo_recibo\":des_serializer.validated_data[\"type_invoce\"],\n \"tipo_venta\":des_serializer.validated_data[\"type_payment\"]\n \n }\n print(\"***********Respuesta************\")\n print(respuesta)\n #259\n venta=Sale.objects.create(\n date_sale=timezone.now(),\n amount=0,\n count=0,\n type_invoce= des_serializer.validated_data[\"type_invoce\"],\n type_payment=des_serializer.validated_data[\"type_payment\"],\n adreese_send=des_serializer.validated_data[\"adreese_send\"],\n user=self.request.user,\n )\n #259 recuperando productos\n productos=des_serializer.validated_data[\"productos\"]\n print(productos)\n\n #260 Bull create\n #iterar para bull create\n 
ventas_detalle=[]\n amount=0\n count=0\n \n\n for producto in productos:\n prod=Product.objects.get(id=producto['pk'])\n venta_detalle=SaleDetail(\n sale=venta,\n product=prod,\n count=producto['count'],\n price_purchase=prod.price_purchase,\n price_sale=prod.price_sale,\n )\n #260\n amount=amount+prod.price_sale*producto[\"count\"]\n count=count+producto[\"count\"]\n ventas_detalle.append(venta_detalle)\n \n venta.amount=amount\n venta.count=count\n venta.save()\n\n SaleDetail.objects.bulk_create(ventas_detalle)\n\n return Response({'code':'ok'})\n \n", "repo_name": "Alex29Script/DjangoUdemy", "sub_path": "tiendadj/tienda/applications/venta/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2969, "program_lang": "python", "lang": "es", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.generics.ListAPIView", "line_number": 26, "usage_type": "name"}, {"api_name": "serializers.VentaReporteSerializers", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Sale.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Sale.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Sale", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.generics.CreateAPIView", "line_number": 36, "usage_type": "name"}, {"api_name": "rest_framework.authentication.TokenAuthentication", "line_number": 37, "usage_type": "name"}, {"api_name": "rest_framework.permissions.IsAuthenticated", "line_number": 38, "usage_type": "name"}, {"api_name": "serializers.ProcesoVentaSerializer", "line_number": 40, "usage_type": "name"}, {"api_name": "serializers.ProcesoVentaSerializer", "line_number": 44, "usage_type": "call"}, {"api_name": "models.Sale.objects.create", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Sale.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.Sale", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.timezone.now", "line_number": 57, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 57, "usage_type": "name"}, {"api_name": "applications.producto.models.Product.objects.get", "line_number": 77, "usage_type": "call"}, {"api_name": "applications.producto.models.Product.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "applications.producto.models.Product", "line_number": 77, "usage_type": "name"}, {"api_name": "models.SaleDetail", "line_number": 78, "usage_type": "call"}, {"api_name": "models.SaleDetail.objects.bulk_create", "line_number": 94, "usage_type": "call"}, {"api_name": "models.SaleDetail.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.SaleDetail", "line_number": 94, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "19860185207", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ast2000tools.space_mission import SpaceMission\nimport ast2000tools.utils as utils\nfrom ast2000tools.constants import *\nfrom ast2000tools.solar_system import SolarSystem\n\n\nutils.check_for_newer_version()\n# Construct SpaceMission instance for my mission\nseed = utils.get_seed('Sgfrette')\nmission = SpaceMission(seed)\nsystem = SolarSystem(seed)\n\nsunmass = system.star_mass\nplanetmass = system.masses[0]\nmy = sunmass*planetmass/(sunmass+planetmass)\nmasstot = sunmass+planetmass\n\nGr = G_sol\n\n#Numerical integrator\ndef 
integrator(planet,sun):\n\n def gravity_on_sun(x_sun,y_sun,x_planet,y_planet):\n\n r = np.asarray([x_sun-x_planet, y_sun-y_planet])\n grav = -(Gr*sunmass*planetmass)/(np.linalg.norm(r))**2\n ax = grav*r[0] /(np.linalg.norm(r))\n ay = grav*r[1]/(np.linalg.norm(r))\n\n return np.asarray([ax,ay],float)\n\n x0x,x0y,v0x,v0y = planet\n x0xsun,x0ysun,v0xsun,v0ysun = sun\n\n N = 1000000\n time = 0.02 #time for 20 rotations for our home planet\n #print(time)\n dt = time/N\n t = dt\n\n x_planet = np.zeros((N,2),float)\n x_sun = np.zeros((N,2),float)\n\n cm = np.array([0,0])\n\n x_planet[0,:] = [x0x,x0y]\n x_sun[0,:] = [x0xsun,x0ysun]\n\n v_planet = np.asarray([v0x,v0y],float)\n v_sun = np.asarray([v0xsun,v0ysun],float)\n F_sun0 = gravity_on_sun(x0xsun,x0ysun,x0x,x0y)\n a_i_sun = F_sun0/sunmass\n a_i_planet = -F_sun0/planetmass\n for i in range(N-1):\n t+=dt\n cm = np.asarray([(sunmass*x_sun[i,0] + planetmass*\\\n x_planet[i,0])/(sunmass+planetmass),\\\n (sunmass*x_sun[i,1] + planetmass*\\\n x_planet[i,1])/(sunmass+planetmass)])\n\n x_planet[i+1,:] = x_planet[i,:]+(v_planet*dt + 0.5*a_i_planet*dt**2)-cm\n x_sun[i+1,:] = x_sun[i,:]+(v_sun*dt + 0.5*a_i_sun*dt**2)-cm\n F_sun = gravity_on_sun(x_sun[i+1,0],x_sun[i+1,1],x_planet[i+1,0],\\\n x_planet[i+1,1])\n a_iplus1_sun = F_sun/sunmass\n a_iplus1_pl = -F_sun/planetmass\n v_planet += 0.5*( a_i_planet + a_iplus1_pl)*dt\n a_i_planet = a_iplus1_pl\n v_sun += 0.5*( a_i_sun + a_iplus1_sun )*dt\n a_i_sun = a_iplus1_sun\n\n return x_planet,x_sun , dt, N\n\nPlanet0 = [system.initial_positions[0,0],system.initial_positions[1,0],\\\nsystem.initial_velocities[0][0],system.initial_velocities[1][0]]\n\nSun = [0,0,0,0]\n\nplanet_orbit, sun_orbit, dt, N = integrator(Planet0,Sun)\n\n\n#From mid to top of sun in y coordinates\nUpper = system.radii[0]*1000/AU + system.star_radius*1000/AU\nx_planet = np.copy(planet_orbit)\ndt = dt*yr/3600\n\n\n#Vectorized code for calculating eclipse data\n\"\"\"Must be 1 planet radii above x axis and beneath top of sun in y axis\"\"\"\ncondition1 = np.logical_and(np.greater_equal(x_planet[:,1],0),np.greater_equal(x_planet[:,0],0))\ncondition2 = np.logical_and(np.less_equal(x_planet[:,1],Upper),np.greater_equal(x_planet[:,0],0))\n\n\"\"\"Counts each timestep with planet above x-axis\"\"\"\ndt_in_intervall1 = np.logical_and(condition1,condition2).astype(np.float32)\ntime_in_int1 = sum(dt_in_intervall1)*dt\n\n\nUpper = system.star_radius*1000/AU + system.radii[0]*1000/AU\nUnder = system.star_radius*1000/AU - system.radii[0]*1000/AU\n\n\"\"\"Must be above sun botton and underneath sun top i y axis\"\"\"\ncondition3 = np.logical_and(np.greater_equal(x_planet[:,1],Under),np.greater_equal(x_planet[:,0],0))\ncondition4 = np.logical_and(np.less_equal(x_planet[:,1],Upper),np.greater_equal(x_planet[:,0],0))\n\ndt_in_intervall2 = np.logical_and(condition3,condition4).astype(np.float32)\ntime_in_int2 = sum(dt_in_intervall2*dt)\n\ndt_in_intervall3 = dt_in_intervall1-dt_in_intervall2\n\nAreal_sun = 2*np.pi*(system.star_radius*1000/AU)\nAreal_planet = 2*np.pi*(system.radii[0]*1000/AU)\n\nFlux_drop = (Areal_planet/Areal_sun)\nv = (Flux_drop)/time_in_int2 #rate of change for curve\n\ntime = np.linspace(0,N*dt,N)\ncurve = np.zeros(N)\n\n\"\"\"Sets values in curve to be constant as long as planet in between sun top and\nsun bottom\"\"\"\ncurve = np.where(dt_in_intervall1 == 1, dt_in_intervall1-Flux_drop, dt_in_intervall1)\n\n\"\"\"Calculates rate of change for curve as a numerical integral and long as the planet\nis less than a planet radii insiden sun 
area\"\"\"\nfor i in range(len(dt_in_intervall2)):\n if dt_in_intervall2[i] == 1:\n curve[i] = curve[i-1] + v*dt\n\n#Sets all other values equal to 1, as baseline for flux\ncurve = np.where(curve==0,curve+1,curve)\n\ntime = np.linspace(0,N*dt,N)\nsigma = (Areal_planet/Areal_sun)/100\nnoise = np.random.normal(0,sigma, size = int(N))\ncurve +=noise\n\ninfile = open(\"Light_curve_data.txt\", \"w\")\nfor i in range(len(curve[int(140/dt):int(210/dt)])):\n infile.write(\"{} {}\\n\".format(i*dt,curve[i]))\ninfile.close()\n\nplt.title(\"Light-curve\")\nplt.plot(time[0:int(25/dt)],curve[0:int(25/dt)])\nplt.xlabel(\"Time in hours\")\nplt.ylabel(\"Flux\")\nplt.show()\n", "repo_name": "marithso/AST2000", "sub_path": "Prosjekt/Part2/light_curve.py", "file_name": "light_curve.py", "file_ext": "py", "file_size_in_byte": 4825, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ast2000tools.utils.check_for_newer_version", "line_number": 10, "usage_type": "call"}, {"api_name": "ast2000tools.utils", "line_number": 10, "usage_type": "name"}, {"api_name": "ast2000tools.utils.get_seed", "line_number": 12, "usage_type": "call"}, {"api_name": "ast2000tools.utils", "line_number": 12, "usage_type": "name"}, {"api_name": "ast2000tools.space_mission.SpaceMission", "line_number": 13, "usage_type": "call"}, {"api_name": "ast2000tools.solar_system.SolarSystem", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.less_equal", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.logical_and", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.less_equal", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.greater_equal", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 108, 
"usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 114, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "32683665470", "text": "import torch\nimport torchvision\nfrom torch import nn\nfrom torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential\nfrom torch.utils.data import DataLoader\n\ndataset=torchvision.datasets.CIFAR10(\"./data\",train=False,transform=torchvision.transforms.ToTensor(),download=True)\ndataloader=DataLoader(dataset,batch_size=1)\n\n\nclass Zj(nn.Module):\n def __init__(self):\n super(Zj, self).__init__()\n self.model1 = Sequential(\n Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2),\n MaxPool2d(kernel_size=2),\n Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2),\n MaxPool2d(kernel_size=2),\n Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2),\n MaxPool2d(kernel_size=2),\n Flatten(),\n Linear(in_features=1024, out_features=64),\n Linear(in_features=64, out_features=10)\n\n )\n def forward(self,x):\n x=self.model1(x)\n return x\n\nloss=nn.CrossEntropyLoss()\nzj=Zj()\noptim=torch.optim.SGD(zj.parameters(),lr=0.01)\n\n\nfor epoch in range(20):\n running_loss=0.0\n for data in dataloader:\n imgs,targets=data\n # torch.reshape(imgs,(-1,3,32,8))\n outputs=zj(imgs)\n # print(outputs)\n # print(targets)\n result_loss=loss(outputs,targets)\n optim.zero_grad()\n result_loss.backward()\n optim.step()\n running_loss=running_loss+result_loss\n print(running_loss)\n\n\n\n # print(result_loss)", "repo_name": "mmmikezhang/shengdu", "sub_path": "nn_optim.py", "file_name": "nn_optim.py", "file_ext": "py", "file_size_in_byte": 1526, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.datasets.CIFAR10", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 7, "usage_type": "attribute"}, 
{"api_name": "torch.utils.data.DataLoader", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.Flatten", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 32, "usage_type": "attribute"}]} +{"seq_id": "38072372819", "text": "import torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nimport torch.nn.functional as F\r\n\r\nfrom transformers import Wav2Vec2Model\r\nfrom transformers import Wav2Vec2PreTrainedModel\r\nfrom transformers import AutoModel, XLMRobertaModel\r\n\r\nfrom .layer import SelfAttention, MultiModalMixer\r\n\r\n#################################################################\r\n################ Uni Modal (Wav) ################\r\n#################################################################\r\nclass Wav2Vec2ClassificationHead(nn.Module):\r\n \"\"\"Head for wav2vec classification task.\"\"\"\r\n\r\n def __init__(self, config):\r\n super().__init__()\r\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\r\n self.dropout = nn.Dropout(config.final_dropout)\r\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\r\n\r\n def forward(self, features, **kwargs):\r\n x = features\r\n x = self.dropout(x)\r\n x = self.dense(x)\r\n x = torch.tanh(x)\r\n x = self.dropout(x)\r\n x = self.out_proj(x)\r\n return x\r\n\r\nclass Wav2Vec2ForSpeechClassification(Wav2Vec2PreTrainedModel):\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.num_labels = config.num_labels\r\n self.pooling_mode = config.pooling_mode\r\n self.config = config\r\n\r\n self.wav2vec2 = Wav2Vec2Model(config)\r\n self.classifier = Wav2Vec2ClassificationHead(config)\r\n\r\n self.init_weights()\r\n\r\n def freeze_feature_extractor(self):\r\n self.wav2vec2.feature_extractor._freeze_parameters()\r\n\r\n def merged_strategy(\r\n self,\r\n hidden_states,\r\n mode=\"mean\"\r\n ):\r\n if mode == \"mean\":\r\n outputs = torch.mean(hidden_states, dim=1)\r\n elif mode == \"sum\":\r\n outputs = torch.sum(hidden_states, dim=1)\r\n elif mode == \"max\":\r\n outputs = torch.max(hidden_states, dim=1)[0]\r\n else:\r\n raise Exception(\r\n \"The pooling method hasn't been defined! 
Your pooling mode must be one of these ['mean', 'sum', 'max']\")\r\n\r\n return outputs\r\n\r\n def forward(\r\n self,\r\n input_values,\r\n attention_mask=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n labels=None,\r\n ):\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n outputs = self.wav2vec2(\r\n input_values,\r\n attention_mask=attention_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n hidden_states = outputs[0]\r\n hidden_states = self.merged_strategy(hidden_states, mode=self.pooling_mode)\r\n logits = self.classifier(hidden_states)\r\n\r\n return logits\r\n\r\n\r\n#################################################################\r\n################ Uni Modal (text) ################\r\n#################################################################\r\nclass xlm_RoBertaBase(XLMRobertaModel):\r\n def __init__(self, config):\r\n super(xlm_RoBertaBase, self).__init__(config)\r\n self.roberta = XLMRobertaModel(config)\r\n self.norm = nn.LayerNorm(config.hidden_size)\r\n self.classifier = nn.Linear(config.hidden_size, config.num_classes)\r\n self.init_weights()\r\n\r\n def forward(self, input_ids, attention_mask=None, token_type_ids=None):\r\n outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)['last_hidden_state']\r\n embeddings = torch.mean(outputs, axis=1)\r\n embeddings = self.norm(embeddings)\r\n logits = self.classifier(embeddings)\r\n return logits\r\n\r\n\r\n#################################################################\r\n################ Multi Modal (Wav) ################\r\n#################################################################\r\nclass MultiModalClassificationHead(nn.Module):\r\n def __init__(self, config):\r\n super().__init__()\r\n\r\n self.multimodal_method = config.multimodal_method\r\n self.num_classes = config.rnn_config.num_classes\r\n\r\n if self.multimodal_method=='early_fusion':\r\n concat_size = config.wav_config.hidden_size + config.rnn_config.hidden_size\r\n self.norm = nn.LayerNorm(concat_size)\r\n self.classifier = nn.Linear(concat_size, self.num_classes)\r\n \r\n elif self.multimodal_method=='late_fusion':\r\n self.wave_classifier = nn.Linear(config.wav_config.hidden_size, self.num_classes)\r\n self.text_classifier = nn.Linear(config.rnn_config.hidden_size, self.num_classes)\r\n self.norm = nn.LayerNorm(self.num_classes)\r\n\r\n elif self.multimodal_method=='stack':\r\n concat_size = config.wav_config.hidden_size + config.rnn_config.hidden_size\r\n self.norm = nn.LayerNorm(concat_size)\r\n self.dropouts = nn.ModuleList([nn.Dropout(0.5) for _ in range(5)])\r\n self.classifier = nn.Linear(concat_size, self.num_classes)\r\n\r\n elif self.multimodal_method=='residual':\r\n concat_size = config.wav_config.hidden_size + config.rnn_config.hidden_size\r\n self.norm = nn.LayerNorm(concat_size)\r\n self.res_block = nn.Sequential(\r\n nn.Linear(concat_size, 512),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.res_block2 = nn.Sequential(\r\n nn.Linear(concat_size+512, concat_size),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.classifier = nn.Linear(concat_size, self.num_classes)\r\n \r\n elif self.multimodal_method=='rsa':\r\n concat_size = config.wav_config.hidden_size + config.rnn_config.hidden_size\r\n self.attn = SelfAttention(1, 256)\r\n self.norm = nn.LayerNorm(concat_size)\r\n self.res_block = nn.Sequential(\r\n 
nn.Linear(concat_size, 512),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.res_block2 = nn.Sequential(\r\n nn.Linear(concat_size+512, concat_size),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.classifier = nn.Linear(concat_size, self.num_classes)\r\n \r\n elif self.multimodal_method=='rsa_cfn':\r\n self.wav_proj = nn.Linear(config.wav_config.hidden_size, 128)\r\n self.rnn_proj = nn.Linear(config.rnn_config.hidden_size, 128)\r\n \r\n self.wave_attn = SelfAttention(1, 96)\r\n self.text_attn = SelfAttention(1, 96)\r\n \r\n self.wav_norm = nn.LayerNorm(128)\r\n self.rnn_norm = nn.LayerNorm(128)\r\n \r\n cross_fusion_size = (128+1) * (128+1)\r\n \r\n self.res_block = nn.Sequential(\r\n nn.Linear(cross_fusion_size, 1024),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.res_block2 = nn.Sequential(\r\n nn.Linear(cross_fusion_size+1024, cross_fusion_size),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.classifier = nn.Sequential(\r\n nn.Linear(cross_fusion_size, 1024),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(1024, self.num_classes)\r\n )\r\n \r\n\r\n elif self.multimodal_method=='hybrid_fusion':\r\n concat_size = config.wav_config.hidden_size + config.rnn_config.hidden_size\r\n \r\n # base\r\n self.base_norm = nn.LayerNorm(concat_size)\r\n self.base_classifier = nn.Linear(concat_size, self.num_classes)\r\n \r\n # late fusion\r\n self.wave_classifier = nn.Linear(config.wav_config.hidden_size, self.num_classes)\r\n self.text_classifier = nn.Linear(config.rnn_config.hidden_size, self.num_classes)\r\n self.lf_norm = nn.LayerNorm(self.num_classes)\r\n \r\n # residuals\r\n self.res_norm = nn.LayerNorm(concat_size)\r\n self.res_block = nn.Sequential(\r\n nn.Linear(concat_size, 512),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.res_block2 = nn.Sequential(\r\n nn.Linear(concat_size+512, concat_size),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.res_classifier = nn.Linear(concat_size, self.num_classes)\r\n \r\n # residuals_attn_cf\r\n self.wav_proj = nn.Linear(config.wav_config.hidden_size, 128)\r\n self.rnn_proj = nn.Linear(config.rnn_config.hidden_size, 128)\r\n \r\n self.wave_attn = SelfAttention(1, 96)\r\n self.text_attn = SelfAttention(1, 96)\r\n \r\n self.wav_norm = nn.LayerNorm(128)\r\n self.rnn_norm = nn.LayerNorm(128)\r\n \r\n cross_fusion_size = (128+1) * (128+1)\r\n \r\n self.rac_res_block = nn.Sequential(\r\n nn.Linear(cross_fusion_size, 1024),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.rac_res_block2 = nn.Sequential(\r\n nn.Linear(cross_fusion_size+1024, cross_fusion_size),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n )\r\n self.rac_classifier = nn.Sequential(\r\n nn.Linear(cross_fusion_size, 1024),\r\n nn.ReLU(),\r\n nn.Dropout(0.3),\r\n nn.Linear(1024, self.num_classes)\r\n )\r\n \r\n self.hybrid_norm = nn.LayerNorm(self.num_classes)\r\n\r\n elif self.multimodal_method=='mlp_mixer':\r\n self.mixer_layer = MultiModalMixer()\r\n \r\n def forward(self, wav_vec, rnn_vec):\r\n if self.multimodal_method=='early_fusion':\r\n x = torch.cat([wav_vec, rnn_vec], dim=1)\r\n x = self.norm(x)\r\n outputs = self.classifier(x)\r\n \r\n elif self.multimodal_method=='late_fusion':\r\n wave_output = self.wave_classifier(wav_vec)\r\n text_output = self.text_classifier(rnn_vec)\r\n outputs = self.norm(torch.stack([wave_output, text_output], dim=1))\r\n outputs = torch.mean(outputs, dim=1)\r\n \r\n elif self.multimodal_method=='stack':\r\n x = torch.cat([wav_vec, rnn_vec], dim=1)\r\n x = self.norm(x)\r\n for i, dropout in enumerate(self.dropouts):\r\n if i==0:\r\n outputs 
= self.classifier(dropout(x))\r\n else:\r\n outputs += self.classifier(dropout(x))\r\n else:\r\n outputs /= len(self.dropouts)\r\n \r\n elif self.multimodal_method=='residual':\r\n x = torch.cat([wav_vec, rnn_vec], dim=1)\r\n x = self.norm(x)\r\n\r\n res = self.res_block(x)\r\n res_concat = torch.cat([x, res], dim=1)\r\n\r\n res2 = self.res_block2(res_concat)\r\n outputs = self.classifier(x + res2)\r\n \r\n elif self.multimodal_method=='rsa':\r\n x = torch.cat([wav_vec, rnn_vec], dim=1)\r\n x = self.norm(x)\r\n\r\n attn_x, attn_weights = self.attn(x)\r\n \r\n res = self.res_block(attn_x)\r\n res_concat = torch.cat([attn_x, res], dim=1)\r\n\r\n res2 = self.res_block2(res_concat)\r\n outputs = self.classifier(x + res2)\r\n\r\n return outputs, attn_weights\r\n\r\n elif self.multimodal_method=='rsa_cfn':\r\n bs = wav_vec.size()[0]\r\n wav_vec_proj = self.wav_proj(wav_vec)\r\n rnn_vec_proj = self.rnn_proj(rnn_vec)\r\n \r\n wav_vec = self.wav_norm(self.wave_attn(wav_vec_proj)[0] + wav_vec_proj)\r\n rnn_vec = self.rnn_norm(self.text_attn(rnn_vec_proj)[0] + rnn_vec_proj)\r\n \r\n _wav = torch.cat((\r\n Variable(torch.ones(bs, 1).type(wav_vec.dtype).to(wav_vec.device), requires_grad=False), wav_vec), dim=1)\r\n _rnn = torch.cat((\r\n Variable(torch.ones(bs, 1).type(rnn_vec.dtype).to(rnn_vec.device), requires_grad=False), rnn_vec), dim=1)\r\n \r\n cross_fusion = torch.matmul(_wav.unsqueeze(2), _rnn.unsqueeze(1)).view(bs, -1)\r\n \r\n res = self.res_block(cross_fusion)\r\n res_concat = torch.cat([cross_fusion, res], dim=1)\r\n\r\n res2 = self.res_block2(res_concat)\r\n outputs = self.classifier(cross_fusion + res2) \r\n\r\n elif self.multimodal_method=='hybrid_fusion':\r\n bs = wav_vec.size()[0]\r\n \r\n # base\r\n base = torch.cat([wav_vec, rnn_vec], dim=1)\r\n base_outputs = self.base_classifier(self.base_norm(base))\r\n\r\n # late fusion\r\n wave_output = self.wave_classifier(wav_vec)\r\n text_output = self.text_classifier(rnn_vec)\r\n lf_outputs = torch.mean(self.lf_norm(torch.stack([wave_output, text_output], dim=1)), dim=1)\r\n \r\n # residuals\r\n x = self.res_norm(torch.cat([wav_vec, rnn_vec], dim=1))\r\n res = self.res_block(x)\r\n res_concat = torch.cat([x, res], dim=1)\r\n res2 = self.res_block2(res_concat)\r\n res_outputs = self.res_classifier(x + res2)\r\n\r\n # residuals_attn_cf\r\n wav_vec_proj = self.wav_proj(wav_vec)\r\n rnn_vec_proj = self.rnn_proj(rnn_vec)\r\n \r\n wav_vec = self.wav_norm(self.wave_attn(wav_vec_proj)[0] + wav_vec_proj)\r\n rnn_vec = self.rnn_norm(self.text_attn(rnn_vec_proj)[0] + rnn_vec_proj)\r\n \r\n _wav = torch.cat((\r\n Variable(torch.ones(bs, 1).type(wav_vec.dtype).to(wav_vec.device), requires_grad=False), wav_vec), dim=1)\r\n _rnn = torch.cat((\r\n Variable(torch.ones(bs, 1).type(rnn_vec.dtype).to(rnn_vec.device), requires_grad=False), rnn_vec), dim=1)\r\n \r\n cross_fusion = torch.matmul(_wav.unsqueeze(2), _rnn.unsqueeze(1)).view(bs, -1)\r\n \r\n res = self.rac_res_block(cross_fusion)\r\n res_concat = torch.cat([cross_fusion, res], dim=1)\r\n\r\n res2 = self.rac_res_block2(res_concat)\r\n rac_outputs = self.rac_classifier(cross_fusion + res2)\r\n \r\n # soft voting\r\n outputs = self.hybrid_norm(torch.stack([base_outputs, lf_outputs, res_outputs, rac_outputs], dim=1))\r\n outputs = torch.mean(outputs, dim=1)\r\n \r\n elif self.multimodal_method=='mlp_mixer':\r\n outputs = self.mixer_layer(wav_vec, rnn_vec)\r\n \r\n return outputs\r\n\r\nclass MultiModel(nn.Module):\r\n \"\"\"wav2vec, text Multi Modal Model\"\"\"\r\n def __init__(self, config):\r\n 
super().__init__()\r\n\r\n self.wav_model = Wav2Vec2Model.from_pretrained(config.wav_model, config=config.wav_config)\r\n if 'xlm' in config.rnn_model:\r\n self.rnn_model = XLMRobertaModel.from_pretrained(config.rnn_model, config=config.rnn_config)\r\n else:\r\n self.rnn_model = AutoModel.from_pretrained(config.rnn_model, config=config.rnn_config)\r\n \r\n self.multimodal_method = config.multimodal_method\r\n self.classifier = MultiModalClassificationHead(config)\r\n\r\n def wav_freeze_feature_extractor(self):\r\n self.wav_model.feature_extractor._freeze_parameters()\r\n\r\n def forward(self, input_values, input_ids, attention_mask, token_type_ids):\r\n\r\n if self.multimodal_method != 'mlp_mixer':\r\n wav_vec = torch.mean(\r\n self.wav_model(input_values)[0],\r\n dim=1)\r\n rnn_vec = torch.mean(\r\n self.rnn_model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)['last_hidden_state'],\r\n dim=1)\r\n else:\r\n wav_vec = self.wav_model(input_values)[0]\r\n rnn_vec = self.rnn_model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)['last_hidden_state']\r\n \r\n outputs = self.classifier(wav_vec, rnn_vec)\r\n\r\n return outputs\r\n \r\n", "repo_name": "hyeonho1028/RSA-CFN", "sub_path": "src/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 16485, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.tanh", "line_number": 28, "usage_type": "call"}, {"api_name": "transformers.Wav2Vec2PreTrainedModel", "line_number": 33, "usage_type": "name"}, {"api_name": "transformers.Wav2Vec2Model", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 58, "usage_type": "call"}, {"api_name": "transformers.XLMRobertaModel", "line_number": 92, "usage_type": "name"}, {"api_name": "transformers.XLMRobertaModel", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 97, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 111, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 120, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 121, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn", 
"line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 138, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 138, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 140, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 147, "usage_type": "name"}, {"api_name": "layer.SelfAttention", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 152, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 153, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 154, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 155, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 158, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", 
"line_number": 161, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 166, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "layer.SelfAttention", "line_number": 169, "usage_type": "call"}, {"api_name": "layer.SelfAttention", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 178, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 179, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 179, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 182, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 183, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 200, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 203, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 204, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
205, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 209, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 211, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 212, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 212, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 214, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 215, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 216, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 217, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 219, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 222, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 223, "usage_type": "name"}, {"api_name": "layer.SelfAttention", "line_number": 225, "usage_type": "call"}, {"api_name": "layer.SelfAttention", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.nn.LayerNorm", "line_number": 228, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 228, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 229, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 233, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 234, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 234, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 235, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 236, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 236, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 238, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 239, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 240, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 241, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 243, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 243, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 244, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 244, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 245, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 247, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 247, "usage_type": "name"}, {"api_name": "torch.nn.LayerNorm", "line_number": 250, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 250, "usage_type": "name"}, {"api_name": "layer.MultiModalMixer", "line_number": 253, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 257, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 264, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 265, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 283, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 318, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 327, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 333, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 349, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 350, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 350, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 351, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.matmul", "line_number": 354, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 357, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 363, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 364, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 371, "usage_type": "name"}, {"api_name": "transformers.Wav2Vec2Model.from_pretrained", "line_number": 376, "usage_type": "call"}, {"api_name": "transformers.Wav2Vec2Model", "line_number": 376, "usage_type": "name"}, {"api_name": "transformers.XLMRobertaModel.from_pretrained", "line_number": 378, "usage_type": "call"}, {"api_name": "transformers.XLMRobertaModel", "line_number": 378, "usage_type": "name"}, {"api_name": "transformers.AutoModel.from_pretrained", 
"line_number": 380, "usage_type": "call"}, {"api_name": "transformers.AutoModel", "line_number": 380, "usage_type": "name"}, {"api_name": "torch.mean", "line_number": 391, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 394, "usage_type": "call"}]} +{"seq_id": "37282107308", "text": "#\n# @lc app=leetcode id=1296 lang=python3\n#\n# [1296] Divide Array in Sets of K Consecutive Numbers\n#\n# https://leetcode.com/problems/divide-array-in-sets-of-k-consecutive-numbers/description/\n#\n# algorithms\n# Medium (48.09%)\n# Likes: 135\n# Dislikes: 10\n# Total Accepted: 8.8K\n# Total Submissions: 18.3K\n# Testcase Example: '[1,2,3,3,4,4,5,6]\\n4'\n#\n# Given an array of integers nums and a positive integer k, find whether it's\n# possible to divide this array into sets of k consecutive numbers\n# Return True if its possible otherwise return False.\n# \n# \n# Example 1:\n# \n# \n# Input: nums = [1,2,3,3,4,4,5,6], k = 4\n# Output: true\n# Explanation: Array can be divided into [1,2,3,4] and [3,4,5,6].\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [3,2,1,2,3,4,3,4,5,9,10,11], k = 3\n# Output: true\n# Explanation: Array can be divided into [1,2,3] , [2,3,4] , [3,4,5] and\n# [9,10,11].\n# \n# \n# Example 3:\n# \n# \n# Input: nums = [3,3,2,2,1,1], k = 3\n# Output: true\n# \n# \n# Example 4:\n# \n# \n# Input: nums = [1,2,3,4], k = 3\n# Output: false\n# Explanation: Each array should be divided in subarrays of size 3.\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= nums.length <= 10^5\n# 1 <= nums[i] <= 10^9\n# 1 <= k <= nums.length\n# \n#\n\n# @lc code=start\nfrom collections import Counter\n\nclass Solution:\n def isPossibleDivide(self, nums: List[int], k: int) -> bool:\n if not len(nums) % k == 0:\n return False\n\n count = Counter(nums)\n keys = sorted(count.keys())\n\n for n in keys:\n if count[n] > 0:\n minus = count[n]\n for i in range(n, n + k):\n if count[i] < minus:\n return False\n count[i] -= minus\n \n return True\n \n# @lc code=end\n\n", "repo_name": "chenxu0602/LeetCode", "sub_path": "1296.divide-array-in-sets-of-k-consecutive-numbers.py", "file_name": "1296.divide-array-in-sets-of-k-consecutive-numbers.py", "file_ext": "py", "file_size_in_byte": 1772, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "28381959860", "text": "from tkinter import *\r\n#from PIL import Image,ImageTk\r\nimport threading\r\nimport Database as dbase\r\nimport time\r\nimport PIL.Image\r\nimport PIL.ImageTk\r\n#from gtts import gTTS\r\nimport gtts\r\nimport pygame\r\nimport tkinter\r\nimport tkinter.dialog\r\nfrom tkinter import messagebox\r\n\r\n\r\n\r\nimage1=None\r\nimage2=None\r\nimage3=None\r\n\r\nsecondtop=None\r\nsecondbottom=None\r\n\r\nthirdcontroller=None\r\n\r\nfourthimage=None\r\nnextBtnImage=None\r\n\r\ningredientsList=[]\r\nmainItem=''\r\n\r\nind=0\r\n\r\nclass Brain:\r\n def __init__(self):\r\n #threading.Thread(target=self.GUIStuffs).start()\r\n self.GUIStuffs()\r\n \r\n def Speak(self,word):\r\n SONG_END=pygame.USEREVENT+1\r\n \r\n pygame.mixer.music.set_endevent(SONG_END) \r\n global ind\r\n pygame.mixer.music.set_endevent(SONG_END)\r\n tts=gtts.gTTS(text=str(word),lang='en')\r\n name='song'+str(ind)+'.mp3'\r\n ind+=1\r\n tts.save(name)\r\n pygame.mixer.music.load(name)\r\n pygame.mixer.music.play()\r\n marker=0\r\n while(1):\r\n for e in pygame.event.get():\r\n if e.type==SONG_END:\r\n marker=1\r\n break\r\n if marker==1:\r\n 
break\r\n\r\n\r\n def FinalOp(self,recepies):\r\n s1=1\r\n for g in recepies:\r\n step,gas,activity,staytime=g.split(',')\r\n\r\n WaitTime=float(staytime)\r\n\r\n t1=step\r\n t2= 'And turn the gas to '+gas\r\n t3='after that '+activity\r\n t4=' ok then wait for '+staytime+' Seconds'\r\n temptext=step+' And turn the gas to '+gas+' after that '+activity+' , ok then wait for '+staytime+' Seconds'\r\n #self.Speak(temptext)\r\n\r\n self.Speak(t1)\r\n self.Speak(t2)\r\n self.Speak(t3)\r\n self.Speak(t4)\r\n\r\n \r\n self.T.insert(END,'step '+str(s1)+': '+temptext+'\\n')\r\n s1+=1\r\n time.sleep(WaitTime)\r\n messagebox.showinfo(title=\"Finished\",message=\"Food successfully coooked\")\r\n self.root.destroy()\r\n \r\n \r\n \r\n def SixthPage(self):\r\n self.frame4.pack_forget()\r\n self.frame5.pack()\r\n recepies=dbase.RetrieveRecepie(mainItem)\r\n self.T=Text(self.frame5,height=30)\r\n self.T.pack()\r\n threading.Thread(target=self.FinalOp,args=(recepies,)).start()\r\n \r\n\r\n\r\n def FifthPage(self):\r\n self.frame3.pack_forget()\r\n self.frame4.pack()\r\n T=Text(self.frame4,height=28)\r\n T.pack()\r\n\r\n \r\n \r\n nextBtn=Button(self.frame4,text='Next',command=self.SixthPage)\r\n nextBtn.pack()\r\n \r\n textcombo=''\r\n for a in ingredientsList:\r\n textcombo+=a+'\\n'\r\n T.insert(END,textcombo)\r\n T.config(state=DISABLED)\r\n\r\n nextBtn['state']='disabled'\r\n\r\n ComputerIns=\"so you are preparing \"+mainItem+\" today here are the ingredients you should make ready before going to next step,once you are done click on the next button to start cooking today's dish\"\r\n self.Speak(ComputerIns)\r\n nextBtn['state']='normal'\r\n \r\n \r\n def ClickOnItem(self,evt):\r\n global ingredientsList,mainItem\r\n w=evt.widget\r\n index=int(w.curselection()[0])\r\n value=w.get(index)\r\n ingredients=dbase.RetrieveIngredients(value)\r\n mainItem=value\r\n ingredientsList=ingredients\r\n threading.Thread(target=self.FifthPage).start()\r\n #self.FifthPage()\r\n \r\n def FourthPage(self):\r\n global fourthimage\r\n try:\r\n self.frame2.pack_forget()\r\n self.frame1.pack_forget()\r\n except:\r\n \r\n self.frame1.pack_forget()\r\n \r\n self.frame3.pack()\r\n\r\n MenuItems=dbase.RetrieveMenu()\r\n \r\n fourthimage=PIL.ImageTk.PhotoImage(file='Menu.png')\r\n\r\n lbl=Label(self.frame3,text=\"MENU\",image=fourthimage)\r\n\r\n lbl.pack()\r\n \r\n lstbox=Listbox(self.frame3,width=640,height=280)\r\n #lstbox.grid(row=1,column=0)\r\n lstbox.pack()\r\n \r\n for i in MenuItems:\r\n lstbox.insert(END,i)\r\n lstbox.bind('<>',self.ClickOnItem)\r\n\r\n ComputerText='Okay now you are ready to see our menu, select any item that you wanna cook today '\r\n self.Speak(ComputerText)\r\n \r\n def ThirdPage(self):\r\n global thirdcontroller\r\n Tips_text=\"Okay so you are new user then look carefully to the picture describing you the stove flames level which will be useful in later cooking instructions, After you are done you can click on the picture\"\r\n \r\n\r\n self.frame1.pack_forget()\r\n self.frame2.pack()\r\n\r\n thirdcontroller=PIL.ImageTk.PhotoImage(file='controller.png')\r\n thirdbutton=Button(self.frame2,borderwidth=0,image=thirdcontroller,highlightthickness=0,command=self.FourthPage)\r\n thirdbutton.pack()\r\n thirdbutton['state']='disabled'\r\n self.Speak(Tips_text)\r\n thirdbutton['state']='normal'\r\n \r\n \r\n def SecondPage(self):\r\n global image2,secondtop,secondbottom\r\n \r\n self.frame0.pack_forget()\r\n self.frame1.pack()\r\n \r\n ##Second Page\r\n Greetings_Text='Hello, Welcome to cooking guide, you can 
click on the screen for next step based on given instructions , new if you are new user and old if you are an old user' \r\n \r\n\r\n \r\n \r\n secondtop=PIL.ImageTk.PhotoImage(file='secondtop.png')\r\n secondbottom=PIL.ImageTk.PhotoImage(file='secondbottom.png')\r\n\r\n topbtn=Button(self.frame1,borderwidth=0,image=secondtop,highlightthickness=0,command=self.ThirdPage)\r\n topbtn.pack()\r\n\r\n downbtn=Button(self.frame1,borderwidth=0,image=secondbottom,highlightthickness=0,command=self.FourthPage)\r\n downbtn.pack()\r\n topbtn['state']='disabled'\r\n downbtn['state']='disabled'\r\n \r\n self.Speak(Greetings_Text)\r\n topbtn['state']='normal'\r\n downbtn['state']='normal'\r\n\r\n ##\r\n \r\n def FrontPage(self):\r\n global image1\r\n #self.frame9.grid_forget()\r\n self.frame0.pack()\r\n image1=PhotoImage(file='first.png')\r\n BtnStart=Button(self.frame0,borderwidth=0,image=image1,highlightthickness=0,command=self.SecondPage)\r\n BtnStart.pack()\r\n \r\n ##\r\n def TempButton(self):\r\n self.frame9=Frame(self.root)\r\n self.frame9.pack()\r\n tempbtn=Button(self.frame9,text='start',command=self.FrontPage)\r\n tempbtn.pack()\r\n \r\n\r\n def GUIStuffs(self):\r\n self.root=Tk()\r\n self.root.title('Foody')\r\n\r\n screen_width=self.root.winfo_screenwidth()\r\n screen_height=self.root.winfo_screenheight()\r\n\r\n x=(screen_width/2)-(640/2)\r\n y=(screen_height/2)-(480/2)\r\n \r\n \r\n self.root.geometry('%dx%d+%d+%d'%(640,480,x,y))\r\n\r\n self.root.resizable(0,0)\r\n self.root.configure(background='#008080')\r\n self.frame0=Frame(self.root)\r\n self.frame1=Frame(self.root)\r\n self.frame2=Frame(self.root)\r\n self.frame3=Frame(self.root)\r\n self.frame4=Frame(self.root)\r\n self.frame5=Frame(self.root)\r\n self.FrontPage()\r\n self.root.mainloop()\r\n\r\npygame.init()\r\npygame.mixer.init()\r\nb=Brain()\r\n\r\n", "repo_name": "Sasuke214/FoodCooker", "sub_path": "brainAudio.py", "file_name": "brainAudio.py", "file_ext": "py", "file_size_in_byte": 9372, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.USEREVENT", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_endevent", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.set_endevent", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 44, "usage_type": "attribute"}, {"api_name": "gtts.gTTS", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.mixer.music.load", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 49, "usage_type": "attribute"}, {"api_name": "pygame.mixer.music.play", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 53, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 83, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 84, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 84, "usage_type": "name"}, {"api_name": "Database.RetrieveRecepie", "line_number": 92, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 95, "usage_type": "call"}, {"api_name": "Database.RetrieveIngredients", "line_number": 128, "usage_type": "call"}, {"api_name": 
"threading.Thread", "line_number": 131, "usage_type": "call"}, {"api_name": "Database.RetrieveMenu", "line_number": 145, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 147, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 147, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 147, "usage_type": "name"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 172, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 172, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 172, "usage_type": "name"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 192, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 192, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 192, "usage_type": "name"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 193, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 193, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 193, "usage_type": "name"}, {"api_name": "pygame.init", "line_number": 249, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 250, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 250, "usage_type": "attribute"}]} +{"seq_id": "73810354724", "text": "import numpy as np\nfrom sklearn.svm import LinearSVC,SVC\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n#1. 데이터\nx_data = [[0, 0],[0, 1],[1, 0],[1, 1]]\ny_data = [0, 1, 1, 0]\nx_data = np.array(x_data)\ny_data = np.array(y_data)\n\nx_data=np.asarray(x_data).astype(np.int)\ny_data=np.asarray(y_data).astype(np.int)\n\n\n# x_data=np.asarray(x_data).astype(np.float)\n# y_data=np.asarray(y_data).astype(np.float)\n\n\n#2. 모델\n# model = LinearSVC()\n# model = Perceptron()\n# model = SVC()\n\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=2, activation='relu'))\n# model.add(Dense(8, input_dim=2))#, activation='relu'))\n# model.add(Dense(4, input_dim=2, activation='relu'))\n# model.add(Dense(2, input_dim=2))#, activation='relu'))\nmodel.add(Dense(1, input_dim=2, activation='sigmoid'))\n\n#3. 훈련\nmodel.compile(loss='mse', optimizer='adam') \nmodel.fit(x_data, y_data)\n\n#4. 
평가\n\ny_pred = model.predict(x_data)\n\nresults = model.evaluate(x_data, y_data)\n\nprint(x_data, \"의 예측결과 :\", y_pred)\n# print('matrics_acc : ', results[1])\n\n\nacc = accuracy_score(y_data, np.round(y_pred,0))\n\n# result = model.predict([1,1])\nprint('acc 의 예측값 : ', acc)\n", "repo_name": "jangsejong/STUDY", "sub_path": "machine_running/ml01_02/ml02_5_xor_keras.py", "file_name": "ml02_5_xor_keras.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "73054757926", "text": "\"\"\"A skiplist implementation of the List interface\n\nW. Pugh. Skip Lists: A probabilistic alternative to balanced trees. \n In Communications of the ACM, 33(6), pp. 668-676, June 1990.\n\nW. Pugh. A skip list cookbook. CS-TR-2286.1, University of Maryland, \n College Park, 1990.\n\"\"\"\nimport random\nimport numpy\nfrom utils import new_array\nfrom base import BaseList\n\n \nclass SkiplistList(BaseList):\n class Node(object):\n \"\"\"A node in a skip list\"\"\"\n def __init__(self, x, h):\n self.x = x\n self.next = new_array(h+1)\n self.length = numpy.ones(h+1, dtype=int)\n\n def height(self):\n return len(self.next) - 1\n\n def _new_node(self, x, h):\n return SkiplistList.Node(x, h)\n \n def __init__(self, iterable=[]):\n self._initialize()\n self.add_all(iterable)\n \n def _initialize(self):\n self.h = 0\n self.n = 0\n self.sentinel = self._new_node(None, 32)\n self.stack = new_array(self.sentinel.height()+1)\n \n def find_pred(self, i):\n u = self.sentinel\n r = self.h\n j = -1\n while r >= 0:\n while u.next[r] is not None and j + u.length[r] < i:\n j += u.length[r]\n u = u.next[r] # go right in list r\n r -= 1 # go down into list r-1\n return u\n\n def get(self, i):\n if i < 0 or i > self.n-1: raise IndexError()\n return self.find_pred(i).next[0].x\n\n def set(self, i, x):\n if i < 0 or i > self.n-1: raise IndexError()\n u = self.find_pred(i).next[0]\n y = u.x\n u.x = x\n return y\n \n def _add(self, i, w):\n u = self.sentinel\n k = w.height()\n r = self.h\n j = -1\n while r >= 0:\n while u.next[r] is not None and j+u.length[r] < i:\n j += u.length[r]\n u = u.next[r]\n u.length[r] += 1\n if r <= k:\n w.next[r] = u.next[r]\n u.next[r] = w\n w.length[r] = u.length[r] - (i-j)\n u.length[r] = i - j\n r -= 1\n self.n += 1\n return u\n \n def add(self, i, x):\n if i < 0 or i > self.n: raise IndexError()\n w = self._new_node(x, self.pick_height())\n if w.height() > self.h:\n self.h = w.height()\n self._add(i, w)\n \n def remove(self, i):\n if i < 0 or i > self.n-1: raise IndexError()\n u = self.sentinel\n r = self.h\n j = -1\n while r >= 0:\n while 
u.next[r] is not None and j + u.length[r] < i:\n j += u.length[r]\n u = u.next[r]\n u.length[r] -= 1\n if j + u.length[r] + 1 == i and u.next[r] is not None:\n x = u.next[r].x\n u.length[r] += u.next[r].length[r]\n u.next[r] = u.next[r].next[r]\n if u == self.sentinel and u.next[r] is None:\n self.h -= 1\n r -= 1\n self.n -= 1\n return x \n\n def __iter__(self):\n u = self.sentinel.next[0]\n while u is not None:\n yield u.x\n u = u.next[0]\n\n def pick_height(self):\n z = random.getrandbits(32)\n k = 0\n while z & 1:\n k += 1\n z = z // 2\n return k\n\n def truncate(self, i):\n lista = SkiplistList() #para criar um novo nó sentinela\n lista.n = self.n - i # tamanho da nova lista\n lista.h = self.h # a principio de mesma altura da antiga\n \n #percorreremos a lista da mesma forma que remove() porém ajustando o next[r] do nó anterior ao truncamento (novo ultimo nó)\n u = self.sentinel\n r = self.h\n j = -1\n \n while r >= 0:\n while u.next[r] is not None and j + u.length[r] < i:\n j += u.length[r]\n u = u.next[r] # go right in list r\n if j + u.length[r] >= i:\n lista.sentinel.next[r] = u.next[r] # o novo sentinela aponta para onde o nó u apontava\n u.next[r] = None #o nó u na altura r passa a ser o ultimo, logo aponta para None\n\n # como não sabemos se o nó com maior altura ficara na lista nova ou antiga, precisamos ajustar o tamanho\n if self.sentinel.next[r] == None: # caso o maior esteja na Nova lista\n self.h -= 1\n if lista.sentinel.next[r] == None: # caso o maior esteja na Antiga lista\n lista.h -= 1\n r -= 1 # go down into list r-1\n \n self.n = i # tamanho da lista antiga atualizado\n \n # por fim retornamos a lista Nova, criada a partir da Antiga\n return lista\n\n\n \n\n\n\n", "repo_name": "luismigsantana/opendata-structure", "sub_path": "skiplistlist.py", "file_name": "skiplistlist.py", "file_ext": "py", "file_size_in_byte": 4761, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base.BaseList", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.new_array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.new_array", "line_number": 37, "usage_type": "call"}, {"api_name": "random.getrandbits", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "30609705057", "text": "import io\nimport sys\n\n_INPUT = \"\"\"\\\n6\n4 2 3 3 2\n6 4 2 1 1\n100 1 1 10 10\n\"\"\"\n\nsys.stdin = io.StringIO(_INPUT)\ncase_no=int(input())\nfor __ in range(case_no):\n mod=998244353\n N,A,B,P,Q=map(int,input().split())\n p=pow(P,mod-2,mod)\n q=pow(Q,mod-2,mod)\n dp=[0]*(2*100**2)\n def idx(i,j,k):return i*200+j*2+k\n for i in range(100):\n for j in range(100):\n for k in range(2):\n if k==0:\n if i==0: dp[idx(i,j,k)]=1\n else:\n if j==0: dp[idx(i,j,k)]=0\n else:\n for l in range(P):\n dp[idx(i,j,k)]+=dp[idx(max(i-l-1,0),j,1)]*p\n dp[idx(i,j,k)]%=mod\n else:\n if i==0: dp[idx(i,j,k)]=1\n else:\n if j==0: dp[idx(i,j,k)]=0\n else:\n for l in range(Q):\n dp[idx(i,j,k)]+=dp[idx(i,max(j-l-1,0),0)]*q\n dp[idx(i,j,k)]%=mod\n print(dp[idx(N-A,N-B,0)])", "repo_name": "katonyonko/ABC298", "sub_path": "ABC298_E.py", "file_name": "ABC298_E.py", "file_ext": "py", "file_size_in_byte": 908, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin", "line_number": 11, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 11, "usage_type": "call"}]} 
+{"seq_id": "28639374116", "text": "from django.contrib.auth import get_user_model\nfrom django.core.validators import RegexValidator, MinValueValidator\nfrom django.db import models\n\nUser = get_user_model()\n\n\nclass Ingredient(models.Model):\n name = models.CharField(max_length=200, verbose_name='Название')\n measurement_unit = models.CharField(\n max_length=200, verbose_name='Единицы измерения'\n )\n\n class Meta:\n verbose_name = 'Ингредиент'\n verbose_name_plural = 'Ингредиенты'\n\n def __str__(self):\n return f'{self.name} ({self.measurement_unit})'\n\n\nclass Recipe(models.Model):\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='recipes',\n verbose_name='Автор')\n name = models.CharField(\n max_length=200,\n verbose_name='Название рецепта')\n image = models.ImageField(upload_to='images/', blank=True)\n\n text = models.TextField(verbose_name='Текст')\n ingredients = models.ManyToManyField(\n Ingredient,\n through='RecipeIngredient',\n through_fields=('recipe', 'ingredient'),\n verbose_name='Ингредиенты',\n\n )\n tags = models.ManyToManyField('Tag',\n verbose_name='Тег')\n pub_date = models.DateTimeField(\n verbose_name='Дата публикации',\n auto_now_add=True,\n db_index=True\n )\n cooking_time = models.PositiveIntegerField(validators=[MinValueValidator(1)])\n\n class Meta:\n ordering = ('-pub_date', )\n verbose_name = 'Рецепт'\n verbose_name_plural = 'Рецепты'\n\n def __str__(self):\n return self.name\n\n\nclass RecipeIngredient(models.Model):\n amount = models.PositiveIntegerField(\n verbose_name='Количество',\n validators=[MinValueValidator(1)],\n )\n ingredient = models.ForeignKey(\n Ingredient,\n on_delete=models.CASCADE,\n verbose_name='Ингредиент',\n related_name='recipe_ingredients'\n )\n recipe = models.ForeignKey(\n Recipe,\n on_delete=models.CASCADE,\n verbose_name='Рецепт',\n related_name='recipe_ingredients'\n )\n\n def __str__(self):\n return f'{self.ingredient} в {self.recipe}'\n\n class Meta:\n verbose_name = 'Ингредиент в рецепте'\n verbose_name_plural = 'Ингредиенты в рецептах'\n\n\nclass Cart(models.Model):\n \"\"\"Корзина\"\"\"\n recipe = models.ForeignKey(Recipe,\n on_delete=models.CASCADE,\n related_name='carts')\n owner = models.ForeignKey(User,\n on_delete=models.CASCADE,\n related_name='carts')\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['recipe', 'owner'],\n name='recipe_owner_unique'\n )\n ]\n verbose_name = 'Корзина'\n verbose_name_plural = 'Корзины'\n\n def __str__(self):\n return f'{self.owner}:{self.recipe}'\n\n\nclass FavoriteRecipes(models.Model):\n \"\"\"Избранные рецепты пользоваьелей\"\"\"\n user = models.ForeignKey(User,\n related_name='favorite_recipes',\n on_delete=models.CASCADE)\n recipe = models.ForeignKey(Recipe,\n on_delete=models.CASCADE,\n related_name='favorite_recipes')\n\n class Meta:\n constraints = [\n models.UniqueConstraint(\n fields=['recipe', 'user'],\n name='recipe_user_unique'\n )\n ]\n verbose_name = 'Избранный рецепт'\n verbose_name_plural = 'Избранные рецепты'\n\n def __str__(self):\n return f'Пользователь: {self.user} Избранный рецепт: {self.recipe}'\n\n\nclass Tag(models.Model):\n \"\"\"Тэг\"\"\"\n name = models.CharField(max_length=200,\n unique=True)\n color = models.CharField(max_length=7,\n unique=True, )\n slug = models.SlugField(max_length=200,\n unique=True,\n validators=[RegexValidator(\n regex='^[-a-zA-Z0-9_]+$')]\n )\n\n class Meta:\n verbose_name = 'Тег'\n verbose_name_plural = 'Теги'\n\n def __str__(self):\n return self.slug\n", "repo_name": 
"MiskivEA/foodgram-project-react", "sub_path": "backend/foodgram/app/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 4611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.get_user_model", "line_number": 5, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 10, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.core.validators.MinValueValidator", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models.ForeignKey", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 66, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 72, 
"usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 85, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 88, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 91, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "django.db.models.UniqueConstraint", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 108, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 108, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 110, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 112, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 113, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 113, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 114, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.db.models.UniqueConstraint", "line_number": 119, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 119, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 131, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 131, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 133, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 133, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 135, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 135, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 137, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 137, "usage_type": "name"}, {"api_name": "django.core.validators.RegexValidator", "line_number": 139, "usage_type": "call"}]} +{"seq_id": "5884325267", "text": "import argparse\nimport pandas as pd\nimport cv2\nimport os\nimport shutil\nfrom fastprogress import progress_bar\nfrom PIL import Image\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--image_dir', help=\"Direcotry containning the image data\")\n# parser.add_argument(\"--target_dir\", help=\"Direcotry which save propressing img data\")\n\nBANDS_NAMES = ['_red.png', '_green.png', '_blue.png', '_yellow.png']\n\ndef propressing(csv_file, img_dir, target_dir):\n csv_pd = pd.read_csv(csv_file)\n total = len(csv_pd)\n for index in progress_bar(range(total)):\n name = csv_pd.iloc[index].Id\n path = os.path.join(img_dir, name)\n image_bands = []\n for brand in BANDS_NAMES:\n image_path = 
path + brand\n image_bands.append(Image.open(image_path))\n image = Image.merge('RGBA', bands=image_bands)\n image = image.convert(\"RGB\")\n new_path = os.path.join(target_dir, name+\".png\")\n image.save(new_path)\n\ndef get_small_data_by_random(data_dir):\n csv_file = os.path.join(data_dir, 'train.csv')\n img_dir = os.path.join(data_dir, 'processing_train')\n target_dir = os.path.join(data_dir, 'small_train')\n small_csv_file = os.path.join(data_dir, \"small_data.csv\")\n\n csv_pd = pd.read_csv(csv_file)\n small_data = csv_pd.sample(frac=0.1)\n total = len(small_data)\n for index in progress_bar(range(total)):\n name = csv_pd.iloc[index].Id\n path = os.path.join(img_dir, name+\".png\")\n new_path = os.path.join(target_dir, name+\".png\")\n shutil.copy(path, new_path)\n \n small_data.to_csv(small_csv_file)\n\n\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n # propressing(csv_file, img_dir, target_dir)\n get_small_data_by_random(args.image_dir)", "repo_name": "HaiwenZhang/human-protein-atlas-image-classification", "sub_path": "data/prepare.py", "file_name": "prepare.py", "file_ext": "py", "file_size_in_byte": 1767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "fastprogress.progress_bar", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 25, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 25, "usage_type": "name"}, {"api_name": "PIL.Image.merge", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 37, "usage_type": "call"}, {"api_name": "fastprogress.progress_bar", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "13013884951", "text": "import datetime\n\nfrom django.db import models\nfrom django.http import Http404\nfrom django.shortcuts import redirect\n\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom wagtail.admin.panels import FieldPanel\nfrom wagtail.search import index\n\nfrom apps.common.models import HipBasePage\nfrom apps.disease_control.models import 
DiseaseAndConditionDetailPage\n\nfrom .utils import zipcode_validator\n\n\nclass HealthAlertListPage(HipBasePage):\n # There can be only one HealthAlertListPage\n max_count = 1\n parent_page_types = [\"hip.HomePage\"]\n subpage_types = [\"health_alerts.HealthAlertDetailPage\"]\n\n def get_context(self, request):\n \"\"\"\n Add health_alerts queryset and right_nav_headings to context.\n \"\"\"\n context = super().get_context(request)\n\n # Get all live HealthAlerts, ordered date descending.\n health_alerts = (\n HealthAlertDetailPage.objects.child_of(self).order_by(\"-alert_date\").live()\n )\n context[\"health_alerts\"] = health_alerts\n\n # Get list of each year that we have an alert for. This is used for grouping of\n # alerts on the page, as well as to trigger the right scroll bar to be created.\n years = [alert.alert_date.year for alert in health_alerts]\n # the following line removes duplicates but keeps things ordered (unlike sets)\n years = list(dict.fromkeys(years))\n context[\"right_nav_headings\"] = years\n\n # Get list of conditions attached to all of our health alerts, ordered by title\n conditions = DiseaseAndConditionDetailPage.objects.exclude(\n health_alerts=None\n ).order_by(\"title\")\n context[\"conditions\"] = conditions\n return context\n\n\nclass HealthAlertDetailPage(HipBasePage):\n parent_page_types = [\"health_alerts.HealthAlertListPage\"]\n subpage_types = []\n alert_file = models.ForeignKey(\n \"hip.HIPDocument\", null=True, blank=True, on_delete=models.SET_NULL\n )\n\n class Priority(models.IntegerChoices):\n UPDATE = 1\n NOTIFICATION = 2\n ADVISORY = 3\n ALERT = 4\n\n priority = models.IntegerField(choices=Priority.choices)\n\n alert_date = models.DateField(default=datetime.date.today)\n\n disease = models.ForeignKey(\n DiseaseAndConditionDetailPage,\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name=\"health_alerts\",\n )\n\n content_panels = HipBasePage.content_panels + [\n FieldPanel(\"alert_file\"),\n FieldPanel(\"priority\"),\n FieldPanel(\"alert_date\"),\n FieldPanel(\"disease\"),\n ]\n search_fields = HipBasePage.search_fields + [\n index.SearchField(\"get_priority_display\"),\n ]\n\n def get_priority_icon(self):\n \"\"\"\n Get the proper icon for this priority.\n \"\"\"\n if self.priority == self.Priority.UPDATE:\n return \"fa-arrow-alt-circle-up\"\n elif self.priority == self.Priority.NOTIFICATION:\n return \"fa-info-circle\"\n elif self.priority == self.Priority.ADVISORY:\n return \"fa-exclamation-circle\"\n elif self.priority == self.Priority.ALERT:\n return \"fa-exclamation-triangle\"\n return \"\"\n\n def get_priority_color(self):\n \"\"\"Get the proper color for the priority.\"\"\"\n if self.priority == self.Priority.UPDATE:\n return \"update-hip\"\n elif self.priority == self.Priority.NOTIFICATION:\n return \"notification-hip\"\n elif self.priority == self.Priority.ADVISORY:\n return \"advisory-hip\"\n elif self.priority == self.Priority.ALERT:\n return \"alert-hip\"\n return \"\"\n\n def serve(self, request):\n \"\"\"Return the URL for the HealthAlertDetailPage's alert_file (or a 404 page).\"\"\"\n # If the HealthAlertDetailPage does not have an alert_file, then return a 404 page.\n if not self.alert_file:\n raise Http404()\n return redirect(self.alert_file.url)\n\n # Because we have overridden the serve() method of this model, we also need to\n # override serve_preview in order for the live preview panel to work in Wagtail admin\n def serve_preview(self, request, mode_name):\n return self.serve(request)\n\n\nclass 
HealthAlertSubscriber(models.Model):\n \"\"\"Stores users that indicate they would like to\n subscribe to health alerts\n\n This model is a standard django model used\n to store health alert subscribers. Instances\n are made available in the django admin as opposed\n to the wagtail cms. From the information stored here\n admins will be able to send out newsletters. The\n actual sending of newsletters is a process not\n managed by the current iteration of this\n application.\n \"\"\"\n\n personal_first_name = models.CharField(\n \"First Name*\",\n max_length=255,\n default=\"\",\n )\n personal_last_name = models.CharField(\n \"Last Name*\",\n max_length=255,\n default=\"\",\n )\n personal_medical_expertise = models.CharField(\n \"Medical Specialty/Expertise*\",\n max_length=255,\n default=\"\",\n )\n personal_professional_license = models.CharField(\n \"Professional License*\",\n max_length=255,\n default=\"\",\n )\n agency_name = models.CharField(\"Agency Name*\", max_length=255, default=\"\")\n\n class AGENCY_TYPE_CHOICES(models.TextChoices):\n ANIMAL_VETERINARY_CLINICS = \"AV\", \"Animal and Veterinary Clinics\"\n BUSINESSES_COMMUNITY_ORGANIZATIONS = (\n \"BCO\",\n \"Businesses and Community Organizations\",\n )\n CHILDCARE_SERVICES_DAYCARES = \"CSD\", \"Child Care Services and Daycares\"\n DENTAL_OFFICES_CLINICS = \"DOC\", \"Dental Offices and Clinics\"\n HOSPITALS_HEALTHCARE = \"HH\", \"Hospitals and Healthcare\"\n NURSING_PERSONAL_CARE_HOMES = \"NPH\", \"Nursing and Personal Care Homes\"\n PDPH_INTERNAL = \"PDPH\", \"PDPH (Internal)\"\n PHARMACY = \"P\", \"Pharmacy\"\n PUBLIC_HEALTH_REGIONAL_PARTNERS = \"PHRP\", \"Public Health and Regional Partners\"\n UNIVERSITY_STUDENT_HEALTH = \"USH\", \"University and Student Health\"\n OTHER = \"O\", \"Other\"\n\n agency_type = models.CharField(\n \"Agency Type*\", max_length=4, choices=AGENCY_TYPE_CHOICES.choices\n )\n agency_zip_code = models.CharField(\n \"Agency Zip Code*\", max_length=10, default=\"\", validators=[zipcode_validator]\n )\n agency_position = models.CharField(\"Position/Title*\", max_length=255, default=\"\")\n agency_work_phone = PhoneNumberField(\"Work Phone\", null=True, blank=True)\n network_email = models.EmailField(\"Email Address*\", default=\"\")\n network_fax = PhoneNumberField(\"Fax Number*\")\n", "repo_name": "caktus/philly-hip", "sub_path": "apps/health_alerts/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 6708, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "apps.common.models.HipBasePage", "line_number": 17, "usage_type": "name"}, {"api_name": "apps.disease_control.models.DiseaseAndConditionDetailPage.objects.exclude", "line_number": 43, "usage_type": "call"}, {"api_name": "apps.disease_control.models.DiseaseAndConditionDetailPage.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "apps.disease_control.models.DiseaseAndConditionDetailPage", "line_number": 43, "usage_type": "name"}, {"api_name": "apps.common.models.HipBasePage", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 54, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.IntegerChoices", "line_number": 57, "usage_type": "attribute"}, 
{"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.db.models.ForeignKey", "line_number": 67, "usage_type": "call"}, {"api_name": "apps.disease_control.models.DiseaseAndConditionDetailPage", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.models.SET_NULL", "line_number": 69, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "apps.common.models.HipBasePage.content_panels", "line_number": 75, "usage_type": "attribute"}, {"api_name": "apps.common.models.HipBasePage", "line_number": 75, "usage_type": "name"}, {"api_name": "wagtail.admin.panels.FieldPanel", "line_number": 76, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.FieldPanel", "line_number": 77, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.FieldPanel", "line_number": 78, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.FieldPanel", "line_number": 79, "usage_type": "call"}, {"api_name": "apps.common.models.HipBasePage.search_fields", "line_number": 81, "usage_type": "attribute"}, {"api_name": "apps.common.models.HipBasePage", "line_number": 81, "usage_type": "name"}, {"api_name": "wagtail.search.index.SearchField", "line_number": 82, "usage_type": "call"}, {"api_name": "wagtail.search.index", "line_number": 82, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 115, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 116, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 124, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 138, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 143, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 148, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 148, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 153, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 153, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 158, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 158, "usage_type": "name"}, {"api_name": "django.db.models.TextChoices", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 160, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 176, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 176, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 179, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 179, "usage_type": "name"}, {"api_name": "utils.zipcode_validator", "line_number": 180, "usage_type": 
"name"}, {"api_name": "django.db.models.CharField", "line_number": 182, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 182, "usage_type": "name"}, {"api_name": "phonenumber_field.modelfields.PhoneNumberField", "line_number": 183, "usage_type": "call"}, {"api_name": "django.db.models.EmailField", "line_number": 184, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 184, "usage_type": "name"}, {"api_name": "phonenumber_field.modelfields.PhoneNumberField", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "9448534916", "text": "import os\nfrom setuptools import setup, find_packages\n\nBASEDIR = os.path.dirname(os.path.abspath(__file__))\nVERSION = open(os.path.join(BASEDIR, 'VERSION')).read().strip()\n\nBASE_DEPENDENCIES = [\n 'wf-gqlpycgen>=0.5.9',\n 'requests>=2.21',\n 'Jinja2>=2.10',\n 'gql>=0.1.0',\n 'PyYAML>=3.13',\n 'click>=6.7',\n 'boto3>=1.9.213'\n]\n\nTEST_DEPENDENCIES = [\n 'pytest==3.0.6',\n 'pytest-cov==2.4.0',\n 'pytest-mock==1.5.0',\n 'pylint==1.6.5',\n 'httpretty==0.8.14'\n]\n\nLOCAL_DEPENDENCIES = [\n 'tox==2.6.0',\n 'tox-pyenv==1.0.3'\n]\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(BASEDIR))\n\nsetup(\n name='wildflower-honeycomb-sdk',\n version=VERSION,\n packages=find_packages(),\n include_package_data=True,\n description='SDK for use with the Wildflower Honeycomb API',\n long_description='Provides uniform access to all aspects of the honeycomb API as well as a direct GraphQL interface for more complex queries.',\n url='https://github.com/WildflowerSchools/wildflower-honeycomb-sdk-py',\n author='optimuspaul',\n author_email='paul.decoursey@wildflowerschools.org',\n install_requires= BASE_DEPENDENCIES,\n tests_require = TEST_DEPENDENCIES,\n extras_require = {\n 'test': TEST_DEPENDENCIES,\n 'local': LOCAL_DEPENDENCIES\n },\n entry_points={\n 'console_scripts': [\n 'honeycomb=cli:cli',\n ],\n }\n)\n", "repo_name": "WildflowerSchools/wildflower-honeycomb-sdk-py", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1410, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 4, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path.normpath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 33, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "22513498984", "text": "from dataclasses import asdict, dataclass\n\nfrom sqlalchemy import Column, ForeignKey, Integer, String\n\nfrom app.core.database import db\n\n\n@dataclass\nclass AddressModel(db.Model):\n __tablename__ = \"addresses\"\n\n address_id: int = Column(Integer, primary_key=True)\n zip_code: str = Column(String(8), nullable=False) # CEP\n state: str = Column(String(15), nullable=False)\n city: str = Column(String(50), nullable=False)\n public_place: str = Column(String(60), nullable=False)\n number: int = Column(Integer, nullable=False)\n\n def asdict(self):\n return asdict(self)\n", 
"repo_name": "ezms/pc-builder-api", "sub_path": "app/models/address_model.py", "file_name": "address_model.py", "file_ext": "py", "file_size_in_byte": 592, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "app.core.database.db.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "app.core.database.db", "line_number": 9, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 12, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 14, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 17, "usage_type": "argument"}, {"api_name": "dataclasses.asdict", "line_number": 20, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "40851621817", "text": "import itertools\nimport os\nimport urllib.error\nfrom collections import defaultdict\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport skimage\nimport skimage.io\nimport skimage.transform\nimport utils\nfrom config import parse_config, to_int_tup\nfrom well_dict import well_dict as WELL_DICT\n\ncfg = parse_config()\ncfg_stitch = cfg[\"image_stitching\"]\n\n\nOUTPUT_DIR = cfg_stitch[\"output_dir\"]\nMISSING_WELL_IMG = cfg_stitch[\"missing_well_path\"]\nHARMONY_NAME_IP_MAP = dict(cfg[\"harmony_mappings\"])\nMAX_INTENSITY_DAPI = cfg_stitch.getint(\"max_intensity_dapi\")\nMAX_INTENSITY_ALEXA488 = cfg_stitch.getint(\"max_intensity_alexa488\")\nIMG_SIZE_SAMPLE = to_int_tup(cfg_stitch[\"img_size_sample\"])\nIMG_SIZE_PLATE_WELL = to_int_tup(cfg_stitch[\"img_size_plate_well\"])\nCHANNELS = to_int_tup(cfg_stitch[\"channels\"])\nDILUTIONS = to_int_tup(cfg_stitch[\"dilutions\"])\nPLATE_DIMS = (16, 24)\nSAMPLE_DIMS = (2, 4)\n\n\nclass ImageStitcher:\n \"\"\"\n Image stitching class, for both sample and whole plate images.\n - Channels are stitched and saved as separate images, everything is\n grayscale.\n - Uses the Phenix indexfile to fetch images from a URL.\n - Images are a single field per well which simplifies things.\n - Sometimes certain wells fail to image, these are then missing as rows\n in the indexfile. 
These are replaced by a placeholder image to show as\n missing and keep plates & samples consistent dimensions.\n - Images are saved to a directory on Nemo.\n - Image paths are not recorded as they are consistent and can be\n constructed from the metadata such as plate barcode and well position.\n - Raw images are unsigned 16-bit tiffs, stitched images are saved as\n unsigned 8-bit pngs, with values clipped at a maximum to increase\n contrast.\n \"\"\"\n\n def __init__(\n self,\n indexfile_path: str,\n output_dir: str = OUTPUT_DIR,\n harmony_name_map: Dict = HARMONY_NAME_IP_MAP,\n max_dapi: int = MAX_INTENSITY_DAPI,\n max_alexa488: int = MAX_INTENSITY_ALEXA488,\n missing_well_img_path: str = MISSING_WELL_IMG,\n img_size_sample: Tuple[int] = IMG_SIZE_SAMPLE,\n img_size_plate_well: Tuple[int] = IMG_SIZE_PLATE_WELL,\n ):\n self.indexfile_path = indexfile_path\n self.missing_well_img_path = missing_well_img_path\n self.harmony_name_map = harmony_name_map\n indexfile = pd.read_csv(indexfile_path, sep=\"\\t\")\n self.indexfile = self.fix_indexfile(indexfile)\n self.output_dir = output_dir\n self.plate_images = None\n self.dilution_images = None\n self.max_intensity_channel = {1: max_dapi, 2: max_alexa488}\n self.img_size_sample = img_size_sample\n self.img_size_plate_well = img_size_plate_well\n # these are present in the indexfile, can't be loaded\n self.missing_images = []\n\n def fix_missing_wells(self, indexfile: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n find missing wells in the indexfile and add\n them in with the URL pointing to a placeholder\n \"\"\"\n n_expected_rows = 768 # 384 wells * 2 channels\n if indexfile.shape[0] == n_expected_rows:\n # no missing wells, just return the indexfile as it is\n return indexfile\n # create a dataframe with complete \"Row\", \"Column\", \"Channel ID\" column values\n rows, cols = zip(*itertools.product(range(1, 17), range(1, 25)))\n temp_df_1 = pd.DataFrame({\"Row\": rows, \"Column\": cols, \"Channel ID\": 1})\n temp_df_2 = pd.DataFrame({\"Row\": rows, \"Column\": cols, \"Channel ID\": 2})\n temp_df = pd.concat([temp_df_1, temp_df_2]).sort_values(\n [\"Row\", \"Column\", \"Channel ID\"]\n )\n merged = indexfile.merge(temp_df, how=\"outer\")\n assert merged.shape[0] == n_expected_rows\n # replace missing URLs with the placeholder URL\n merged[\"URL\"] = merged[\"URL\"].fillna(self.missing_well_img_path)\n merged = merged.sort_values([\"Row\", \"Column\", \"Channel ID\"])\n return merged\n\n def fix_urls(self, df: pd.DataFrame) -> pd.DataFrame:\n # not a regex, but needed for pandas substring replacement\n df.URL = df.URL.replace(self.harmony_name_map, regex=True)\n return df\n\n def fix_indexfile(self, indexfile: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n replace missing wells with placeholder image, and replace\n computer names with ip addresses\n \"\"\"\n indexfile = self.fix_urls(indexfile)\n indexfile = self.fix_missing_wells(indexfile)\n return indexfile\n\n def stitch_plate(self) -> None:\n \"\"\"stitch well images into a plate montage\"\"\"\n ch_images = defaultdict(list)\n plate_images = dict()\n for channel, group in self.indexfile.groupby(\"Channel ID\"):\n for _, row in group.iterrows():\n img = self.load_img(row)\n img = skimage.transform.resize(\n img,\n self.img_size_plate_well,\n anti_aliasing=True,\n preserve_range=True,\n )\n ch_images[channel].append(img)\n img_stack = np.stack(ch_images[channel])\n img_plate = img_stack.reshape(384, *self.img_size_plate_well)\n # rescale intensity\n img_plate /= self.max_intensity_channel[channel]\n 
img_plate[img_plate > 1.0] = 1.0\n img_plate = skimage.img_as_float(img_plate)\n img_montage = skimage.util.montage(\n img_plate,\n fill=1.0,\n padding_width=3,\n grid_shape=PLATE_DIMS,\n rescale_intensity=False,\n )\n plate_images[channel] = img_montage\n self.plate_images = plate_images\n\n def stitch_sample(self, well: str) -> np.ndarray:\n \"\"\"stitch individual sample\"\"\"\n df = self.indexfile.copy()\n sample_dict = defaultdict(dict)\n images = []\n # as we're dealing with the 96-well labels, but the indexfile is using\n # the original 384-well labels, we need to get the 4 384-well labels\n # which correspond to the given sample well label\n wells_384 = WELL_DICT[well]\n for well_384 in wells_384:\n row, column = utils.well_to_row_col(well_384)\n # subset dataframe to just correct row/columns\n df_subset = df[(df[\"Row\"] == row) & (df[\"Column\"] == column)]\n for channel_name, group in df_subset.groupby(\"Channel ID\"):\n for _, group_row in group.iterrows():\n dilution = utils.get_dilution_from_row_col(\n group_row[\"Row\"], group_row[\"Column\"]\n )\n img = self.load_img(group_row)\n sample_dict[channel_name].update({dilution: img})\n for channel in CHANNELS:\n for dilution in DILUTIONS:\n img = sample_dict[channel][dilution]\n img = skimage.transform.resize(\n img, self.img_size_sample, anti_aliasing=True, preserve_range=True\n )\n # rescale image intensities\n img /= self.max_intensity_channel[channel]\n img[img > 1.0] = 1\n img = skimage.img_as_float(img)\n images.append(img)\n img_stack = np.stack(images).reshape(8, *self.img_size_sample)\n img_montage = skimage.util.montage(\n arr_in=img_stack,\n fill=1.0, # white if rescale_intensity is True\n grid_shape=SAMPLE_DIMS,\n rescale_intensity=False,\n padding_width=10,\n )\n return img_montage\n\n def stitch_all_samples(self):\n \"\"\"stitch but don't save sample images\"\"\"\n dilution_images = {}\n for well in WELL_DICT.keys():\n sample_img = self.stitch_sample(well)\n dilution_images[well] = sample_img\n self.dilution_images = dilution_images\n\n def create_img_store(self) -> None:\n \"\"\"\n This loads all images from an indexfile, and stores the resized\n and intensity-scaled images in a dictionary. 
The images are stored\n twice for the plate images and the sample images, as they require\n different sizes for each.\n The image store is stored in the class as `self.img_store`.\n ---\n img_store:\n {\n \"sample\": {\n \"A01\": {\n 1: {1: np.ndarray, 2: np.ndarray, 3:np.ndarray, 4:np.ndarray},\n 2: {1: np.ndarray, 2: np.ndarray, 3:np.ndarray, 4:np.ndarray},\n },\n ...\n \"H12\": {\n 1: {1: np.ndarray, 2: np.ndarray, 3:np.ndarray, 4:np.ndarray},\n 2: {1: np.ndarray, 2: np.ndarray, 3:np.ndarray, 4:np.ndarray},\n },\n },\n \"plate\": {1: list[np.ndarray], 2: list[np.ndarray]}\n }\n \"\"\"\n sample_dict = defaultdict(lambda: defaultdict(dict))\n plate_dict = defaultdict(list)\n for _, row in self.indexfile.iterrows():\n img = self.load_img(row)\n well_384 = utils.row_col_to_well(int(row[\"Row\"]), int(row[\"Column\"]))\n dilution = utils.dilution_from_well(well_384)\n well_96 = utils.convert_well_384_to_96(well_384)\n channel = int(row[\"Channel ID\"])\n img = self.rescale_intensity(img, channel)\n img_resized_plate_well = skimage.transform.resize(\n img, self.img_size_plate_well, anti_aliasing=True, preserve_range=True\n )\n img_resized_sample = skimage.transform.resize(\n img, self.img_size_sample, anti_aliasing=True, preserve_range=True\n )\n sample_dict[well_96][channel][dilution] = img_resized_sample\n plate_dict[channel].append(img_resized_plate_well)\n self.img_store = {\"sample\": sample_dict, \"plate\": plate_dict}\n\n def load_img(self, row: pd.Series):\n \"\"\"\n Load image from indexfile row.\n If the image is missing then load the placeholder image and add\n row to self.missing_images.\n \"\"\"\n try:\n img = skimage.io.imread(row[\"URL\"], as_gray=True)\n except (urllib.error.HTTPError, OSError):\n self.missing_images.append(row)\n img = skimage.io.imread(self.missing_well_img_path, as_gray=True)\n return img\n\n def rescale_intensity(self, img: np.ndarray, channel: int) -> np.ndarray:\n \"\"\"rescale image intensity, clip values to 1 over this limit\"\"\"\n img = img.astype(np.float64)\n img /= self.max_intensity_channel[channel]\n img[img > 1.0] = 1.0\n img = skimage.img_as_float(img)\n return img\n\n def stitch_and_save_plates(self):\n # stitch and save plates images\n for channel_num in CHANNELS:\n img_stack_plate = np.stack(self.img_store[\"plate\"][channel_num])\n img_montage_plate = skimage.util.montage(\n img_stack_plate,\n fill=1.0,\n padding_width=3,\n grid_shape=PLATE_DIMS,\n rescale_intensity=False,\n )\n plate_path = os.path.join(self.output_dir_path, f\"plate_{channel_num}.png\")\n plate_arr = skimage.img_as_ubyte(img_montage_plate)\n skimage.io.imsave(fname=plate_path, arr=plate_arr)\n\n def stitch_and_save_samples(self):\n # stitch and save sample images\n for well in WELL_DICT.keys():\n sample_well = self.img_store[\"sample\"][well]\n sample_imgs = []\n for channel in CHANNELS:\n for dilution in [1, 2, 3, 4]:\n img = sample_well[channel][dilution]\n sample_imgs.append(img)\n sample_stack = np.stack(sample_imgs)\n sample_montage = skimage.util.montage(\n arr_in=sample_stack,\n fill=1.0, # white if rescale_intensity is True\n grid_shape=SAMPLE_DIMS,\n rescale_intensity=False,\n padding_width=10,\n )\n sample_montage = skimage.img_as_ubyte(sample_montage)\n well_path = os.path.join(self.output_dir_path, f\"well_{well}.png\")\n skimage.io.imsave(fname=well_path, arr=sample_montage)\n\n def stitch_and_save_all_samples_and_plates(self):\n \"\"\"\n Stitch all samples and build up whole 384-well plate image as we go,\n this saves loading each image from Harmony twice 
per standard\n workflow.\n This saves the stitched images immediately after they are stitched\n rather than storing them in `self.dilution_images` and\n `self.plate_images` to reduce memory usage.\n \"\"\"\n self.create_output_dir()\n self.create_img_store()\n self.stitch_and_save_plates()\n self.stitch_and_save_samples()\n\n def save_plates(self):\n \"\"\"save stitched plates\"\"\"\n self.create_output_dir()\n if self.plate_images is None:\n raise RuntimeError(\"no plate images, have you run stitch_plate()?\")\n for channel_num, plate_arr in self.plate_images.items():\n plate_path = os.path.join(self.output_dir_path, f\"plate_{channel_num}.png\")\n plate_arr = skimage.img_as_ubyte(plate_arr)\n skimage.io.imsave(fname=plate_path, arr=plate_arr)\n\n def save_all(self):\n \"\"\"save both stitched plate and sample images\"\"\"\n self.create_output_dir()\n if self.dilution_images is None:\n raise RuntimeError(\"no dilution images, have you run stitch_all_samples()?\")\n if self.plate_images is None:\n raise RuntimeError(\"no plate images, have you run stitch_plate()?\")\n for channel_num, plate_arr in self.plate_images.items():\n plate_path = os.path.join(self.output_dir_path, f\"plate_{channel_num}.png\")\n plate_arr = skimage.img_as_ubyte(plate_arr)\n skimage.io.imsave(fname=plate_path, arr=plate_arr)\n for well_name, well_arr in self.dilution_images.items():\n well_path = os.path.join(self.output_dir_path, f\"well_{well_name}.png\")\n well_arr = skimage.img_as_ubyte(well_arr)\n skimage.io.imsave(fname=well_path, arr=well_arr)\n\n def create_output_dir(self):\n \"\"\"create output directory if it doesn't already exist\"\"\"\n plate_barcode = self.get_plate_barcode()\n if not plate_barcode.startswith((\"T\", \"S\")):\n # standardise the sample type on \"S\" for non-titration plates\n # this ensures the dash_app can find the plates without querying\n # for sample type\n plate_barcode = \"S\" + plate_barcode[1:]\n output_dir_path = os.path.join(self.output_dir, plate_barcode)\n os.makedirs(output_dir_path, exist_ok=True)\n self.output_dir_path = output_dir_path\n\n def get_plate_barcode(self) -> str:\n \"\"\"get plate barcode from indexfile path\"\"\"\n prev_dir = self.indexfile_path.split(os.sep)[-2]\n return prev_dir.split(\"__\")[0]\n\n def collect_missing_images(self) -> List[str]:\n missing = set()\n for i in self.missing_images:\n name = f\"r{i['Row']}c{i['Column']} {i['Channel Name']}\"\n missing.add(name)\n return sorted(list(missing))\n", "repo_name": "FrancisCrickInstitute/hts_neutralisation_launcher", "sub_path": "launcher/stitch_images.py", "file_name": "stitch_images.py", "file_ext": "py", "file_size_in_byte": 15382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "config.parse_config", "line_number": 16, "usage_type": "call"}, {"api_name": "config.to_int_tup", "line_number": 25, "usage_type": "call"}, {"api_name": "config.to_int_tup", "line_number": 26, "usage_type": "call"}, {"api_name": "config.to_int_tup", "line_number": 27, "usage_type": "call"}, {"api_name": "config.to_int_tup", "line_number": 28, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 60, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 76, "usage_type": "attribute"}, {"api_name": 
"itertools.product", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 88, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 89, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 115, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 120, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 120, "usage_type": "attribute"}, {"api_name": "numpy.stack", "line_number": 127, "usage_type": "call"}, {"api_name": "skimage.img_as_float", "line_number": 132, "usage_type": "call"}, {"api_name": "skimage.util.montage", "line_number": 133, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 133, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 146, "usage_type": "call"}, {"api_name": "well_dict.well_dict", "line_number": 151, "usage_type": "name"}, {"api_name": "utils.well_to_row_col", "line_number": 153, "usage_type": "call"}, {"api_name": "utils.get_dilution_from_row_col", "line_number": 158, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 166, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 166, "usage_type": "attribute"}, {"api_name": "skimage.img_as_float", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 174, "usage_type": "call"}, {"api_name": "skimage.util.montage", "line_number": 175, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 175, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 143, "usage_type": "attribute"}, {"api_name": "well_dict.well_dict.keys", "line_number": 187, "usage_type": "call"}, {"api_name": "well_dict.well_dict", "line_number": 187, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 216, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 217, "usage_type": "call"}, {"api_name": "utils.row_col_to_well", "line_number": 220, "usage_type": "call"}, {"api_name": "utils.dilution_from_well", "line_number": 221, "usage_type": "call"}, {"api_name": "utils.convert_well_384_to_96", "line_number": 222, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 225, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 225, "usage_type": "attribute"}, {"api_name": "skimage.transform.resize", "line_number": 228, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 228, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 235, "usage_type": "attribute"}, {"api_name": "skimage.io.imread", "line_number": 242, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 242, "usage_type": "attribute"}, {"api_name": "urllib.error.error", "line_number": 243, "usage_type": "attribute"}, {"api_name": "urllib.error", "line_number": 243, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 245, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 250, "usage_type": "attribute"}, {"api_name": "skimage.img_as_float", "line_number": 253, 
"usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 259, "usage_type": "call"}, {"api_name": "skimage.util.montage", "line_number": 260, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 260, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 268, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 269, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 269, "usage_type": "attribute"}, {"api_name": "well_dict.well_dict.keys", "line_number": 273, "usage_type": "call"}, {"api_name": "well_dict.well_dict", "line_number": 273, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 280, "usage_type": "call"}, {"api_name": "skimage.util.montage", "line_number": 281, "usage_type": "call"}, {"api_name": "skimage.util", "line_number": 281, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 289, "usage_type": "call"}, {"api_name": "os.path", "line_number": 289, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 290, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 290, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path", "line_number": 312, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 313, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 314, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 314, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 324, "usage_type": "call"}, {"api_name": "os.path", "line_number": 324, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 325, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 326, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 326, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 328, "usage_type": "attribute"}, {"api_name": "skimage.img_as_ubyte", "line_number": 329, "usage_type": "call"}, {"api_name": "skimage.io.imsave", "line_number": 330, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 330, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 340, "usage_type": "call"}, {"api_name": "os.path", "line_number": 340, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 341, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 346, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 349, "usage_type": "name"}]} +{"seq_id": "36547718661", "text": "\nfrom abc import abstractmethod, ABC\nfrom socket import socket\nfrom typing import Tuple, Union\n\n\nclass AbstractAioSocket(ABC):\n @abstractmethod\n async def connect(self, address: Tuple[str, int]):\n \"\"\"Connect to the given address.\"\"\"\n ...\n\n @abstractmethod\n async def handshake(self) -> None:\n \"\"\"\n Perform any handshake if needed. 
Can be a no-op for less complicated protocols.\n :return: None.\n \"\"\"\n ...\n\n @abstractmethod\n async def recv(self, nbytes: int) -> bytes:\n \"\"\"\n Receive date.\n\n :param nbytes: Maximum amount of bytes to receive in a single call.\n :return: The received data.\n \"\"\"\n ...\n\n @abstractmethod\n async def sendall(self, data: bytes) -> None:\n \"\"\"\n Send data. Guarantees all data is really sent.\n\n :param data: The Date to send.\n :return: None.\n \"\"\"\n ...\n\n @abstractmethod\n def get_real_socket(self) -> socket:\n \"\"\"\n :return: Get the underlying (real) socket or None.\n \"\"\"\n ...\n\n", "repo_name": "stralsundsecurity/tmmp", "sub_path": "tmmp/aiosock/abc.py", "file_name": "abc.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "abc.ABC", "line_number": 7, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 9, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 8, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 13, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 21, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 31, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 41, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "12472151319", "text": "\"\"\"\n\nAuthor: Caio Henrique Oliveira Cunha\n\nHere it is concentrated as business rules\n\n\"\"\"\nimport coreapi\nimport datetime \nfrom cotacao.models import Cotacao\nfrom cotacao.serializers import CotacaoSerializer\nfrom requests.exceptions import ConnectionError\nfrom brmed.settings import BASE_URL_ENDPOINT_VAT, BASE_COTATION\n\nclass CotacaoService():\n\n def _get_save_data_api(self, date):\n \"\"\"\n A service to seed database with 5 response of VAT ENDPOINT \n\n Args: \\n\n date : Date for search in VAT API\n\n Returns: \\n \n data_dict: data of getted in VAT API or message error\n \"\"\"\n\n client = coreapi.Client()\n\n date_str = date.strftime(\"%Y-%m-%d\")\n filter = Cotacao.objects.filter(date=date_str)\n\n if not filter:\n \n try:\n\n data = client.get(BASE_URL_ENDPOINT_VAT + '?base=' + BASE_COTATION + '&date=' + date_str)\n\n except ConnectionError as e:\n\n raise e\n \n data_dict = {}\n\n try:\n\n data_dict[\"real\"] = data[\"rates\"][\"BRL\"]\n data_dict[\"euro\"] = data[\"rates\"][\"EUR\"]\n data_dict[\"iene\"] = data[\"rates\"][\"JPY\"]\n data_dict[\"date\"] = data[\"date\"]\n data_dict[\"message\"] = \"Dados Importados!\"\n\n except KeyError as exp:\n\n raise exp\n\n cotacao_serializer = CotacaoSerializer(data=data_dict)\n if (cotacao_serializer.is_valid()):\n cotacao_serializer.save()\n \n return data_dict\n\n else:\n \n data_dict = {'date': '', 'message': \"Dados não importados! 
O registro já está salvo.\"}\n\n return data_dict\n\n \n def seed(self):\n \"\"\"\n A service seed database with 5 response of VAT ENDPOINT \n\n Args: \\n\n - : None\n\n Returns: \\n \n message: sucess message or error\n \"\"\"\n message = []\n limit = 0\n day_cont = 0\n date_initial = datetime.datetime.today()\n\n while (limit <= 4):\n\n date = date_initial - datetime.timedelta(days=day_cont)\n\n weekday = date.weekday()\n\n if weekday < 5:\n \n data = self._get_save_data_api(date)\n \n if data[\"date\"] != '':\n\n limit = limit + 1\n\n date_str = date.strftime(\"%Y-%m-%d\")\n message.append(date_str + ': ' + data[\"message\"])\n\n day_cont = day_cont + 1\n\n return message \n \n\n def get_data_initial_chart(self):\n \"\"\"\n A service to get and organize data from database, getting the last 5 cotations in relation USD\n\n Args: \\n\n - : None\n\n Returns: \\n \n datas_final : data of cotations from database or errors \n\n \"\"\"\n datas_final = {'dates': [], 'real': [], 'euro': [], 'iene': [], 'errors': ''}\n\n date_inicial = datetime.datetime.today() - datetime.timedelta(days=6)\n date_inicial_str = date_inicial.strftime(\"%Y-%m-%d\")\n\n date_final = datetime.datetime.today()\n date_final_str = date_final.strftime(\"%Y-%m-%d\")\n\n if date_inicial.weekday == 5:\n date_inicial = date_inicial - datetime.timedelta(days=1)\n elif date_inicial.weekday == 6:\n date_inicial = date_inicial - datetime.timedelta(days=2)\n\n ## get records with date range selected\n datas = Cotacao.objects.filter(date__range=[date_inicial_str, date_final_str]).order_by('date')\n\n if not datas:\n datas_final['errors'] = \"Nenhum dado encontrado no BANCO DE DADOS!\"\n return datas_final\n \n for data in datas:\n\n date_str = data.date.strftime(\"%Y-%m-%d\")\n datas_final['dates'].append(date_str)\n datas_final['real'].append(data.real)\n datas_final['euro'].append(data.euro)\n datas_final['iene'].append(data.iene)\n\n return datas_final \n\n def get_data_date_chart(self, date_inicial, date_final):\n \"\"\"\n A service to get and organize data from database, using date selected by user\n\n Args: \\n\n date_inicial : Date Initial for generate chart\n date_final: Date Final for generate chart\n\n Returns: \\n \n datas_final : data of cotations or errors\n \"\"\"\n \n datas_final = {'dates': [], 'real': [], 'euro': [], 'iene': [], 'errors': ''}\n\n diferenca = date_final - date_inicial\n if diferenca.days > 5:\n datas_final['errors'] = \"Selecione um intervalo de no máximo 5 dias!\"\n return datas_final\n\n if date_inicial > date_final:\n datas_final['errors'] = \"Selecione uma Data Inicial menor que a Data Final!\"\n return datas_final\n\n for i in range(0,5):\n\n date = date_final - datetime.timedelta(days=i)\n data = self._get_save_data_api(date)\n\n ## get records with date range selected\n datas = Cotacao.objects.filter(date__range=[date_inicial, date_final]).order_by('-id')\n\n if not datas:\n datas_final['errors'] = \"Não há dados para a data selecionada!\"\n return datas_final\n \n for data in datas:\n\n date_str = data.date.strftime(\"%Y-%m-%d\")\n datas_final['dates'].append(date_str)\n datas_final['real'].append(data.real)\n datas_final['euro'].append(data.euro)\n datas_final['iene'].append(data.iene)\n\n return datas_final\n\n def get_all(self):\n \"\"\"\n A service to get all cotation saved in database\n\n Args: \\n\n - : None\n\n Returns: \\n \n cotacao : data of all cotations\n \"\"\"\n\n ## Note: As the amount of data available in the database will be small, this query would not be costly..\n ## 
Note02: In a scenario where the quote table grows a lot, some strategy is needed to make this query\n ## Note03: Pagination for example, limiting the amount of searches in the bank.\n cotacao = Cotacao.objects.all()\n \n return cotacao\n\n", "repo_name": "caio-cunha/br-med", "sub_path": "brmed/cotacao/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 6309, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "coreapi.Client", "line_number": 28, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cotacao.models.Cotacao", "line_number": 31, "usage_type": "name"}, {"api_name": "brmed.settings.BASE_URL_ENDPOINT_VAT", "line_number": 37, "usage_type": "name"}, {"api_name": "brmed.settings.BASE_COTATION", "line_number": 37, "usage_type": "name"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 39, "usage_type": "name"}, {"api_name": "cotacao.serializers.CotacaoSerializer", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 120, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 120, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 123, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 129, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects.filter", "line_number": 132, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cotacao.models.Cotacao", "line_number": 132, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 173, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects.filter", "line_number": 177, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "cotacao.models.Cotacao", "line_number": 177, "usage_type": "name"}, {"api_name": "cotacao.models", "line_number": 207, "usage_type": "name"}, {"api_name": "cotacao.models.Cotacao.objects.all", "line_number": 207, "usage_type": "call"}, {"api_name": "cotacao.models.Cotacao.objects", "line_number": 207, "usage_type": "attribute"}, {"api_name": "cotacao.models.Cotacao", "line_number": 207, "usage_type": "name"}, {"api_name": "cotacao.models", "line_number": 209, "usage_type": "name"}]} +{"seq_id": "20024119187", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CustomFields',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.CharField(max_length=521, 
verbose_name='\\u5b57\\u6bb5\\u5185\\u5bb9')),\n ('type', models.IntegerField(verbose_name='\\u5b57\\u6bb5\\u7c7b\\u578b', choices=[(1, 'text'), (2, 'integer')])),\n ],\n ),\n migrations.CreateModel(\n name='Device',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('SN', models.CharField(max_length=32)),\n ('state', models.IntegerField(default=1, verbose_name='\\u72b6\\u6001', choices=[(1, '\\u6b63\\u5e38'), (2, '\\u5f02\\u5e38')])),\n ('model', models.IntegerField(default=1, verbose_name='\\u6fc0\\u6d3b\\u72b6\\u6001', choices=[(1, '\\u6d3b\\u8dc3'), (2, '\\u5b58\\u6863')])),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u521b\\u5efa\\u65f6\\u95f4')),\n ],\n ),\n migrations.CreateModel(\n name='DeviceProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32, verbose_name='\\u8bbe\\u5907\\u540d')),\n ('type', models.IntegerField(blank=True, null=True, verbose_name='\\u8bbe\\u5907\\u7c7b\\u578b', choices=[(1, 'mac'), (2, 'pc'), (3, 'android')])),\n ('describe', models.TextField(max_length=521, null=True, verbose_name='\\u63cf\\u8ff0', blank=True)),\n ('high_temperature', models.CharField(max_length=32, null=True, verbose_name='\\u6700\\u9ad8\\u6e29\\u5ea6', blank=True)),\n ('low_temperature', models.CharField(max_length=32, null=True, verbose_name='\\u6700\\u4f4e\\u6e29\\u5ea6', blank=True)),\n ('delayed', models.IntegerField(null=True, verbose_name='\\u5ef6\\u65f6\\u5f00\\u59cb', blank=True)),\n ('record_interval', models.IntegerField(null=True, verbose_name='\\u8bb0\\u5f55\\u95f4\\u9694', blank=True)),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u521b\\u5efa\\u65f6\\u95f4')),\n ],\n ),\n migrations.CreateModel(\n name='IotDevice',\n fields=[\n ('device_id', models.AutoField(serialize=False, primary_key=True)),\n ('mac_addr', models.CharField(unique=True, max_length=32)),\n ('serial_num', models.CharField(max_length=32, blank=True)),\n ('model_num', models.CharField(max_length=32, blank=True)),\n ('firmware_rev', models.CharField(max_length=32, blank=True)),\n ('software_rev', models.CharField(max_length=32, blank=True)),\n ('hardware_rev', models.CharField(max_length=32, blank=True)),\n ('time_registered', models.DateTimeField(auto_now_add=True, null=True)),\n ],\n ),\n migrations.AddField(\n model_name='device',\n name='profile',\n field=models.OneToOneField(null=True, blank=True, to='device.DeviceProfile', verbose_name='\\u8bbe\\u5907\\u63cf\\u8ff0'),\n ),\n migrations.AddField(\n model_name='device',\n name='users',\n field=models.ManyToManyField(to='users.User', verbose_name='\\u6240\\u5c5e\\u7528\\u6237'),\n ),\n migrations.AddField(\n model_name='customfields',\n name='device',\n field=models.OneToOneField(null=True, blank=True, to='device.Device', verbose_name='\\u81ea\\u5b9a\\u4e49\\u5b57\\u6bb5'),\n ),\n ]\n", "repo_name": "wangguanfu/docker-django", "sub_path": "mmcsite/apps/device/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 4009, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": 
"call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", 
"line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 64, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.OneToOneField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "15331545915", "text": "from collections import defaultdict, namedtuple\nfrom typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, TypedDict, Union\n\nfrom flask import g, current_app\nfrom flask_user import current_user\nfrom sqlalchemy.dialects.postgresql import insert\n\nfrom models.alchemy.api_token import APIToken\nfrom models.alchemy.alerts import AlertDefinition\nfrom models.alchemy.dashboard import Dashboard\nfrom models.alchemy.security_group import Group\nfrom models.alchemy.permission import Role, Resource\nfrom models.alchemy.user import UserRoles, User, UserAcl, UserStatusEnum\nfrom web.server.data.data_access import (\n get_db_adapter,\n add_entity,\n delete_entity,\n find_one_by_fields,\n find_all_by_fields,\n Transaction,\n)\nfrom web.server.errors import UserAlreadyInvited\nfrom web.server.routes.views.core import try_get_role_and_resource\nfrom web.server.routes.views.invite import send_invite_emails\nfrom web.server.util.util import get_user_string, Success\nfrom web.server.potion.access import get_id_from_uri\nfrom web.server.potion.signals import after_user_role_change, before_user_role_change\n\nif TYPE_CHECKING:\n from sqlalchemy.orm.session import Session\n\nUNREGISTERED_USER_USERNAME = 'anonymous_user_tracking@zenysis.com'\nUNREGISTERED_USER_FIRST = 'Anonymous'\nUNREGISTERED_USER_LAST = 'User'\n\nSUCCESS_USER_ROLE_ADDED = 'USER_ROLE_ADDED'\nSUCCESS_USER_ROLE_DELETED = 'USER_ROLE_DELETED'\n\nInvitee = 
namedtuple('Invitee', ['name', 'email'])\n\n\nclass RollResourceType(TypedDict):\n sitewideRoles: List[str]\n resources: Dict[str, List[str]]\n\n\nclass ResourceType(TypedDict):\n label: str\n name: str\n resourceType: str\n\n\nclass ResourceRoleType(TypedDict):\n name: str\n resourceType: str\n\n\nclass AclType(TypedDict):\n resource: ResourceType\n resourceRole: ResourceRoleType\n\n\nclass UserObject(TypedDict):\n username: str\n first_name: str\n last_name: str\n phone_number: str\n status_id: str\n acls: List[AclType]\n roles: List[str]\n groups: List[str]\n\n\nAPITokenType = TypedDict('APITokenType', {'$uri': str, 'is_revoked': bool, 'id': str})\n\n\ndef try_get_user(username: str, session: 'Optional[Session]' = None) -> Optional[User]:\n return find_one_by_fields(\n User,\n case_sensitive=False,\n search_fields={'username': username},\n session=session,\n )\n\n\ndef try_get_user_acl(\n user_id: int,\n resource_role_id: int,\n resource_id: Optional[int] = None,\n session: 'Optional[Session]' = None,\n) -> Optional[UserAcl]:\n '''Attempt to find a user role association for a given user, resource_role\n and resource.\n '''\n return find_one_by_fields(\n UserAcl,\n case_sensitive=True,\n search_fields={\n 'user_id': user_id,\n 'resource_role_id': resource_role_id,\n 'resource_id': resource_id,\n },\n session=session,\n )\n\n\ndef list_roles_for_resource(\n user: User, resource: Resource, session: 'Optional[Session]' = None\n) -> List[UserAcl]:\n '''Returns an enumeration of `UserAcl` instances matching the given user and resource.'''\n return find_all_by_fields(\n UserAcl,\n search_fields={'user_id': user.id, 'resource_id': resource.id},\n session=session,\n )\n\n\ndef list_user_roles_for_resource_api(\n resource: Resource, session: 'Optional[Session]' = None\n) -> Dict[str, List[str]]:\n '''Returns an enumeration of the users and the roles that they hold (if any) for a given\n resource. Users that do not hold any roles specific to the resource will not\n be listed.\n '''\n matching_acls = find_all_by_fields(\n UserAcl, search_fields={'resource_id': resource.id}, session=session\n )\n username_to_role_list = defaultdict(lambda: [])\n for acl in matching_acls:\n username_to_role_list[acl.user.username].append(acl.resource_role.name)\n\n return username_to_role_list\n\n\ndef force_delete_user(\n user: User,\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> None:\n '''Force deletes a user by deleting all of the alerts and dashboards\n associated with them along with the user entity.\n '''\n\n session = session or get_db_adapter().session\n\n # NOTE: type suppression is necessary here because SQL Alchemy model attributes\n # do not contain __iter__ attributes so mypy will complain that `roles` is not iterable\n for dashboard in user.dashboards: # type: ignore\n delete_entity(session, dashboard)\n\n alerts = find_all_by_fields(\n AlertDefinition, search_fields={'user_id': user.id}, session=session\n )\n for alert in alerts:\n delete_entity(session, alert, commit=True)\n\n session.delete(user)\n\n if flush:\n session.flush()\n\n if commit:\n session.commit()\n\n\n# TODO: We need to deprecate this function. 
find all functions to deprecate.\ndef add_user_role(\n user: User,\n role_name: str,\n resource_type: str,\n resource_name: Optional[str],\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> Tuple[Union[UserRoles, Optional[UserAcl]], bool]:\n session = session or get_db_adapter().session\n (role, resource_type, resource) = try_get_role_and_resource(\n role_name, resource_type, resource_name, session\n )\n resource_id = resource.id if resource else None\n entity = try_get_user_acl(user.id, role.id, resource_id, session)\n exists = False\n\n if not entity:\n exists = True\n entity = UserRoles(\n user_id=user.id, role_id=role.id, resource_id=resource_id\n ) # type: ignore\n before_user_role_change.send(user, role=role)\n add_entity(session, entity, flush, commit)\n after_user_role_change.send(user, role=role)\n\n return (entity, exists)\n\n\ndef add_user_acl(\n user: User,\n resource_role_name: str,\n resource_type: str,\n resource_name: Optional[str],\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> Tuple[UserAcl, bool]:\n session = session or get_db_adapter().session\n (resource_role, resource_type, resource) = try_get_role_and_resource(\n resource_role_name, resource_type, resource_name, session\n )\n resource_id = resource.id if resource else None\n entity = try_get_user_acl(user.id, resource_role.id, resource_id, session)\n exists = False\n\n if not entity:\n exists = True\n entity = UserAcl(\n user_id=user.id, resource_role_id=resource_role.id, resource_id=resource_id\n )\n before_user_role_change.send(user, role=resource_role)\n add_entity(session, entity, flush, commit)\n after_user_role_change.send(user, role=resource_role)\n\n return (entity, exists)\n\n\ndef delete_user_role(\n user: User,\n role_name: str,\n resource_type: str,\n resource_name: Optional[str],\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> Tuple[Optional[UserAcl], bool]:\n session = session or get_db_adapter().session\n (role, resource_type, resource) = try_get_role_and_resource(\n role_name, resource_type, resource_name, session\n )\n resource_id = resource.id if resource else None\n entity = try_get_user_acl(user.id, role.id, resource_id)\n exists = False\n\n if entity:\n exists = True\n before_user_role_change.send(user, role=role)\n delete_entity(session, entity, flush, commit)\n after_user_role_change.send(user, role=role)\n\n return (entity, exists)\n\n\n# NOTE: Will have to deprecate / modify this with new roles\ndef update_user_roles_from_map(\n user: User,\n role_mapping: Dict[str, RollResourceType],\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> List[Union[UserRoles, UserAcl, None]]:\n session = session or get_db_adapter().session\n new_role_entities = []\n roles = user.roles\n\n # NOTE: type suppression is necessary here because SQL Alchemy model attributes\n # do not contain __iter__ attributes so mypy will complain that `roles` is not iterable\n for role in roles: # type: ignore\n before_user_role_change.send(user, role=role)\n session.delete(role)\n after_user_role_change.send(user, role=role)\n\n for resource_type in list(role_mapping.keys()):\n resource_to_roles = role_mapping[resource_type]['resources']\n sitewide_roles = role_mapping[resource_type]['sitewideRoles']\n\n # Add all sitewide roles for the current resource type\n for role_name in sitewide_roles:\n (result, _) = add_user_role(\n user, role_name, resource_type, None, session, 
flush=False, commit=False\n )\n new_role_entities.append(result)\n\n # Add all resource specific roles for the current resource type\n for resource_name, role_names in list(resource_to_roles.items()):\n for role_name in role_names:\n (result, _) = add_user_role(\n user,\n role_name,\n resource_type,\n resource_name,\n session,\n flush=False,\n commit=False,\n )\n new_role_entities.append(result)\n\n if flush:\n session.flush()\n\n if commit:\n session.commit()\n\n return new_role_entities\n\n\ndef list_resource_roles_for_user(user_id: int) -> List[UserAcl]:\n return find_all_by_fields(UserAcl, search_fields={'user_id': user_id})\n\n\ndef list_resource_roles_for_user_and_resource(\n resource_id: int, user_id: int\n) -> List[UserAcl]:\n return find_all_by_fields(\n UserAcl, search_fields={'resource_id': resource_id, 'user_id': user_id}\n )\n\n\ndef update_user_resource_roles(\n user: User,\n new_resource_roles: List[Dict[str, Any]],\n resource: Optional[Resource] = None,\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> List[UserAcl]:\n session = session or get_db_adapter().session\n new_role_entities = []\n # List only resource roles acls for user and resource ids when resource\n # exists.\n if resource:\n resource_roles = list_resource_roles_for_user_and_resource(resource.id, user.id)\n else:\n resource_roles = list_resource_roles_for_user(user.id)\n\n # TODO: Refactor this to take advantage of SQLAlchemy rather than\n # manually deleting and re-adding resource roles\n for role in resource_roles:\n session.delete(role)\n\n for new_role in new_resource_roles:\n role_name = new_role['role_name']\n resource_type = new_role['resource_type']\n resource_name = resource.name if resource else new_role.get('resource_name')\n\n # Do not flush or commit these changes. 
We want to perform the update in a transacted\n # fashion.\n (result, _) = add_user_acl(\n user,\n role_name,\n resource_type,\n resource_name,\n session,\n flush=False,\n commit=False,\n )\n new_role_entities.append(result)\n\n if flush:\n session.flush()\n\n if commit:\n session.commit()\n\n return new_role_entities\n\n\ndef update_user_acls(user: User, acls: List[AclType]) -> None:\n resource_roles_map = []\n for acl in acls:\n resource = acl['resource']\n resource_roles_map.append(\n {\n 'role_name': acl['resourceRole']['name'],\n 'resource_type': resource.get('resourceType'),\n 'resource_name': resource.get('name'),\n }\n )\n update_user_resource_roles(user, resource_roles_map)\n\n\ndef update_user_groups(user: User, new_groups: List[str]) -> None:\n with Transaction() as transaction:\n groups = []\n for group_uri in new_groups:\n group = transaction.find_by_id(Group, get_id_from_uri(group_uri))\n if group:\n groups.append(group)\n # TODO: fix type error\n user.groups = groups # type: ignore\n\n\ndef update_user_api_tokens(user: User, tokens: List[APITokenType]):\n # pylint: disable=import-outside-toplevel\n from web.server.security.signal_handlers import check_token_validity\n\n if not tokens:\n # nothing to do here\n return\n\n to_revoke = [\n token['$uri'].rsplit('/', 1)[-1] for token in tokens if token['is_revoked']\n ]\n with Transaction() as transaction:\n # create all the tokens that still are not there\n transaction.run_raw().execute(\n insert(APIToken)\n .values(\n [\n {\n 'id': token['id'],\n 'user_id': user.id,\n }\n for token in tokens\n ]\n )\n .on_conflict_do_nothing()\n )\n\n # now revoke tokens to be revoked, we don't allow un-revoke them\n user.api_tokens.filter( # type: ignore[attr-defined]\n # pylint: disable=singleton-comparison\n APIToken.is_revoked == False,\n APIToken.id.in_(to_revoke),\n ).update({'is_revoked': True}, synchronize_session=False)\n\n # invalidate validity caches because the state of the tokens has changed\n memoized = current_app.cache.memoize()(check_token_validity)\n for token in tokens:\n current_app.cache.delete_memoized(memoized, token['id'])\n\n\ndef build_user_updates(user_obj: UserObject) -> Dict[str, Any]:\n '''Gather necessary components that need to be updated in a user'''\n roles = []\n with Transaction() as transaction:\n for role_uri in user_obj['roles']:\n role = transaction.find_by_id(Role, get_id_from_uri(role_uri))\n if role:\n roles.append(role)\n\n user_updates = {\n 'username': user_obj['username'],\n 'first_name': user_obj['first_name'],\n 'last_name': user_obj['last_name'],\n 'phone_number': user_obj['phone_number'],\n 'status_id': user_obj['status_id'],\n 'roles': roles,\n }\n\n # NOTE: This is the only action and attribute that a non-admin can\n # affect. Corner case where a user is invited by an admin and activates\n # their account, and without refreshing the page, the admin assigns\n # something to the user and status gets overwritten. 
Since a status cannot\n # revert back to pending, we'll remove this from the updates\n if user_obj['status_id'] == UserStatusEnum.PENDING.value:\n user_updates.pop('status_id')\n return user_updates\n\n\n# NOTE: Will need to modify this later\ndef add_user_role_api(\n user: User,\n role_name: str,\n resource_type: str,\n resource_name: Optional[str] = None,\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> Success:\n '''Add a user role association for a given user, role and resource.'''\n add_user_role(user, role_name, resource_type, resource_name, session, flush, commit)\n\n resource_string = (\n f'Resource \\'{resource_name}\\' of type \\'{resource_type}\\''\n if resource_name\n else f'all resources of type \\'{resource_type}\\''\n )\n message = '%s Role \\'%s\\' for User \\'%s\\' on %s' % (\n 'Added' if commit else 'Commit pending for addition of',\n role_name,\n get_user_string(user),\n resource_string,\n )\n\n g.request_logger.info(message)\n return Success({'code': SUCCESS_USER_ROLE_ADDED, 'message': message})\n\n\ndef delete_user_role_api(\n user: User,\n role_name: str,\n resource_type: str,\n resource_name: Optional[str] = None,\n session: 'Optional[Session]' = None,\n flush: bool = True,\n commit: bool = True,\n) -> Success:\n '''Delete a user role association for a given user, role and resource.'''\n delete_user_role(\n user, role_name, resource_type, resource_name, session, flush, commit\n )\n\n resource_string = (\n f'Resource \\'{resource_name}\\' of type \\'{resource_type}\\''\n if resource_name\n else f'all resources of type \\'{resource_type}\\''\n )\n message = '%s Role \\'%s\\' on %s for User \\'%s\\'' % (\n 'Revoked' if commit else 'Commit pending for revocation of',\n role_name,\n resource_string,\n get_user_string(user),\n )\n\n g.request_logger.info(message)\n return Success({'code': SUCCESS_USER_ROLE_DELETED, 'message': message})\n\n\ndef invite_users(invitees: List[Invitee]) -> List[User]:\n with Transaction() as transaction:\n # First make sure that all invitees are not already registered users\n emails = [user.email.lower() for user in invitees]\n pending_users = []\n # pylint:disable=E1101\n existing_users = User.query.filter(\n User.username.in_(emails), User.status_id != UserStatusEnum.PENDING.value\n ).all()\n # pylint:disable=E1101\n existing_pending_users = User.query.filter(\n User.username.in_(emails), User.status_id == UserStatusEnum.PENDING.value\n ).all()\n\n existing_username_to_user = {}\n for user in existing_pending_users:\n existing_username_to_user[user.username.lower()] = user\n\n if existing_users != []:\n # ERROR: some users have already registered\n existing_emails = [user.username for user in existing_users]\n raise UserAlreadyInvited(existing_emails)\n\n # Add all invitees to the database as Pending Users\n for invitee in invitees:\n email = invitee.email.lower()\n existing_user = existing_username_to_user.get(email)\n if not existing_user:\n pending_user = User(\n username=email,\n first_name=invitee.name,\n last_name='',\n status_id=UserStatusEnum.PENDING.value,\n )\n pending_users.append(\n transaction.add_or_update(pending_user, flush=True)\n )\n else:\n pending_users.append(existing_user)\n\n # Now send emails to all of them\n send_invite_emails(pending_users)\n\n return pending_users\n\n\ndef get_anonymous_user() -> User:\n '''Fetch anonymous user. 
Create if it doesn't already exist.'''\n with Transaction() as transaction:\n maybe_anon_user = transaction.find_one_by_fields(\n User, False, {'username': UNREGISTERED_USER_USERNAME}\n )\n if maybe_anon_user:\n return maybe_anon_user\n\n # NOTE: Only to be used for placeholder user objects that track\n # unregistered user activity\n return transaction.add_or_update(\n User(\n username=UNREGISTERED_USER_USERNAME,\n first_name=UNREGISTERED_USER_FIRST,\n last_name=UNREGISTERED_USER_LAST,\n status_id=UserStatusEnum.ACTIVE.value,\n ),\n flush=True,\n )\n\n\ndef get_current_user() -> User:\n '''Safely fetches the current user object, taking into account unregistered\n users. Use this instead of flask_user.current_user if ever code path is used\n with unregistered users.\n '''\n return get_anonymous_user() if current_user.is_anonymous else current_user\n\n\ndef get_user_owned_resources(user: User) -> List[Resource]:\n '''Get the resources owned by a given user.'''\n with Transaction() as transaction:\n user_id = user.id\n owned_dashboards = transaction.find_all_by_fields(\n Dashboard, {'author_id': user_id}\n )\n owned_alerts = transaction.find_all_by_fields(\n AlertDefinition, {'user_id': user_id}\n )\n resource_ids = [dashboard.resource_id for dashboard in owned_dashboards] + [\n alert.authorization_resource_id for alert in owned_alerts\n ]\n return (\n transaction.run_raw()\n .query(Resource)\n .filter(Resource.id.in_(resource_ids))\n .all()\n )\n", "repo_name": "Zenysis/Harmony", "sub_path": "web/server/routes/views/users.py", "file_name": "users.py", "file_ext": "py", "file_size_in_byte": 20025, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 29, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 39, "usage_type": "call"}, {"api_name": "typing.TypedDict", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 43, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 44, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 53, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 58, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 69, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 70, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 71, "usage_type": "name"}, {"api_name": "typing.TypedDict", "line_number": 74, "usage_type": "call"}, {"api_name": "web.server.data.data_access.find_one_by_fields", "line_number": 78, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 79, "usage_type": "argument"}, {"api_name": "typing.Optional", "line_number": 77, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 89, "usage_type": "name"}, {"api_name": "web.server.data.data_access.find_one_by_fields", "line_number": 95, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 96, "usage_type": "argument"}, {"api_name": "typing.Optional", "line_number": 91, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 91, "usage_type": "name"}, 
{"api_name": "models.alchemy.user.User", "line_number": 108, "usage_type": "name"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 108, "usage_type": "name"}, {"api_name": "web.server.data.data_access.find_all_by_fields", "line_number": 111, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 112, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 109, "usage_type": "name"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 119, "usage_type": "name"}, {"api_name": "web.server.data.data_access.find_all_by_fields", "line_number": 125, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 126, "usage_type": "argument"}, {"api_name": "collections.defaultdict", "line_number": 128, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 120, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 136, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 145, "usage_type": "call"}, {"api_name": "web.server.data.data_access.delete_entity", "line_number": 150, "usage_type": "call"}, {"api_name": "web.server.data.data_access.find_all_by_fields", "line_number": 152, "usage_type": "call"}, {"api_name": "models.alchemy.alerts.AlertDefinition", "line_number": 153, "usage_type": "argument"}, {"api_name": "web.server.data.data_access.delete_entity", "line_number": 156, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 169, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 172, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 177, "usage_type": "call"}, {"api_name": "web.server.routes.views.core.try_get_role_and_resource", "line_number": 178, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserRoles", "line_number": 187, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change.send", "line_number": 190, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change", "line_number": 190, "usage_type": "name"}, {"api_name": "web.server.data.data_access.add_entity", "line_number": 191, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change.send", "line_number": 192, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change", "line_number": 192, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 176, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserRoles", "line_number": 176, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 176, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 176, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 198, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 201, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 206, "usage_type": "call"}, {"api_name": "web.server.routes.views.core.try_get_role_and_resource", "line_number": 207, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 216, "usage_type": "call"}, {"api_name": 
"web.server.potion.signals.before_user_role_change.send", "line_number": 219, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change", "line_number": 219, "usage_type": "name"}, {"api_name": "web.server.data.data_access.add_entity", "line_number": 220, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change.send", "line_number": 221, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 205, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 205, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 227, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 230, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 235, "usage_type": "call"}, {"api_name": "web.server.routes.views.core.try_get_role_and_resource", "line_number": 236, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change.send", "line_number": 245, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change", "line_number": 245, "usage_type": "name"}, {"api_name": "web.server.data.data_access.delete_entity", "line_number": 246, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change.send", "line_number": 247, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change", "line_number": 247, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 234, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 234, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 234, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 254, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 255, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 260, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change.send", "line_number": 267, "usage_type": "call"}, {"api_name": "web.server.potion.signals.before_user_role_change", "line_number": 267, "usage_type": "name"}, {"api_name": "web.server.potion.signals.after_user_role_change.send", "line_number": 269, "usage_type": "call"}, {"api_name": "web.server.potion.signals.after_user_role_change", "line_number": 269, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 259, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 259, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserRoles", "line_number": 259, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 259, "usage_type": "name"}, {"api_name": "web.server.data.data_access.find_all_by_fields", "line_number": 306, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 306, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 305, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 305, "usage_type": "name"}, {"api_name": "web.server.data.data_access.find_all_by_fields", "line_number": 312, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 313, "usage_type": "argument"}, {"api_name": "typing.List", "line_number": 311, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 311, 
"usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 318, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 319, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 320, "usage_type": "name"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 320, "usage_type": "name"}, {"api_name": "web.server.data.data_access.get_db_adapter", "line_number": 325, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 324, "usage_type": "name"}, {"api_name": "models.alchemy.user.UserAcl", "line_number": 324, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 366, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 366, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 380, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 380, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 381, "usage_type": "call"}, {"api_name": "models.alchemy.security_group.Group", "line_number": 384, "usage_type": "argument"}, {"api_name": "web.server.potion.access.get_id_from_uri", "line_number": 384, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 391, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 391, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 402, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.insert", "line_number": 405, "usage_type": "call"}, {"api_name": "models.alchemy.api_token.APIToken", "line_number": 405, "usage_type": "argument"}, {"api_name": "models.alchemy.api_token.APIToken.is_revoked", "line_number": 421, "usage_type": "attribute"}, {"api_name": "models.alchemy.api_token.APIToken", "line_number": 421, "usage_type": "name"}, {"api_name": "models.alchemy.api_token.APIToken.id.in_", "line_number": 422, "usage_type": "call"}, {"api_name": "models.alchemy.api_token.APIToken.id", "line_number": 422, "usage_type": "attribute"}, {"api_name": "models.alchemy.api_token.APIToken", "line_number": 422, "usage_type": "name"}, {"api_name": "web.server.security.signal_handlers.check_token_validity", "line_number": 426, "usage_type": "argument"}, {"api_name": "flask.current_app.cache.memoize", "line_number": 426, "usage_type": "call"}, {"api_name": "flask.current_app.cache", "line_number": 426, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 426, "usage_type": "name"}, {"api_name": "flask.current_app.cache.delete_memoized", "line_number": 428, "usage_type": "call"}, {"api_name": "flask.current_app.cache", "line_number": 428, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 428, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 434, "usage_type": "call"}, {"api_name": "models.alchemy.permission.Role", "line_number": 436, "usage_type": "argument"}, {"api_name": "web.server.potion.access.get_id_from_uri", "line_number": 436, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserStatusEnum.PENDING", "line_number": 454, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum", "line_number": 454, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 431, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 431, 
"usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 461, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 464, "usage_type": "name"}, {"api_name": "web.server.util.util.get_user_string", "line_number": 480, "usage_type": "call"}, {"api_name": "flask.g.request_logger.info", "line_number": 484, "usage_type": "call"}, {"api_name": "flask.g.request_logger", "line_number": 484, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 484, "usage_type": "name"}, {"api_name": "web.server.util.util.Success", "line_number": 485, "usage_type": "call"}, {"api_name": "web.server.util.util.Success", "line_number": 468, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 489, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 492, "usage_type": "name"}, {"api_name": "web.server.util.util.get_user_string", "line_number": 511, "usage_type": "call"}, {"api_name": "flask.g.request_logger.info", "line_number": 514, "usage_type": "call"}, {"api_name": "flask.g.request_logger", "line_number": 514, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 514, "usage_type": "name"}, {"api_name": "web.server.util.util.Success", "line_number": 515, "usage_type": "call"}, {"api_name": "web.server.util.util.Success", "line_number": 496, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 518, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 519, "usage_type": "call"}, {"api_name": "models.alchemy.user.User.query.filter", "line_number": 524, "usage_type": "call"}, {"api_name": "models.alchemy.user.User.query", "line_number": 524, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.User", "line_number": 524, "usage_type": "name"}, {"api_name": "models.alchemy.user.User.username.in_", "line_number": 525, "usage_type": "call"}, {"api_name": "models.alchemy.user.User.username", "line_number": 525, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.User", "line_number": 525, "usage_type": "name"}, {"api_name": "models.alchemy.user.User.status_id", "line_number": 525, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum.PENDING", "line_number": 525, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum", "line_number": 525, "usage_type": "name"}, {"api_name": "models.alchemy.user.User.query.filter", "line_number": 528, "usage_type": "call"}, {"api_name": "models.alchemy.user.User.query", "line_number": 528, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.User", "line_number": 528, "usage_type": "name"}, {"api_name": "models.alchemy.user.User.username.in_", "line_number": 529, "usage_type": "call"}, {"api_name": "models.alchemy.user.User.username", "line_number": 529, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.User", "line_number": 529, "usage_type": "name"}, {"api_name": "models.alchemy.user.User.status_id", "line_number": 529, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum.PENDING", "line_number": 529, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum", "line_number": 529, "usage_type": "name"}, {"api_name": "web.server.errors.UserAlreadyInvited", "line_number": 539, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 546, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserStatusEnum.PENDING", "line_number": 550, "usage_type": "attribute"}, {"api_name": 
"models.alchemy.user.UserStatusEnum", "line_number": 550, "usage_type": "name"}, {"api_name": "web.server.routes.views.invite.send_invite_emails", "line_number": 559, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 518, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 566, "usage_type": "call"}, {"api_name": "models.alchemy.user.User", "line_number": 568, "usage_type": "argument"}, {"api_name": "models.alchemy.user.User", "line_number": 576, "usage_type": "call"}, {"api_name": "models.alchemy.user.UserStatusEnum.ACTIVE", "line_number": 580, "usage_type": "attribute"}, {"api_name": "models.alchemy.user.UserStatusEnum", "line_number": 580, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 564, "usage_type": "name"}, {"api_name": "flask_user.current_user.is_anonymous", "line_number": 591, "usage_type": "attribute"}, {"api_name": "flask_user.current_user", "line_number": 591, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 586, "usage_type": "name"}, {"api_name": "models.alchemy.user.User", "line_number": 594, "usage_type": "name"}, {"api_name": "web.server.data.data_access.Transaction", "line_number": 596, "usage_type": "call"}, {"api_name": "models.alchemy.dashboard.Dashboard", "line_number": 599, "usage_type": "argument"}, {"api_name": "models.alchemy.alerts.AlertDefinition", "line_number": 602, "usage_type": "argument"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 609, "usage_type": "argument"}, {"api_name": "models.alchemy.permission.Resource.id.in_", "line_number": 610, "usage_type": "call"}, {"api_name": "models.alchemy.permission.Resource.id", "line_number": 610, "usage_type": "attribute"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 610, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 594, "usage_type": "name"}, {"api_name": "models.alchemy.permission.Resource", "line_number": 594, "usage_type": "name"}]} +{"seq_id": "459367122", "text": "from matplotlib import pyplot\n\n\nclass People:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __str__(self):\n return f\"Name: {self.name}, age: {self.age}\"\n\n\ndef display(hum):\n for h in hum:\n print(h)\n\n\ndef average(hum):\n y = [h.age for h in hum]\n x = sum(y) / len(y)\n return x\n\n\ndef oldest_person(hum):\n ages = [h.age for h in hum]\n names = [name.name for name in hum]\n old_age = max(ages)\n print(f\"Oldest person is/are: \")\n for i in range(len(ages)):\n if ages[i] == old_age:\n print(f\"\\t{names[i]}\")\n\n\ndef youngest_person(hum):\n ages = [h.age for h in hum]\n names = [name.name for name in hum]\n young = min(ages)\n print(f\"Youngest person is/are: \")\n for i in range(len(ages)):\n if ages[i] == young:\n print(f\"\\t{names[i]}\")\n\n\ndef children(hum):\n kids = [kid.name for kid in hum if kid.age < 18]\n print(f\"Children under 18: \")\n return kids\n\n\ndef pensioners_list(hum):\n pensioners = [per.name for per in hum if per.age >= 65]\n print(f\"Pensioners:\")\n return pensioners\n\n\ndef working_age(hum):\n workers = [w.name for w in hum if 18 <= w.age < 65]\n print(f\"People in working age: \")\n return workers\n\n\ndef main():\n with open(\"people.cvs\") as connection:\n lines = connection.readlines()\n humans = []\n for line in lines:\n split_line = line.split(\",\")\n name = split_line[0]\n age = int(split_line[1])\n people = People(name, age)\n humans.append(people)\n\n average_result = 
average(humans)\n print(f\"Average age is: {average_result:.2f} years.\")\n\n oldest_person(humans)\n youngest_person(humans)\n kids = children(humans)\n display(kids)\n print()\n\n pensioners = pensioners_list(humans)\n display(pensioners)\n print()\n workers = working_age(humans)\n display(workers)\n\n age_groups = ['Working age', 'Children', 'Pensioners']\n preferences = [len(workers), len(kids), len(pensioners)]\n fig, ax = pyplot.subplots()\n ax.pie(preferences, labels=age_groups, autopct='%1.1f%%')\n ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n pyplot.show()\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "TomaszNowakDev/modular_programming_module", "sub_path": "Lab08_EasterBreak/task1.py", "file_name": "task1.py", "file_ext": "py", "file_size_in_byte": 2280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "13303358543", "text": "import cv2 as cv\r\n\r\n\r\nname=\"photo/dog.jpg\"\r\nimg=cv.imread(name, -1)\r\nimg=cv.resize(img,(800,800),interpolation=cv.INTER_CUBIC)\r\nimg1=cv.imread(name, -1)\r\nimg1=cv.resize(img1,(800,800))\r\n\r\ndef click_event(event, x, y, flags, param):\r\n global img\r\n global img1\r\n global save\r\n global action\r\n if event==cv.EVENT_LBUTTONDOWN and not(flags & cv.EVENT_FLAG_CTRLKEY) and not(flags &cv.EVENT_FLAG_ALTKEY):\r\n img1=cv.rotate(img, cv.ROTATE_90_COUNTERCLOCKWISE)\r\n cv.imshow(\"image\", img1)\r\n img=img1.copy()\r\n\r\n if event==cv.EVENT_RBUTTONDOWN and not(flags & cv.EVENT_FLAG_CTRLKEY) and not(flags &cv.EVENT_FLAG_ALTKEY):\r\n img1=cv.rotate(img, cv.ROTATE_90_CLOCKWISE)\r\n cv.imshow(\"image\", img1)\r\n img=img1.copy()\r\n \r\n if event ==cv.EVENT_MOUSEMOVE and (flags & cv.EVENT_FLAG_CTRLKEY):\r\n if action==\"rectangle\":\r\n if len(points)==0:\r\n pass\r\n else:\r\n point=(x,y)\r\n points.append(point)\r\n cv.rectangle(img1, points[-1], points[-2], [255, 255, 255], 15)\r\n cv.imshow(\"image\", img1)\r\n img1=img.copy()\r\n if action==\"circle\":\r\n if len(points)==0:\r\n pass\r\n else:\r\n radius=int(((points[-1][0]-points[0][0])**2+(points[-1][1]-points[0][1])**2)**0.5)\r\n point=(x,y)\r\n points.append(point)\r\n cv.circle(img1,points[0],radius,(255,255,255),5)\r\n cv.imshow(\"image\", img1)\r\n img1=img.copy()\r\n if event == cv.EVENT_LBUTTONDOWN and (flags & cv.EVENT_FLAG_CTRLKEY):\r\n action = \"rectangle\"\r\n points.clear()\r\n points.append((x,y))\r\n\r\n #dikdörtgeni çizmek\r\n if event ==cv.EVENT_LBUTTONUP and (flags & cv.EVENT_FLAG_CTRLKEY):\r\n action=\"\"\r\n save=img[min(points[-1][1],points[0][1]):max(points[-1][1],points[0][1]),min(points[-1][0],points[0][0]):max(points[-1][0],points[0][0])]\r\n cv.rectangle(img, points[-1], points[0], [255, 255, 255], 5)\r\n cv.imwrite(\"crop.jpg\", save)\r\n crop=cv.imread(\"crop.jpg\")\r\n cv.imshow(\"cropped\",crop)\r\n cv.imshow(\"image\", img)\r\n\r\n #texti yazdırmak \r\n if event == cv.EVENT_LBUTTONDOWN and (flags & cv.EVENT_FLAG_ALTKEY):\r\n blue = img[y, x, 0]\r\n green = img[y, x, 1]\r\n red = img[y, x, 2]\r\n color=(int(blue),int(green),int(red))\r\n font = cv.FONT_HERSHEY_COMPLEX\r\n cv.putText(img, \"ZOR BAYA\", (x, y), font, 0.5, 
color, 1)\r\n cv.imshow(\"image\", img)\r\n\r\n\r\n if event == cv.EVENT_RBUTTONDOWN and (flags & cv.EVENT_FLAG_CTRLKEY):\r\n action=\"circle\"\r\n points.clear()\r\n points.append((x, y))\r\n \r\n #çemberi çizmek\r\n if event ==cv.EVENT_RBUTTONUP and (flags & cv.EVENT_FLAG_CTRLKEY):\r\n action=\"\"\r\n radius = int(((points[-1][0] - points[0][0]) ** 2 + (points[-1][1] - points[0][1]) ** 2) ** 0.5)\r\n cv.circle(img, points[0], radius, (255, 255, 255), 5)\r\n cv.imshow(\"image\", img)\r\n \r\n #en son hali\r\n if event == cv.EVENT_RBUTTONDOWN and (flags & cv.EVENT_FLAG_ALTKEY):\r\n cv.imwrite(\"result.jpg\", img)\r\n result=cv.imread(\"result.jpg\")\r\n cv.imshow(\"result\",result)\r\n\r\naction=\"rectangle\"\r\ncv.imshow(\"image\",img)\r\npoints=[]\r\ncv.setMouseCallback(\"image\",click_event)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()\r\n", "repo_name": "bugracinaroglu/cv-odev", "sub_path": "bugracinaroglu-opencv_ilködev.py", "file_name": "bugracinaroglu-opencv_ilködev.py", "file_ext": "py", "file_size_in_byte": 3425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 6, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_ALTKEY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_COUNTERCLOCKWISE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.EVENT_RBUTTONDOWN", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_ALTKEY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.rotate", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.ROTATE_90_CLOCKWISE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.EVENT_MOUSEMOVE", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 45, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_LBUTTONUP", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 56, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 57, "usage_type": "call"}, {"api_name": 
"cv2.imshow", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_ALTKEY", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 66, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 67, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 68, "usage_type": "call"}, {"api_name": "cv2.EVENT_RBUTTONDOWN", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 71, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_RBUTTONUP", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_CTRLKEY", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.EVENT_RBUTTONDOWN", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.EVENT_FLAG_ALTKEY", "line_number": 84, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 87, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.setMouseCallback", "line_number": 92, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "42100384199", "text": "import os\nimport logging\nimport pytz\nimport datetime\n\nfrom dotenv import load_dotenv\n\nfrom telegram.ext import (\n CommandHandler,\n ConversationHandler,\n CallbackQueryHandler,\n Filters,\n MessageHandler,\n Updater,\n)\n\nfrom keyboard import (\n CALLBACK_BUTTON_CITY,\n CALLBACK_BUTTON_NAME,\n CALLBACK_BUTTON_SETTINGS,\n CALLBACK_BUTTON_WEATHER,\n CALLBACK_BUTTON_CANCEL,\n CALLBACK_BUTTON_TIME,\n CALLBACK_BUTTON_SELECT_CITY,\n)\nfrom mongo import db\nfrom handlers import (\n get_time_notification,\n get_user_timezone,\n send_weather_in_due_time,\n start,\n starting,\n send_weather,\n get_settings,\n change_time_notification,\n cancel_return_basic_keyboard,\n change_time_inlinebutton_pressed,\n change_name,\n save_new_name,\n dont_know,\n change_city,\n save_city,\n get_geolocation,\n save_geolocation,\n default_answer,\n)\n\n\nload_dotenv()\n\n\nTELEGA_TOKEN = os.getenv(\"TELEGRAM_TOKEN\")\nPASSWORD_FOR_USE = os.getenv(\"PASSWORD_FOR_USE\")\n\n\nlogging.basicConfig(\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n level=logging.INFO,\n filename=\"bot.log\",\n)\n\n\ndef run_jobs(update):\n users = db.users.find()\n for user in users:\n chat_id = user[\"chat_id\"]\n time = get_time_notification(db, chat_id)\n hour = time[0]\n minute = time[1]\n tzinfo = pytz.timezone(get_user_timezone(db, chat_id))\n update.job_queue.run_daily(\n callback=send_weather_in_due_time,\n time=datetime.time(hour=hour, minute=minute, tzinfo=tzinfo),\n context=chat_id,\n name=str(chat_id),\n )\n\n\ndef main() -> None:\n updater = Updater(TELEGA_TOKEN)\n\n logging.info(\"Start bot\")\n\n dispatcher = updater.dispatcher\n dispatcher.add_handler(CommandHandler(\"start\", start))\n dispatcher.add_handler(\n MessageHandler(Filters.regex(str(PASSWORD_FOR_USE)), starting)\n )\n dispatcher.add_handler(\n MessageHandler(Filters.regex(CALLBACK_BUTTON_WEATHER), send_weather)\n )\n 
dispatcher.add_handler(\n MessageHandler(Filters.regex(CALLBACK_BUTTON_SETTINGS), get_settings)\n )\n dispatcher.add_handler(\n MessageHandler(\n Filters.regex(CALLBACK_BUTTON_TIME), change_time_notification\n )\n )\n dispatcher.add_handler(\n MessageHandler(\n Filters.regex(CALLBACK_BUTTON_CANCEL), cancel_return_basic_keyboard\n )\n )\n dispatcher.add_handler(\n CallbackQueryHandler(change_time_inlinebutton_pressed)\n )\n dispatcher.add_handler(\n ConversationHandler(\n entry_points=[\n MessageHandler(\n Filters.regex(CALLBACK_BUTTON_NAME), change_name\n )\n ],\n states={\n \"first_name\": [MessageHandler(Filters.text, save_new_name)],\n },\n fallbacks=[\n MessageHandler(\n Filters.location\n | Filters.contact\n | Filters.voice\n | Filters.sticker\n | Filters.photo\n | Filters.video\n | Filters.audio\n | Filters.document,\n dont_know,\n )\n ],\n )\n )\n dispatcher.add_handler(\n ConversationHandler(\n entry_points=[\n MessageHandler(\n Filters.regex(CALLBACK_BUTTON_SELECT_CITY), change_city\n )\n ],\n states={\n \"location\": [MessageHandler(Filters.text, save_city)],\n },\n fallbacks=[\n MessageHandler(\n Filters.location\n | Filters.contact\n | Filters.voice\n | Filters.sticker\n | Filters.photo\n | Filters.video\n | Filters.audio\n | Filters.document,\n dont_know,\n )\n ],\n )\n )\n dispatcher.add_handler(\n MessageHandler(Filters.regex(CALLBACK_BUTTON_CITY), get_geolocation)\n )\n dispatcher.add_handler(MessageHandler(Filters.location, save_geolocation))\n dispatcher.add_handler(MessageHandler(Filters.text, default_answer))\n\n updater.start_polling(bootstrap_retries=-1)\n run_jobs(updater)\n updater.idle()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "KonstantinRaikhert/weather_telegram_bot", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 4459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 49, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 52, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 53, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 56, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 58, "usage_type": "attribute"}, {"api_name": "mongo.db.users.find", "line_number": 64, "usage_type": "call"}, {"api_name": "mongo.db.users", "line_number": 64, "usage_type": "attribute"}, {"api_name": "mongo.db", "line_number": 64, "usage_type": "name"}, {"api_name": "handlers.get_time_notification", "line_number": 67, "usage_type": "call"}, {"api_name": "mongo.db", "line_number": 67, "usage_type": "argument"}, {"api_name": "pytz.timezone", "line_number": 70, "usage_type": "call"}, {"api_name": "handlers.get_user_timezone", "line_number": 70, "usage_type": "call"}, {"api_name": "mongo.db", "line_number": 70, "usage_type": "argument"}, {"api_name": "handlers.send_weather_in_due_time", "line_number": 72, "usage_type": "name"}, {"api_name": "datetime.time", "line_number": 73, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 85, "usage_type": "call"}, {"api_name": "handlers.start", "line_number": 85, "usage_type": "argument"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 87, "usage_type": "call"}, {"api_name": "handlers.starting", "line_number": 87, 
"usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 87, "usage_type": "call"}, {"api_name": "telegram.ext.Filters", "line_number": 87, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 90, "usage_type": "call"}, {"api_name": "handlers.send_weather", "line_number": 90, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 90, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_WEATHER", "line_number": 90, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 90, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 93, "usage_type": "call"}, {"api_name": "handlers.get_settings", "line_number": 93, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 93, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_SETTINGS", "line_number": 93, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 93, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 96, "usage_type": "call"}, {"api_name": "handlers.change_time_notification", "line_number": 97, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 97, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_TIME", "line_number": 97, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 97, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 101, "usage_type": "call"}, {"api_name": "handlers.cancel_return_basic_keyboard", "line_number": 102, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 102, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_CANCEL", "line_number": 102, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 102, "usage_type": "name"}, {"api_name": "telegram.ext.CallbackQueryHandler", "line_number": 106, "usage_type": "call"}, {"api_name": "handlers.change_time_inlinebutton_pressed", "line_number": 106, "usage_type": "argument"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 109, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 111, "usage_type": "call"}, {"api_name": "handlers.change_name", "line_number": 112, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 112, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_NAME", "line_number": 112, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 112, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 116, "usage_type": "call"}, {"api_name": "handlers.save_new_name", "line_number": 116, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.text", "line_number": 116, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 116, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 119, "usage_type": "call"}, {"api_name": "handlers.dont_know", "line_number": 128, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.location", "line_number": 120, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 120, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.contact", "line_number": 121, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 121, "usage_type": "name"}, {"api_name": 
"telegram.ext.Filters.voice", "line_number": 122, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 122, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.sticker", "line_number": 123, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 123, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.photo", "line_number": 124, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 124, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.video", "line_number": 125, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 125, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.audio", "line_number": 126, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 126, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.document", "line_number": 127, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 127, "usage_type": "name"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 134, "usage_type": "call"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 136, "usage_type": "call"}, {"api_name": "handlers.change_city", "line_number": 137, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 137, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_SELECT_CITY", "line_number": 137, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 137, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 141, "usage_type": "call"}, {"api_name": "handlers.save_city", "line_number": 141, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.text", "line_number": 141, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 141, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 144, "usage_type": "call"}, {"api_name": "handlers.dont_know", "line_number": 153, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.location", "line_number": 145, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 145, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.contact", "line_number": 146, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 146, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.voice", "line_number": 147, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 147, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.sticker", "line_number": 148, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 148, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.photo", "line_number": 149, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 149, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.video", "line_number": 150, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 150, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.audio", "line_number": 151, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 151, "usage_type": "name"}, {"api_name": "telegram.ext.Filters.document", "line_number": 152, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 152, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 159, "usage_type": "call"}, {"api_name": 
"handlers.get_geolocation", "line_number": 159, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.regex", "line_number": 159, "usage_type": "call"}, {"api_name": "keyboard.CALLBACK_BUTTON_CITY", "line_number": 159, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters", "line_number": 159, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 161, "usage_type": "call"}, {"api_name": "handlers.save_geolocation", "line_number": 161, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.location", "line_number": 161, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 161, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 162, "usage_type": "call"}, {"api_name": "handlers.default_answer", "line_number": 162, "usage_type": "argument"}, {"api_name": "telegram.ext.Filters.text", "line_number": 162, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "20456262217", "text": "#!/usr/bin/env python\nfrom __future__ import print_function\nimport sparselsh\nfrom scipy.sparse import csr_matrix\nimport numpy as np\ntry:\n # Python 2\n import cPickle as pickle\nexcept ImportError:\n # Python 3\n import pickle\nimport argparse\nimport re\nimport math\nimport operator\n\ndef parse_args():\n desc = 'Search for optimal hyperparams for a given corpus, saving ' \\\n 'models as we go.'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\n 'corpus', type=str,\n help='Path to file, one line per clusterable entity.')\n parser.add_argument(\n '--save', type=str,\n help='Save model to specified file. By default, index is not saved.')\n parser.add_argument(\n '--hashsize', type=int, default=128,\n help='Size of hash. Smaller sizes create fewer \"buckets\" and hence ' \\\n 'more variance between items in the bucket.')\n parser.add_argument(\n '--output', type=str, choices=[\n 'clusters','wssse'\n ], default='clusters', help='What to output. Default prints out ' \\\n 'the cluster names and their items. \"wssse\" outputs variance inside '\\\n 'clusters and their items. 
Useful for finding optimal hashsize.')\n args = parser.parse_args()\n return args\n\ndef run(corpus_path, save_index=False, hashsize=128, output='clusters'):\n rawcorpus = None\n with open(corpus_path, 'r') as f:\n rawcorpus = f.readlines()\n\n corpus = [re.sub( '[^0-9a-z]', '', rc.lower()) for rc in rawcorpus]\n maxlen = max([ len(x) for x in corpus])\n size = len(corpus)\n\n X = np.zeros((size, maxlen))\n for i in range(size):\n c = corpus[i]\n for j in range(len(c)):\n X[i][j] = ord(c[j])\n\n Xs = csr_matrix(X)\n lsh = sparselsh.LSH(\n hashsize, Xs.shape[1], num_hashtables=1)\n for i in range(Xs.shape[0]):\n x = Xs.getrow(i)\n c = rawcorpus[i]\n lsh.index( x, extra_data=c)\n\n if save_index:\n with open(save_index, 'wb') as f:\n pickle.dump( lsh, f)\n\n if output == 'clusters':\n t = lsh.hash_tables[0]\n for k in t.keys():\n vals = t.get_val(k)\n n_vals = len(vals)\n if n_vals == 1:\n continue\n print('\\n', k, n_vals)\n for val in vals:\n print(re.sub('\\r|\\n', '', val[1]))\n\n elif output == 'wssse':\n cluster_keys = lsh.hash_tables[0].keys()\n\n def mean(cluster):\n cluster_matrices = map( lambda c: c[0], cluster)\n n = len(cluster_matrices)\n if n == 1:\n return cluster_matrices[0]\n return reduce(operator.add, cluster_matrices) / n\n\n def error(point, cluster_mean):\n return math.sqrt(\n sum([x**2 for x in (point - cluster_mean).toarray()[0]]))\n\n wssse = 0\n for key in cluster_keys:\n cluster = lsh.hash_tables[0].get_val(key)\n cluster_mean = mean(cluster)\n for point in cluster:\n # matrix is 0th item w/ extra_data 1st\n e = error(point[0], cluster_mean)\n wssse += e\n\n print(hashsize, wssse)\n\n\nif __name__ == '__main__':\n args = parse_args()\n save = args.save if args.save else False\n hashsize = args.hashsize\n run(args.corpus, save_index=save, hashsize=hashsize, output=args.output)\n", "repo_name": "hlhfhmt/SparseLSH", "sub_path": "examples/cluster.py", "file_name": "cluster.py", "file_ext": "py", "file_size_in_byte": 3398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 55, "usage_type": "call"}, {"api_name": "sparselsh.LSH", "line_number": 56, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 65, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 76, "usage_type": "call"}, {"api_name": "operator.add", "line_number": 86, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "2277470961", "text": "import cv2\nimport numpy as np\n\nimage = cv2.imread(\"../../../../res/opencv.png\", 1)\ncv2.imshow(\"Original\", image)\n\nblur = cv2.GaussianBlur(image, (5, 55), 0)\ncv2.imshow(\"Blur\", blur)\n\n# Diloation black to white\n# Erosiaon white to black\n\nkernel = np.ones((5, 5), 'uint8')\n\ndilate = cv2.dilate(image, kernel, iterations=1)\nerod = cv2.erode(image, kernel, iterations=1)\n\ncv2.imshow(\"Dilate\", dilate)\ncv2.imshow(\"Erode\", erod)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "repo_name": "DastanIqbal/LearnOpenCV", "sub_path": "src/LinkedInLearning/course/cv/02.5/program.py", "file_name": "program.py", "file_ext": "py", "file_size_in_byte": 463, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", 
"api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "33034834210", "text": "import time\n\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse,FileResponse,Http404\nfrom django.views import View\n\nfrom .forms import PhotoForm\nfrom .models import Photo\nimport os\nimport img2pdf\n\n\nclass BasicUploadView(View):\n def get(self, request):\n photos_list = Photo.objects.all()\n return render(self.request, 'photos/progress_bar_upload/index.html', {'photos': photos_list})\n\n def post(self, request):\n form = PhotoForm(self.request.POST, self.request.FILES)\n if form.is_valid():\n photo = form.save()\n data = {'is_valid': True, 'name': photo.file.name, 'url': photo.file.url}\n else:\n data = {'is_valid': False}\n return JsonResponse(data)\n\n\nclass ProgressBarUploadView(View):\n def get(self, request):\n photos_list = Photo.objects.all()\n return render(self.request, 'photos/progress_bar_upload/index.html', {'photos': photos_list})\n\n def post(self, request):\n time.sleep(1) # You don't need this line. 
This is just to delay the process so you can see the progress bar testing locally.\n form = PhotoForm(self.request.POST, self.request.FILES)\n if form.is_valid():\n photo = form.save()\n data = {'is_valid': True, 'name': photo.file.name, 'url': photo.file.url}\n else:\n data = {'is_valid': False}\n return JsonResponse(data)\n\n\nclass DragAndDropUploadView(View):\n def get(self, request):\n photos_list = Photo.objects.all()\n return render(self.request, 'photos/drag_and_drop_upload/index.html', {'photos': photos_list})\n\n def post(self, request):\n form = PhotoForm(self.request.POST, self.request.FILES)\n if form.is_valid():\n photo = form.save()\n data = {'is_valid': True, 'name': photo.file.name, 'url': photo.file.url}\n else:\n data = {'is_valid': False}\n return JsonResponse(data)\n\n\ndef clear_database(request):\n for photo in Photo.objects.all():\n photo.file.delete()\n photo.delete()\n return redirect(request.POST.get('next'))\n\ndef conver(request):\n l = []\n for photo in Photo.objects.all():\n l.append(photo.file)\n\n print(l)\n if(len(l)==0):\n raise Http404()\n with open(\"media/output.pdf\", \"wb\") as f:\n f.write(img2pdf.convert([i for i in l ]))\n try:\n return FileResponse(open('media/output.pdf', 'rb'), content_type='application/pdf')\n except FileNotFoundError:\n raise Http404()\n\ndef display(request):\n try:\n clear_database(request)\n render(request,'photos/output.html', FileResponse(open('output.pdf', 'rb'), content_type='application/pdf'))\n except FileNotFoundError:\n raise Http404()", "repo_name": "Nimit5/jpg-to-pdf-Converter", "sub_path": "jpg_pdf/photos/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2789, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.View", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Photo.objects.all", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Photo.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Photo", "line_number": 15, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 19, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 25, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 28, "usage_type": "name"}, {"api_name": "models.Photo.objects.all", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Photo.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Photo", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 34, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 35, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 41, "usage_type": "call"}, {"api_name": "django.views.View", "line_number": 44, "usage_type": "name"}, {"api_name": "models.Photo.objects.all", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Photo.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Photo", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.PhotoForm", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 56, "usage_type": "call"}, {"api_name": 
"models.Photo.objects.all", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Photo.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Photo", "line_number": 60, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Photo.objects.all", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Photo.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.Photo", "line_number": 67, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 72, "usage_type": "call"}, {"api_name": "img2pdf.convert", "line_number": 74, "usage_type": "call"}, {"api_name": "django.http.FileResponse", "line_number": 76, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.FileResponse", "line_number": 83, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "38461341523", "text": "#!/usr/bin/env python3\n\nimport sys\nimport math\nfrom random import choice, randint, seed, shuffle\nfrom time import time\nfrom operator import mul\nfrom functools import reduce\n\nimport numpy\nfrom numpy import dot\nimport numpy.linalg\n\nfrom qupy.abstract import genidx\nfrom qupy.dense import is_close\nfrom qupy.ldpc.tool import write\nfrom qupy.ldpc.solve import dot2, zeros2, shortstr, shortstrx, span, array2\n#from qupy.ldpc.ensemble import Ensemble, loglikely\nfrom qupy.argv import argv\n\nepsilon = 1e-10\nr2 = math.sqrt(2)\n\nscalar = numpy.float64\n\nif 'int' in sys.argv:\n scalar = numpy.int32\n\n\ndef flatstr(A):\n s = []\n for idx in genidx(A.shape):\n s_idx = ''.join(str(i) for i in idx)\n s.append(\"%s:%s\"%(s_idx, A[idx]))\n return ', '.join(s)\n\ndef identity(n):\n I = numpy.zeros((n, n))\n for i in range(n):\n I[i, i] = 1.\n return I\n\n\nclass MPS(object):\n def __init__(self, As, linkss=None):\n self.As = As = [A.view() for A in As]\n self.linkss = linkss\n self.n = len(self.As)\n # shape: (d, r0), (r0, d, r1), (r1, d, r2), ..., (rn, d)\n if len(As[0].shape)==2:\n As[0].shape = (1,)+As[0].shape\n d = As[0].shape[1]\n rs = [As[0].shape[2]]\n for i in range(1, self.n-1):\n A = As[i]\n assert A.shape[0] == rs[-1]\n assert A.shape[1] == d\n r = A.shape[2]\n rs.append(r)\n i += 1\n A = As[i]\n if len(A.shape)==2:\n A.shape = A.shape+(1,)\n assert A.shape[0] == rs[-1]\n assert A.shape[1] == d\n self.rs = rs\n self.d = d\n self.dtype = A.dtype\n\n def __str__(self):\n return \"MPS(%s)\"%(', '.join(str(A.shape) for A in self.As))\n #return \"MPS(%s)\"%(', '.join(str(r) for r in self.rs))\n\n def get_khi(self):\n return max(A.shape[0] for A in self.As)\n\n def to_net(self, linkss=None):\n As = [A.view() for A in self.As]\n As[0].shape = As[0].shape[1:]\n As[-1].shape = As[-1].shape[:2]\n net = TensorNetwork(As, linkss or self.linkss)\n return net\n\n def get_dense(self):\n As = self.As\n idx = 0\n A0 = As[0] # (1, d, r0)\n A0 = A0.view()\n A0.shape = A0.shape[1:]\n d = self.d\n while idx+2=1:\n psi = Vh\n psi = psi.copy()\n psi.shape = (r0*d, d**n)\n \n U, s, Vh = numpy.linalg.svd(psi, False)\n assert U.shape == (psi.shape[0], min(psi.shape))\n assert s.shape == (min(psi.shape),)\n assert Vh.shape == (min(psi.shape), psi.shape[1])\n \n if khi is not None:\n U = U[:, :khi]\n s = s[:khi]\n Vh = Vh[:khi, :]\n \n #print \"MPS.build:\", n, s\n\n #print \"U:\", 
U.shape,\n #print \"s:\", s.shape,\n #print \"Vh:\", Vh.shape\n \n r1 = U.shape[1]\n U.shape = (r0, d, r1)\n \n s.shape = s.shape+(1,)\n Vh = s*Vh\n \n As.append(U)\n rs.append(r1)\n \n r0 = r1\n n -= 1\n \n #print Vh.shape\n As.append(Vh)\n \n assert len(As) == n0\n \n mps = MPS(As)\n return mps\n\n @classmethod\n def random(cls, n, khi):\n d = 2\n A = numpy.random.normal(0., 1., (1, d, khi))\n As = [A]\n for i in range(n-2):\n A = numpy.random.normal(0., 1., (khi, d, khi))\n As.append(A)\n A = numpy.random.normal(0., 1., (khi, d, 1))\n As.append(A)\n return MPS(As)\n\n @classmethod\n def from_ensemble(cls, E, p, khi=None):\n# print\n# print shortstrx(E.S)\n\n n = E.n\n\n shape = (2,)*n\n assert n<=20\n x = numpy.zeros(shape, dtype=numpy.float64)\n for i in range(E.N):\n row = E.S[i]\n #print row\n w = row.sum()\n pval = p**w * (1-p)**(n-w)\n x[tuple(row)] = pval # ?\n\n #print x.shape\n\n mps = cls.build(x, khi)\n\n #print mps\n\n y = mps.get_dense()\n idx = reindex(n, y.argmax())\n mps.idx = idx\n #print \"argmax:\", idx\n\n #err = numpy.abs((x-y)).sum() / numpy.abs(y).sum()\n err = numpy.sqrt(((x-y)**2).sum()/(2**n))\n\n print(\"Error:\", err)\n\n return mps\n\n def left_canonical(self):\n As, n = self.As, self.n\n assert self.d == 2\n\n As = list(As) # mutate!\n\n Bs = []\n for i in range(n):\n A = As[i]\n assert len(A.shape)==3\n assert A.shape[1]==self.d\n\n r, d, c = A.shape\n\n # Eq. (48)\n A1 = numpy.zeros((r*d, c), dtype=scalar)\n A1[:r, :] = A[:, 0, :]\n A1[r:, :] = A[:, 1, :]\n\n #result = numpy.linalg.qr(A1, 'economic')\n Q, R = numpy.linalg.qr(A1)\n #print \"left_canonical\", (r, d, c), Q.shape, R.shape\n m = min(c, 2*r)\n assert Q.shape == (2*r, m)\n assert R.shape == (m, c)\n\n B = numpy.zeros((r, d, m), dtype=scalar)\n B[:, 0, :] = Q[:r]\n B[:, 1, :] = Q[r:]\n Bs.append(B)\n\n if i+11:\n\n #for j in range(i):\n # A = As[j]\n # A0, A1 = A[:, 0, :], A[:, 1, :]\n # I = dot(A0.transpose(), A0) + dot(A1.transpose(), A1)\n # assert numpy.allclose(I, identity(I.shape[0]))\n\n #for j in range(i+1, n):\n # A = As[j]\n # assert A.shape[0]<=khi\n # assert A.shape[2]<=khi\n # A0, A1 = A[:, 0, :], A[:, 1, :]\n # I = dot(A0, A0.transpose()) + dot(A1, A1.transpose())\n # assert numpy.allclose(I, identity(I.shape[0]))\n\n A = As[i]\n r, d, c = A.shape\n\n # Eq. (51)\n A1 = numpy.zeros((r, 2*c), dtype=scalar)\n A1[:, :c] = A[:, 0, :]\n A1[:, c:] = A[:, 1, :]\n\n U, s, Vh = numpy.linalg.svd(A1, False)\n V = Vh.transpose()\n\n assert U.shape[0] == r\n assert V.shape[0] == 2*c\n assert U.shape[1]==s.shape[0]==V.shape[1]\n\n # Eq. 
(57)\n U1 = U[:, :khi]\n s1 = s[:khi]\n V1 = V[:, :khi]\n\n khi1 = U1.shape[1]\n\n assert U1.shape[1] == s1.shape[0] == V1.shape[1]\n\n #assert numpy.allclose(dot(U1.transpose(), U1), identity(khi1))\n #assert numpy.allclose(dot(V1.transpose(), V1), identity(khi1))\n\n s1.shape = (1, khi1) # for broadcast\n\n A = As[i-1] # mutate this\n A0, A1 = A[:, 0, :], A[:, 1, :]\n #print\n #print A0.shape, A1.shape\n A0 = dot(A0, U1)*s1\n A1 = dot(A1, U1)*s1\n #A0 = dot(dot(A0, U1), S1)\n #A1 = dot(dot(A1, U1), S1)\n assert A0.shape==A1.shape\n A = numpy.zeros((A0.shape[0], 2, A1.shape[1]), dtype=scalar)\n A[:, 0, :] = A0\n A[:, 1, :] = A1\n As[i-1] = A\n\n A = As[i] # mutate this\n A = numpy.zeros((khi1, 2, c), dtype=scalar)\n A[:, 0, :] = V1[:c].transpose()\n A[:, 1, :] = V1[c:].transpose()\n As[i] = A\n\n i -= 1\n\n As[0] = As[0] * gamma\n mps = MPS(As, self.linkss)\n return mps\n\n\ndef test_mps():\n\n n = 7\n d = 2\n shape = (d,)*n\n\n for i in range(5):\n\n khi = 4\n\n x = numpy.random.normal(0., 1., shape)\n x = MPS.random(n, khi).get_dense()\n x /= ((x**2).sum()/(d**n))**0.5\n\n mps = MPS.build(x)\n x1 = mps.get_dense()\n assert is_close(x, x1)\n #print mps\n\n gamma, mps1 = mps.left_canonical()\n mps1.As[0] *= gamma\n x1 = mps1.get_dense()\n assert is_close(x, x1)\n\n mps1 = MPS.build(x, khi)\n assert mps1.get_khi()==khi\n x1 = mps1.get_dense()\n #assert is_close(x, x1)\n #print mps1\n\n mps2 = mps.truncate(khi)\n #print mps2\n assert mps2.get_khi()==khi\n x2 = mps2.get_dense()\n\n assert ((x1-x2)**2).sum() < 1e-20\n assert ((x-x2)**2).sum() < 1e-20\n #print \"err to mps(khi):\", ((x1-x2)**2).sum()\n #print \"err to original:\", ((x-x2)**2).sum()\n\n\ndef reindex(n, idx0):\n idx = [0]*n\n for i in range(n):\n if (1< axis2:\n axis2, axis1 = axis1, axis2\n B.pop(axis2)\n B.pop(axis1)\n B.append(A[axis1])\n return tuple(B)\n\n\ndef sum_shape(A, axis):\n A = list(A)\n A.pop(axis)\n return tuple(A)\n\n\ndef tensordot_shape(A, B, axes):\n axs, bxs = axes\n assert len(axs)==len(bxs)\n assert len(axs)==1\n ax = axs[0]\n bx = bxs[0]\n A = list(A)\n A.pop(ax)\n B = list(B)\n B.pop(bx)\n return tuple(A+B)\n\n\nclass DummyNetwork(object):\n def __init__(self, shapes=[], linkss=[]):\n self.shapes = list(shapes)\n self.linkss = list(linkss)\n self.n = len(self.shapes)\n self.cost = 0\n assert self.check()\n\n def check(self):\n shapes = self.shapes\n linkss = self.linkss\n assert len(shapes)==len(linkss)==self.n\n for shape, links in self:\n assert len(shape)==len(links), (shape, links)\n for link in self.get_links():\n idxs = self.has_link(link)\n shape = [shapes[idx][linkss[idx].index(link)] for idx in idxs]\n assert len(set(shape))==1, shape\n return True\n\n def update_cost(self):\n cost = 0\n for shape, links in self:\n cost = max(cost, reduce(mul, shape, 1))\n self.cost = max(cost, self.cost)\n\n def __str__(self):\n return \"DummyNetwork(%s)\"%(\n ', '.join(str(len(shape)) for shape in self.shapes))\n\n def __len__(self):\n return self.n\n\n def __getitem__(self, i):\n shapes = self.shapes\n linkss = self.linkss\n return shapes[i], linkss[i]\n\n def get_links(self):\n links = []\n for _links in self.linkss:\n links += _links\n links = list(set(links))\n links.sort()\n return links\n\n def has_link(self, link): # TOO SLOW\n linkss = self.linkss\n idxs = [i for i in range(self.n) if link in linkss[i]]\n return idxs\n\n def cleanup_link(self, i, link, verbose=False):\n shapes = self.shapes\n linkss = self.linkss\n shape = shapes[i]\n links = linkss[i]\n while links.count(link)>=2:\n j = 
links.index(link)\n k = links.index(link, j+1)\n assert len(shape)==len(links)\n if verbose:\n print(\"cleanup_link\", shape, links, j, k)\n #print flatstr(shape)\n shape = diagonal_shape(shape, axis1=j, axis2=k)\n #print flatstr(shape)\n links.pop(k)\n links.pop(j)\n links.append(link)\n shapes[i] = shape\n if verbose:\n print(\"\\t\", shape.shape, links)\n print(flatstr(shape))\n assert len(shape)==len(links)\n assert self.check()\n self.update_cost()\n\n def cleanup(self, verbose=False):\n shapes = self.shapes\n linkss = self.linkss\n for i in range(self.n):\n shape = shapes[i]\n links = linkss[i]\n for link in set(links):\n if links.count(link)>1:\n self.cleanup_link(i, link, verbose=verbose)\n for link in self.get_links():\n self.cleanup_freelegs(link, verbose=verbose)\n for i in range(self.n):\n shape = shapes[i]\n links = linkss[i]\n assert len(set(links))==len(links)\n assert self.check()\n self.update_cost()\n\n def cleanup_freelegs(self, link, verbose=False):\n shapes = self.shapes\n linkss = self.linkss\n\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n if len(idxs)==1:\n\n if verbose:\n print()\n print(\"cleanup_freelegs\", link)\n print(\"\\tidxs:\", idxs)\n\n idx = idxs[0]\n shape = shapes[idx]\n links = linkss[idx]\n\n if verbose:\n print(\"\\tsum axis\", links.index(link))\n assert links.count(link)==1\n shape = sum_shape(shape, links.index(link))\n #print flatstr(shape)\n links.remove(link)\n shapes[idx] = shape\n if verbose:\n print(\"\\t\", shape, links)\n\n assert self.check()\n self.update_cost()\n\n def contract_1(self, link, verbose=False):\n shapes = self.shapes\n linkss = self.linkss\n\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n while len(idxs)>=3:\n if verbose:\n print()\n print(\"contract_1\", link)\n print(\"\\tidxs:\", idxs)\n\n A, B = shapes[idxs[-2]], shapes[idxs[-1]]\n assert A is not B\n links, minks = linkss[idxs[-2]], linkss[idxs[-1]]\n j, k = links.index(link), minks.index(link)\n # pad shape for broadcast\n #print \"A:\", flatstr(A), links, j\n #print \"B:\", flatstr(B), minks, k\n A, B = (\n A[:j] + (1,)*k + A[j:j+1] \\\n + (1,)*(len(B)-k-1) + A[j+1:],\n (1,)*j + B + (1,)*(len(A)-j-1))\n #print A\n #print B\n assert len(A)==len(B)\n\n #A = A*B # broadcast (inplace??)\n A = broadcast_shape(A, B)\n links = links[:j] + minks + links[j+1:]\n #print \"A:\", flatstr(A), links\n assert len(A)==len(links)\n\n shapes[idxs[-2]] = A\n linkss[idxs[-2]] = links\n\n shapes.pop(idxs[-1])\n linkss.pop(idxs[-1])\n self.n -= 1\n\n idxs.pop(-1)\n\n self.cleanup(verbose)\n\n assert self.check()\n self.update_cost()\n\n def contract_2(self, link, cleanup=True, verbose=False):\n shapes = self.shapes\n linkss = self.linkss\n if verbose:\n print()\n print(\"contract_2\", link)\n\n #print linkss\n #for links in linkss:\n # try:\n # #link in links\n # link == links[0]\n # except:\n # print link, type(link[0][0])\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n assert len(idxs) in (0, 2)\n #assert len(idxs) <= 2\n if len(idxs)==2:\n A, B = shapes[idxs[-2]], shapes[idxs[-1]]\n assert A is not B\n links, minks = linkss[idxs[-2]], linkss[idxs[-1]]\n #print \"tensordot\", links, minks\n assert links.count(link)==1 # not necessary...?\n assert minks.count(link)==1 # not necessary...?\n\n C = tensordot_shape(A, B,\n ([links.index(link)], [minks.index(link)]))\n\n links.remove(link)\n minks.remove(link)\n shapes[idxs[-2]] = C\n linkss[idxs[-2]] = links + minks\n assert len(C)==len(links+minks)\n\n shapes.pop(idxs[-1])\n linkss.pop(idxs[-1])\n 
self.n -= 1\n\n idxs.pop(-1)\n\n if cleanup:\n #print links, minks\n self.cleanup(verbose) # XXX too slow XXX\n\n assert self.check()\n self.update_cost()\n\n def contract_scalars(self, verbose=False):\n for shape in self.shapes:\n assert shape==()\n self.shapes = [()]\n self.linkss = [[]]\n self.n = 1\n if verbose:\n self.dump()\n self.update_cost()\n\n def contract_all(self, verbose=False, rand=False):\n if verbose:\n self.dump()\n links = self.get_links()\n if rand:\n shuffle(links)\n for link in links:\n #break\n if verbose:\n self.dump()\n self.contract_1(link, verbose=verbose)\n self.cleanup(verbose)\n links = self.get_links()\n if rand:\n shuffle(links)\n for link in links:\n if verbose:\n self.dump()\n self.contract_2(link, verbose=verbose)\n #print([A.shape for A in self.shapes])\n if verbose:\n self.dump()\n #if self.get_links():\n # self.contract_all_slow(verbose)\n if len(self)>1:\n self.contract_scalars(verbose)\n assert len(self)==1\n\n\n\nclass TensorNetwork(object):\n def __init__(self, As=[], linkss=[]):\n self.As = list(As)\n self.linkss = list(linkss)\n self.n = len(self.As)\n self.mark = {}\n assert self.check()\n\n def check(self):\n As = self.As\n linkss = self.linkss\n assert len(As)==len(linkss)==self.n\n for A, links in self:\n assert len(A.shape)==len(links), ((A.shape), (links))\n for link in self.get_links():\n idxs = self.has_link(link)\n shape = [As[idx].shape[linkss[idx].index(link)] for idx in idxs]\n assert len(set(shape))==1, shape\n return True\n\n def clone(self):\n As = [A.copy() for A in self.As]\n linkss = [list(links) for links in self.linkss]\n return TensorNetwork(As, linkss)\n\n def __str__(self):\n return \"TensorNetwork(%s)\"%(\n ', '.join(str(len(A.shape)) for A in self.As))\n\n def get_dummy(self):\n shapes = [A.shape for A in self.As]\n linkss = [list(links) for links in self.linkss]\n return DummyNetwork(shapes, linkss)\n\n @property\n def value(self):\n assert self.n == 1\n A = self.As[0]\n assert A.shape == (), \"not a scalar\"\n v = A[()]\n return v\n\n def get_links(self):\n links = []\n for _links in self.linkss:\n links += _links\n links = list(set(links))\n links.sort(key = str)\n return links\n\n def freelinks(self):\n linkss = self.linkss\n links = []\n for link in self.get_links():\n if sum(1 for links in linkss if links.count(link))>1:\n links.append(link)\n return links\n\n def neighbours(self, idx):\n linkss = self.linkss\n links = linkss[idx]\n nbd = []\n for link in links:\n for i in range(self.n):\n if link in linkss[i] and i!=idx:\n nbd.append(i)\n return nbd\n\n def append(self, A, links):\n # links is a list of names for the \"legs\" of A\n assert len(A.shape)==len(links)\n assert len(set(links))==len(links), links # unique\n self.As.append(A)\n self.linkss.append(list(links)) # copy\n self.n += 1\n\n def pop(self, i):\n A = self.As.pop(i)\n links = self.linkss.pop(i)\n self.n -= 1\n return A, links\n\n def dump(self):\n As = self.As\n linkss = self.linkss\n print(\"TensorNetwork:\")\n for i in range(self.n):\n print('\\t%d'%i, As[i].shape, linkss[i])\n #print('\\t', flatstr(As[i]), linkss[i])\n print(As[i])\n\n def __len__(self):\n return self.n\n\n def __getitem__(self, i):\n As = self.As\n linkss = self.linkss\n return As[i], linkss[i]\n\n def __setitem__(self, i, xxx_todo_changeme):\n (A, links) = xxx_todo_changeme\n self.As[i] = A\n self.linkss[i] = links\n\n def todot(self, name):\n As = self.As\n linkss = self.linkss\n #all = self.get_links()\n f = open(name, 'w')\n print(\"graph the_graph\\n{\", file=f)\n for i in 
range(len(self)):\n A = As[i]\n s = str(A.shape)\n s = s.replace(' ', '')\n print('\\t A_%d [label=\"%s\", shape=box];'%(i, s), file=f)\n for link in self.get_links():\n s = str(link)\n s = s.replace(' ', '')\n print('\\t L_%s [shape=ellipse, label=\"%s\"];'%(id(link), s), file=f)\n #print >>f, '\\t L_%d_%d [shape=ellipse, label=\"%s\"];'%(link[0], link[1], s)\n #print >>f, '\\t L_%d_%d [shape=ellipse, label=\"L\"];'%(link[0], link[1])\n for link in self.get_links():\n idxs = [i for i in range(self.n) if link in linkss[i]]\n for idx in idxs:\n# print >>f, \"\\t A_%d -- L_%d_%d;\" % (idx, link[0], link[1])\n print(\"\\t A_%d -- L_%s;\" % (idx, id(link)), file=f)\n print(\"}\", file=f)\n f.close()\n\n def select(self, idxs):\n As = self.As\n linkss = self.linkss\n As = [As[idx] for idx in idxs]\n linkss = [linkss[idx] for idx in idxs]\n return TensorNetwork(As, linkss)\n\n def paste(self, idxs, net):\n assert len(idxs)==len(net)\n for i, idx in enumerate(idxs):\n self[idx] = net[i]\n\n def has_link(self, link): # TOO SLOW\n linkss = self.linkss\n idxs = [i for i in range(self.n) if link in linkss[i]]\n return idxs\n\n def transpose(self, idx, perm):\n assert len(perm)==len(self.As[idx].shape)\n self.As[idx] = self.As[idx].transpose(perm)\n links = self.linkss[idx]\n links = [links[i] for i in perm]\n self.linkss[idx] = links\n\n def to_mps(self):\n As = self.As\n linkss = self.linkss\n assert self.n>=2\n\n #print self.linkss\n\n links = linkss[0]\n assert len(links)==2\n idxs = self.has_link(links[0])\n if len(idxs)>1:\n self.transpose(0, [1, 0])\n\n #print self.linkss\n links = linkss[0]\n assert len(self.has_link(links[0]))==1, self.has_link(links[0])\n assert len(self.has_link(links[1]))==2\n\n for i in range(1, self.n-1):\n links = linkss[i]\n assert len(links)==3\n if i-1 in self.has_link(links[1]):\n perm = [1]\n elif i-1 in self.has_link(links[2]):\n perm = [2]\n else:\n assert i-1 in self.has_link(links[0])\n perm = [0]\n\n if len(self.has_link(links[1]))==1:\n perm.append(1)\n elif len(self.has_link(links[2]))==1:\n perm.append(2)\n else:\n assert len(self.has_link(links[0]))==1\n perm.append(0)\n\n if i+1 in self.has_link(links[1]):\n perm.append(1)\n elif i+1 in self.has_link(links[2]):\n perm.append(2)\n else:\n assert i+1 in self.has_link(links[0])\n perm.append(0)\n\n assert sum(perm)==3\n if perm!=[0,1,2]:\n self.transpose(i, perm)\n\n i = self.n-1\n links = linkss[i]\n assert len(links)==2\n idxs = self.has_link(links[1])\n if len(idxs)>1:\n self.transpose(i, [1, 0])\n links = linkss[i]\n assert len(self.has_link(links[0]))==2\n assert len(self.has_link(links[1]))==1\n\n mps = MPS(As, linkss)\n #print mps, linkss\n #mps.to_net().check()\n return mps\n\n def shrink_at(self, idxs, link0, link1, verbose=False):\n As = self.As\n linkss = self.linkss\n assert len(idxs)==2\n assert link0!=link1\n\n# print \"shrink_at\", idxs, link0, link1\n\n idx0, idx1 = idxs\n A0, A1 = As[idx0], As[idx1]\n links0, links1 = linkss[idx0], linkss[idx1]\n\n #self.check()\n i0 = links0.index(link0)\n j0 = links0.index(link1)\n perm = list(range(len(links0)))\n perm[0], perm[i0] = perm[i0], perm[0]\n perm[1], perm[j0] = perm[j0], perm[1]\n self.transpose(idx0, perm)\n# print \"transpose\", idx0, perm\n# self.dump()\n As[idx0] = As[idx0].copy()\n #print A0.shape, linkss[idx0]\n As[idx0].shape = (As[idx0].shape[0]*As[idx0].shape[1],)+As[idx0].shape[2:]\n linkss[idx0].remove(link1)\n #print A0.shape, linkss[idx0]\n\n i1 = links1.index(link0)\n j1 = links1.index(link1)\n perm = list(range(len(links1)))\n 
perm[0], perm[i1] = perm[i1], perm[0]\n perm[1], perm[j1] = perm[j1], perm[1]\n self.transpose(idx1, perm)\n# print \"transpose\", idx1, perm\n# self.dump()\n As[idx1] = As[idx1].copy()\n As[idx1].shape = (As[idx1].shape[0]*As[idx1].shape[1],)+As[idx1].shape[2:]\n linkss[idx1].remove(link1)\n\n# self.dump()\n assert self.check()\n\n def shrink(self, verbose=False): # TOO SLOW\n \"combine two legs into one\"\n links = {}\n for link in self.get_links():\n idxs = tuple(self.has_link(link))\n if links.get(idxs):\n self.shrink_at(idxs, links[idxs], link, verbose=verbose)\n else:\n links[idxs] = link\n\n def cleanup_link(self, i, link, verbose=False):\n As = self.As\n linkss = self.linkss\n A = As[i]\n links = linkss[i]\n while links.count(link)>=2:\n j = links.index(link)\n k = links.index(link, j+1)\n assert len(A.shape)==len(links)\n if verbose:\n print(\"cleanup_link\", A.shape, links, j, k)\n #print flatstr(A)\n #print(\"diagonal:\", A.shape, end=\" \")\n shape = diagonal_shape(A.shape, j, k)\n A = A.diagonal(axis1=j, axis2=k)\n #print(\" =\", A.shape)\n assert A.shape == shape\n #print flatstr(A)\n links.pop(k)\n links.pop(j)\n links.append(link)\n As[i] = A\n if verbose:\n print(\"\\t\", A.shape, links)\n print(flatstr(A))\n assert len(A.shape)==len(links)\n assert self.check()\n\n def cleanup(self, verbose=False):\n As = self.As\n linkss = self.linkss\n for i in range(self.n):\n A = As[i]\n links = linkss[i]\n for link in set(links):\n if links.count(link)>1:\n self.cleanup_link(i, link, verbose=verbose)\n for link in self.get_links():\n self.cleanup_freelegs(link, verbose=verbose)\n for i in range(self.n):\n A = As[i]\n links = linkss[i]\n assert len(set(links))==len(links)\n assert self.check()\n\n def cleanup_freelegs(self, link, verbose=False):\n As = self.As\n linkss = self.linkss\n\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n if len(idxs)==1:\n\n if verbose:\n print()\n print(\"cleanup_freelegs\", link)\n print(\"\\tidxs:\", idxs)\n\n idx = idxs[0]\n A = As[idx]\n links = linkss[idx]\n\n if verbose:\n print(\"\\tsum axis\", links.index(link))\n assert links.count(link)==1\n A = A.sum(links.index(link))\n #print flatstr(A)\n links.remove(link)\n As[idx] = A\n if verbose:\n print(\"\\t\", A.shape, links)\n\n assert self.check()\n\n def contract_1(self, link, verbose=False):\n As = self.As\n linkss = self.linkss\n\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n while len(idxs)>=3:\n if verbose:\n print()\n print(\"contract_1\", link)\n print(\"\\tidxs:\", idxs)\n\n A, B = As[idxs[-2]], As[idxs[-1]]\n assert A is not B\n links, minks = linkss[idxs[-2]], linkss[idxs[-1]]\n j, k = links.index(link), minks.index(link)\n # pad shape for broadcast\n #print \"A:\", flatstr(A), links, j\n #print \"B:\", flatstr(B), minks, k\n A.shape, B.shape = (\n A.shape[:j] + (1,)*k + A.shape[j:j+1] \\\n + (1,)*(len(B.shape)-k-1) + A.shape[j+1:],\n (1,)*j + B.shape + (1,)*(len(A.shape)-j-1))\n #print A.shape\n #print B.shape\n assert len(A.shape)==len(B.shape)\n\n #print(\"broadcast\", A.shape, B.shape, end=\" \")\n A = A*B # broadcast (inplace??)\n #print(\" = \", A.shape)\n links = links[:j] + minks + links[j+1:]\n #print \"A:\", flatstr(A), links\n assert len(A.shape)==len(links)\n\n As[idxs[-2]] = A\n linkss[idxs[-2]] = links\n\n As.pop(idxs[-1])\n linkss.pop(idxs[-1])\n self.n -= 1\n\n idxs.pop(-1)\n\n self.cleanup(verbose)\n\n assert self.check()\n\n def contract_2(self, link, cleanup=True, verbose=False):\n As = self.As\n linkss = self.linkss\n if verbose:\n print()\n 
print(\"contract_2\", link)\n\n #print linkss\n #for links in linkss:\n # try:\n # #link in links\n # link == links[0]\n # except:\n # print link, type(link[0][0])\n idxs = [i for i in range(self.n) if link in linkss[i]]\n\n assert len(idxs) in (0, 2)\n #assert len(idxs) <= 2\n if len(idxs)==2:\n A, B = As[idxs[-2]], As[idxs[-1]]\n assert A is not B\n links, minks = linkss[idxs[-2]], linkss[idxs[-1]]\n #print \"tensordot\", links, minks\n assert links.count(link)==1 # not necessary...?\n assert minks.count(link)==1 # not necessary...?\n\n if 0:\n C = numpy.outer(A, B)\n C.shape = A.shape + B.shape\n \n As[idxs[-2]] = C\n linkss[idxs[-2]] = links + minks\n assert len(C.shape)==len(links+minks)\n \n As.pop(idxs[-1])\n linkss.pop(idxs[-1])\n self.n -= 1\n \n idxs.pop(-1)\n \n self.cleanup(verbose)\n \n return # <--------\n\n try:\n C = numpy.tensordot(A, B,\n ([links.index(link)], [minks.index(link)]))\n except MemoryError:\n print(\"\\ntensordot fail:\", len(A.shape), \"+\", len(B.shape))\n raise\n\n links.remove(link)\n minks.remove(link)\n As[idxs[-2]] = C\n linkss[idxs[-2]] = links + minks\n assert len(C.shape)==len(links+minks)\n\n As.pop(idxs[-1])\n linkss.pop(idxs[-1])\n self.n -= 1\n\n idxs.pop(-1)\n\n if cleanup:\n #print links, minks\n self.cleanup(verbose) # XXX too slow XXX\n\n assert self.check()\n\n def contract_scalars(self, verbose=False):\n r = 1.\n for A in self.As:\n assert A.shape==()\n r *= A[()]\n self.As = [scalar(r)]\n self.linkss = [[]]\n self.n = 1\n if verbose:\n self.dump()\n\n def contract_easy(self):\n didit = False\n while 1:\n links = []\n for i in range(len(self)):\n if len(self.linkss[i])<=2:\n link = self.linkss[i][0]\n links.append(link)\n if not links:\n break\n \n #print \"links\", len(links)\n for link in links:\n self.contract_2(link, False)\n write('c')\n didit = True\n\n for i in range(len(self)):\n assert len(self.linkss[i])>2\n return didit\n\n #def contract_shortest(self):\n\n def contract_upto(self, max_weight, verbose=False):\n didit = False\n while 1:\n for link in self.get_links():\n idxs = self.has_link(link)\n w = sum(len(self.As[idx].shape) for idx in idxs)\n if w <= max_weight:\n break\n else:\n break\n self.contract_2(link, verbose=verbose)\n write('u')\n didit = True\n return didit\n\n def contract_all(self, verbose=False):\n if verbose:\n self.dump()\n links = self.get_links()\n for link in links:\n #if link in skip:\n # continue\n #break\n if verbose:\n self.dump()\n self.contract_1(link, verbose=verbose)\n self.cleanup(verbose)\n links = self.get_links()\n for link in links:\n #if link in skip:\n # continue\n if verbose:\n self.dump()\n self.contract_2(link, verbose=verbose)\n #print([A.shape for A in self.As])\n if verbose:\n self.dump()\n #if self.get_links():\n # self.contract_all_slow(verbose)\n if len(self)>1:\n self.contract_scalars(verbose)\n assert len(self)==1\n\n def contract_slow(self, link, verbose=False):\n As = self.As\n linkss = self.linkss\n idxs = [i for i in range(self.n) if link in linkss[i]]\n if verbose:\n print(\"contract_slow\", link, idxs)\n if not idxs:\n return\n As = [A for A, links in self if link in links]\n linkss = [links for A, links in self if link in links]\n links = list(linkss[0])\n A = As[0]\n i = 0\n while i+11:\n self.contract_scalars(verbose)\n\n def contract_all_slow_slow(self, verbose=False):\n As = list(self.As)\n linkss = self.linkss\n links = list(linkss[0])\n i = 0\n while i+1 4194304:\n assert 0\n\n v = oe.contract(*args)\n #print(\"contract_oe\", v.shape)\n assert v.shape == ()\n return 
v[()]\n\n\n\ndef build_test():\n\n if 0:\n n = 3\n k = 2\n shape = (2,)*k\n As = [numpy.random.normal(0., 1., shape) for i in range(n)]\n #As = [2.*numpy.random.binomial(1, 0.5, shape) for i in range(n)]\n #linkss = [range(k) for i in range(n)]\n linkss = [[0, 1], [1, 2], [2, 0]]\n\n elif 0:\n n = 2\n k = 3\n shape = (2,)*k\n As = [numpy.random.normal(0., 1., shape) for i in range(n)]\n #As = [2.*numpy.random.binomial(1, 0.5, shape) for i in range(n)]\n linkss = [list(range(k)) for i in range(n)]\n #linkss = [[0, 1], [1, 2], [2, 0]]\n\n elif 0:\n n = 1\n k = 4\n shape = (2,)*k\n As = [numpy.random.normal(0., 1., shape) for i in range(n)]\n #As = [2.*numpy.random.binomial(1, 0.5, shape) for i in range(n)]\n linkss = [list(range(k/2))*2]\n\n n = randint(1, 5)\n As = []\n linkss = []\n for i in range(n):\n k = randint(1, 5)\n shape = (2,)*k\n A = numpy.random.normal(0., 1., shape)\n links = [randint(0, 2*n) for _ in range(k)]\n As.append(A)\n linkss.append(links)\n\n net = TensorNetwork(As, linkss)\n return net\n\n\ndef test_net():\n\n #numpy.random.seed(0)\n #seed(0)\n\n #verbose = True\n verbose = False\n\n for trial in range(100):\n\n net = build_test()\n net1 = net.clone()\n #net2 = net.clone()\n\n idx = randint(0, net.n-1)\n perm = list(range(len(net.linkss[idx])))\n shuffle(perm)\n net.transpose(idx, perm)\n \n net.contract_all(verbose=verbose)\n #net.contract_2(0, verbose=verbose)\n #net.cleanup(verbose)\n \n net.contract_all_slow(verbose=verbose)\n \n #print \"~~~~~~~~~~~~~~~~~\\n\"\n \n net1.contract_all_slow(verbose=verbose)\n #net2.contract_all_slow_slow(verbose=verbose)\n\n #net1.dump()\n \n #print net.value\n #print net1.value\n #print net2.value\n \n assert abs(net.value-net1.value)<1e-8\n write('.')\n print(\"OK\")\n\n\ndef test_gauge():\n\n from qupy.ldpc.gcolor import Lattice\n\n n = int(sys.argv[1])\n lattice = Lattice(n)\n print(lattice)\n code = lattice.build_code()\n\n print(code.weightsummary())\n print(code.weightstr())\n\n #print shortstr(code.Gx)\n\n Gz, Gx = code.Gz, code.Gx\n mz, n = Gz.shape\n mx, _ = Gx.shape\n\n net = TensorNetwork()\n\n # Gx nodes flip qubits\n for i in range(mx):\n write('x')\n\n g = Gx[i]\n links = [('x', i, int(j)) for j in numpy.where(g)[0]]\n w = g.sum()\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n\n # delta\n A[(0,)*w] = 1.\n A[(1,)*w] = 1.\n\n net.append(A, links)\n\n # qubit nodes: sum all bitflips\n for i in range(n):\n write('q')\n g = Gx[:, i]\n links = [('x', int(j), i) for j in numpy.where(g)[0]]\n links.append(i)\n w = g.sum()\n shape = (2,)*w\n A = numpy.zeros(shape+(2,), dtype=scalar)\n\n for idx in genidx(shape):\n idx = idx + (sum(idx)%2,)\n A[idx] = 1.\n\n net.append(A, links)\n\n # phase nodes\n for i in range(mz):\n write('z')\n\n g = Gz[i]\n links = [('z', i, int(j)) for j in numpy.where(g)[0]]\n w = g.sum()\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n\n for idx in genidx(shape):\n c = sum(idx)%2\n A[idx] = -1. 
if c else 1.\n\n net.append(A, links)\n\n for i in range(n):\n write('q')\n g = Gz[:, i]\n links = [('z', int(j), i) for j in numpy.where(g)[0]]\n links.append(i)\n w = g.sum()+1\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n\n # delta\n A[(0,)*w] = 1.\n A[(1,)*w] = 1.\n\n net.append(A, links)\n\n net.dump()\n\n corners = []\n for i in range(n):\n g = Gx[:, i]\n if g.sum()==3:\n corners.append(i)\n print(\"corners:\", corners)\n\n for i in corners:\n i = mx + i\n links = net.linkss[i]\n print(\"paint:\", links)\n for link in links:\n net.paint(link)\n break\n\n mark = net.mark\n links = list(mark.keys())\n links.sort(key = lambda link : mark[link])\n print([(link, mark[link]) for link in links])\n values = list(mark.values())\n values = list(set(values))\n values.sort()\n\n #for link in links:\n for value in reversed(values):\n links = [link for link in mark if mark[link]==value]\n while links:\n links.sort(key = net.cost)\n link = links.pop(0)\n c = net.cost(link)\n if c:\n print(\"contract_2:\", c, value)\n if c<=22:\n net.contract_2(link)\n if c:\n print(net, len(net))\n\n net.todot('net.dot')\n\n for i in range(len(net)):\n n = reduce(mul, net.As[i].shape)\n A = numpy.abs(net.As[i])\n print(i, n, A.sum())\n\n B = SparseTensor.fromdense(A)\n\n return\n\n net.contract_easy()\n net.contract_upto(8)\n net.contract_upto(12)\n net.contract_upto(16)\n for w in range(20, 35):\n net.contract_upto(w)\n print(net, len(net))\n #net.contract_all()\n\n print()\n #net.dump()\n print(net, len(net))\n\n\nclass ExactDecoder(object):\n \"Tensor network decoder. Computes exact probabilities.\"\n \"See OEDecoder for a faster version (smarter contractions.)\"\n def __init__(self, code):\n self.code = code\n assert code.k <= 24, \"too big...?\"\n self.logops = list(span(code.Lx))\n\n def get_p(self, p, op, verbose=False):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n net = TensorNetwork()\n\n # one tensor for each qubit\n for i in range(n):\n h = Hx[:, i]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = numpy.where(h)[0]\n\n opi = op[i]\n\n for idx in genidx(shape):\n if sum(idx)%2 == opi:\n A[idx] = 1.-p # qubit is off\n else:\n A[idx] = p # qubit is on\n\n net.append(A, links)\n\n net.contract_all(verbose)\n #net.contract_all_slow(verbose)\n\n return net.value\n\n def get_p_slow(self, p, op, verbose=False):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n r = 0\n for idx in genidx((2,)*mx):\n h = op.copy()\n for i in range(mx):\n if idx[i]:\n h += Hx[i]\n h %= 2\n w = h.sum()\n r += (p**w)*((1.-p)**(n-w))\n\n return r\n\n def decode(self, p, err_op, argv=None, verbose=False, **kw):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n T = code.get_T(err_op)\n\n #if T.sum()==0:\n # return T\n\n dist = []\n\n #print\n best = None\n best_r = 0.\n for logop in self.logops:\n op = (T+logop)%2\n r = self.get_p(p, op, verbose=verbose)\n #r1 = self.get_p_slow(p, op, verbose=verbose)\n #print \"%.6f\"%r, \"%.6f\"%r1\n if r>best_r:\n best_r = r\n best = op\n dist.append(r)\n\n #print(dist)\n return best\n\n\nclass OEDecoder(ExactDecoder):\n \"Faster version of ExactDecoder \"\n def __init__(self, code):\n self.code = code\n assert code.k <= 24, \"too big...?\"\n self.logops = list(span(code.Lx))\n self.n = code.n\n self.build()\n\n def build(self):\n import opt_einsum as 
oe # pip3 install opt_einsum\n\n #from opt_einsum.backends.dispatch import _has_einsum\n #_has_einsum['numpy'] = False\n\n code = self.code\n #Hz = code.Hz\n #Tx = code.Tx\n Hx = code.Hx\n #Lx = code.Lx\n n = code.n\n mx = code.mx\n\n net = []\n As = []\n linkss = []\n\n # one tensor for each qubit\n for i in range(n):\n h = Hx[:, i]\n w = h.sum()\n assert w<20, \"ouch: w=%d\"%w\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n As.append(A)\n links = list(numpy.where(h)[0])\n linkss.append(links)\n net.append((A, links))\n #print(A.shape, links)\n #print(linkss)\n\n self.net = net\n self.linkss = linkss\n self.As = As\n self.check()\n\n kw = {\"optimize\" : \"random-greedy\"}\n\n str_args = []\n shapes = []\n for A, links in net:\n links = ''.join(oe.get_symbol(i) for i in links)\n #print(A.shape, links)\n str_args.append(links)\n shapes.append(A.shape)\n #print(shapes)\n #print(linkss)\n str_args = ','.join(str_args)\n #print(str_args)\n path, path_info = oe.contract_path(str_args, *As, **kw)\n #print(path_info)\n sz = path_info.largest_intermediate\n print(\"OEDecoder: size=%d\" % sz)\n\n# if sz > 4194304:\n if sz > 134217728:\n assert 0, \"too big... maybe\"\n\n self.do_contract = oe.contract_expression(str_args, *shapes, **kw)\n\n def get_links(self):\n links = []\n for _links in self.linkss:\n links += _links\n links = list(set(links))\n links.sort()\n return links\n\n def has_link(self, link): # TOO SLOW\n linkss = self.linkss\n idxs = [i for i in range(self.n) if link in linkss[i]]\n return idxs\n\n def check(self):\n As = self.As\n linkss = self.linkss\n assert len(As)==len(linkss)\n for A, links in self.net:\n assert len(A.shape)==len(links), ((A.shape), (links))\n for link in self.get_links():\n idxs = self.has_link(link)\n shape = [As[idx].shape[linkss[idx].index(link)] for idx in idxs]\n assert len(set(shape))==1, shape\n return True\n\n do_contract = None\n def contract_oe(self):\n if self.do_contract is None:\n import opt_einsum as oe\n args = []\n for A, links in self.net:\n args.append(A)\n args.append(links)\n links = ''.join(oe.get_symbol(i) for i in links)\n \n v = oe.contract(*args)\n #print(\"contract_oe\", v.shape)\n\n else:\n v = self.do_contract(*self.As)\n\n assert v.shape == ()\n return v[()]\n\n t0 = 0.\n t1 = 0.\n def get_p(self, p, op, verbose=False):\n code = self.code\n Hx = code.Hx\n n = code.n\n mx = code.mx\n\n t0 = time()\n \n # one tensor for each qubit\n for i in range(n):\n h = Hx[:, i]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A, links = self.net[i]\n opi = op[i]\n\n for idx in genidx(shape):\n if sum(idx)%2 == opi:\n A[idx] = 1.-p # qubit is off\n else:\n A[idx] = p # qubit is on\n\n t1 = time()\n\n value = self.contract_oe()\n\n t2 = time()\n\n self.t0 += t1-t0\n self.t1 += t2-t1\n\n #write(\".\")\n return value\n\n def fini(self):\n print(\"\\nOEDecoder.t0 =\", self.t0)\n print(\"OEDecoder.t1 =\", self.t1)\n\n\n\nclass MPSDecoder(ExactDecoder):\n def __init__(self, code, khi=4):\n self.code = code\n assert code.k <= 24, \"too big...?\"\n self.logops = list(span(code.Lx))\n self.khi = khi\n print(\"khi:\", khi)\n\n def get_p(self, p, op, verbose=False):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n #print \"build\"\n net = TensorNetwork()\n\n # one tensor for each qubit\n for i in range(n):\n h = Hx[:, i]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [(j,i) for j in numpy.where(h)[0]]\n\n opi = op[i]\n\n 
for idx in genidx(shape):\n if sum(idx)%2 == opi:\n A[idx] = 1.-p # qubit is off\n else:\n A[idx] = p # qubit is on\n\n net.append(A, links)\n\n # one tensor for each stabilizer\n for j in range(mx):\n h = Hx[j, :]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [(j,i) for i in numpy.where(h)[0]]\n\n A[(0,)*w] = 1.\n A[(1,)*w] = 1.\n\n net.append(A, links)\n\n #net1 = net.clone()\n net1 = None\n\n #print(\"net.dot\")\n #net.todot(\"net.dot\")\n\n links = []\n for i in range(code.l):\n\n idx = code.keymap[i, 0, 0]\n assert len(net.linkss[idx])==1\n link = net.linkss[idx][0]\n links.append(link)\n idx = code.keymap[i, code.l-1, 0]\n assert len(net.linkss[idx])==1\n link = net.linkss[idx][0]\n links.append(link)\n\n for j in range(1, code.l-1):\n idx = code.keymap[i, j, 0]\n assert len(net.linkss[idx])==2\n link = net.linkss[idx][0]\n links.append(link)\n\n for i in range(code.l-1):\n for j in range(code.l-1):\n idx = code.keymap[i, j, 1]\n assert len(net.linkss[idx])==2\n link = net.linkss[idx][0]\n links.append(link)\n\n #print \"links\", len(links)\n for link in links:\n net.contract_2(link, False)\n\n# net.todot(\"net1.dot\")\n #print \"corners\"\n\n corners = [idx for idx in range(len(net)) if len(net.linkss[idx])==2]\n assert len(corners)==4\n\n idx0 = corners[0]\n edge = [idx0]\n done = False\n while not done:\n idxs = net.neighbours(edge[-1])\n for i in idxs:\n sz = len(net.As[i].shape)\n if sz<=3 and i not in edge:\n edge.append(i)\n if sz==2:\n done = True\n break\n\n mps = net.select(edge)\n links = []\n for link in mps.get_links():\n if len(mps.has_link(link))==1:\n links.append(link)\n\n #mps.todot(\"net2.dot\")\n #mps = mps.to_mps()\n\n for link in links:\n net.contract_2(link, False)\n\n #net.todot(\"net3.dot\")\n\n net.shrink() # TOO SLOW\n\n# net.todot(\"net3.dot\")\n\n while len(net)>code.l:\n \n corners = [idx for idx in range(len(net)) if len(net.linkss[idx])==2]\n assert len(corners)==4\n \n for idx in corners:\n A = net.As[idx]\n if max(A.shape)>2:\n break\n else:\n assert 0\n \n edge = [idx]\n done = False\n while not done:\n idxs = net.neighbours(edge[-1])\n for i in idxs:\n shape = net.As[i].shape\n if len(shape)<=3 and i not in edge and max(shape)>2:\n edge.append(i)\n if len(shape)==2:\n done = True\n break\n \n mps = net.select(edge)\n links = []\n for link in mps.get_links():\n if len(mps.has_link(link))==1:\n links.append(link)\n \n #mps.todot(\"net2.dot\")\n mps = mps.to_mps()\n #print mps\n #print mps#, mps.linkss\n if mps.get_khi()>self.khi:\n #print \"khi\", mps.get_khi(), \"->\", self.khi\n mps = mps.truncate(self.khi)\n #print \"khi\", mps.get_khi()\n #print mps#, mps.linkss\n #print\n mps = mps.to_net()\n net.paste(edge, mps)\n \n for link in links:\n net.contract_2(link, False)\n \n# net.todot(\"net4.dot\")\n \n net.shrink() # TOO SLOW\n \n# net.todot(\"net5.dot\")\n write('.')\n\n #net.dump()\n net.contract_all(verbose)\n #print \"OK\"\n #net.contract_all_slow(verbose)\n\n if net1:\n net1.contract_all()\n assert abs(net.value-net1.value)<1e-6\n print(net.value, net1.value)\n\n return net.value\n\n\nclass BigLogopDecoder(object):\n \"\"\"\n An exact decoder that builds a tensor network\n that represents the distribution over the logical operators.\n \"\"\"\n\n def __init__(self, code):\n self.code = code\n assert code.k <= 24, \"too big...?\"\n\n def get_dist(self, p, op, verbose=False):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n k = code.k\n mx = 
code.mx\n\n HLx = numpy.concatenate((Hx, Lx))\n\n #print()\n #print(HLx)\n\n #print \"build\"\n net = []\n\n # one tensor for each qubit\n for i in range(n):\n h = HLx[:, i]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [(j, i) for j in numpy.where(h)[0]]\n\n opi = op[i]\n\n for idx in genidx(shape):\n if sum(idx)%2 == opi:\n A[idx] = 1.-p # qubit is off\n else:\n A[idx] = p # qubit is on\n\n net.append((A, links))\n\n contract = []\n\n # one tensor for each stabilizer\n for j in range(mx):\n h = HLx[j, :]\n w = h.sum()\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [(j, i) for i in numpy.where(h)[0]]\n\n A[(0,)*w] = 1.\n A[(1,)*w] = 1.\n\n net.append((A, links))\n contract += links\n\n # one tensor for each logop\n free = []\n for j in range(mx, mx+k):\n h = HLx[j, :]\n w = h.sum()+1 # add one free leg\n assert w<20, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [(j, i) for i in numpy.where(h)[0]]\n link = \"l%d\"%(j-mx)\n links.append(link)\n free.append(link)\n \n #print(\"logop\", links[-1])\n\n A[(0,)*w] = 1.\n A[(1,)*w] = 1.\n\n net.append((A, links))\n contract += links\n\n contract = list(set(contract))\n\n all_links = []\n for A, links in net:\n assert len(links)==len(A.shape)\n all_links += links\n #for link in all_links:\n # print(all_links.count(link), end=\" \")\n #print()\n all_links = list(set(all_links))\n all_links.sort(key = str)\n lookup = dict((link, idx) for (idx, link) in enumerate(all_links))\n\n As = [A for (A, links) in net]\n linkss = [links for (A, links) in net]\n\n if 0:\n tn = TensorNetwork(As, linkss)\n #tn.dump()\n total = 0.\n idxs = {}\n for vec in genidx((2,)*len(all_links)):\n for i, link in enumerate(all_links):\n idxs[link] = vec[i]\n val = tn.get(idxs)\n #if val > 0.:\n # assert idxs['l0'] == 0\n # assert idxs['l1'] == 0\n # print(\"val:\", val)\n if idxs['l0'] == 1 and idxs['l1'] == 0:\n total += val\n print(\"total:\", total)\n \n #tn = tn.clone()\n #tn.todot(\"net.dot\")\n #tn.contract_all(skip=free)\n #print(tn.As)\n assert 0\n\n #print(\"links:\", len(all_links))\n\n import opt_einsum as oe\n args = []\n str_args = []\n for A, links in net:\n args.append(A)\n links = ''.join(oe.get_symbol(lookup[i]) for i in links)\n #print(A.shape, links)\n args.append(links)\n str_args.append(links)\n assert len(links) == len(A.shape)\n\n free = ''.join(oe.get_symbol(lookup[i]) for i in free)\n args.append(free)\n str_args = ','.join(str_args) + '->' + free\n\n #v = oe.contract(*args)\n #print(str_args)\n path, path_info = oe.contract_path(str_args, *As)\n #print(path_info)\n sz = path_info.largest_intermediate\n print(\"OEDecoder: size=%d\" % sz)\n\n if sz>4194304:\n assert 0, \"ugh, too big\"\n\n v = oe.contract(str_args, *As)\n #print(\"contract_oe\", v.shape)\n\n return v\n\n def decode(self, p, err_op, argv=None, verbose=False, **kw):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n T = code.get_T(err_op)\n\n #if T.sum()==0:\n # return T\n\n dist = self.get_dist(p, T, verbose=verbose)\n\n # XXX find max in dist \n\n print(\"...NOT FINISHED...\")\n\n return T\n\n\n\nclass LogopDecoder(object):\n \"\"\"\n An exact decoder that builds a tensor network\n that represents the distribution over the logical operators.\n This one uses less tensors than BigLogopDecoder.\n \"\"\"\n\n def __init__(self, code):\n self.code = code\n assert code.k <= 26, \"too big...?\"\n\n def 
get_dist(self, p, op, verbose=False):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n k = code.k\n mx = code.mx\n\n HLx = numpy.concatenate((Hx, Lx))\n\n #print()\n #print(HLx)\n\n #print \"build\"\n net = []\n #free = list(range(mx, mx+k))\n free = []\n\n # one tensor for each qubit\n for i in range(n):\n h = HLx[:, i] # all the incident check operators\n w = h.sum()\n assert w<24, \"ouch\"\n shape = (2,)*w\n A = numpy.zeros(shape, dtype=scalar)\n links = [j for j in numpy.where(h)[0]]\n\n opi = op[i]\n\n for idx in genidx(shape):\n if sum(idx)%2 == opi:\n A[idx] = 1.-p # qubit is off\n else:\n A[idx] = p # qubit is on\n\n net.append((A, links))\n\n for j in range(mx, mx+k):\n A = numpy.zeros((2,2), dtype=scalar)\n A[0, 0] = 1.\n A[1, 1] = 1.\n idx = j+k\n free.append(idx)\n net.append((A, [j, idx]))\n\n all_links = []\n for A, links in net:\n assert len(links)==len(A.shape)\n all_links += links\n #for link in all_links:\n # print(all_links.count(link), end=\" \")\n #print()\n all_links = list(set(all_links))\n all_links.sort(key = str)\n lookup = dict((link, idx) for (idx, link) in enumerate(all_links))\n\n As = [A for (A, links) in net]\n linkss = [links for (A, links) in net]\n\n if 0:\n tn = TensorNetwork(As, linkss)\n #tn.dump()\n total = 0.\n idxs = {}\n for vec in genidx((2,)*len(all_links)):\n for i, link in enumerate(all_links):\n idxs[link] = vec[i]\n val = tn.get(idxs)\n #if val > 0.:\n # assert idxs['l0'] == 0\n # assert idxs['l1'] == 0\n # print(\"val:\", val)\n if idxs['l0'] == 1 and idxs['l1'] == 0:\n total += val\n print(\"total:\", total)\n \n #tn = tn.clone()\n #tn.todot(\"net.dot\")\n #tn.contract_all(skip=free)\n #print(tn.As)\n assert 0\n\n #print(\"links:\", len(all_links))\n\n import opt_einsum as oe\n args = []\n str_args = []\n for A, links in net:\n args.append(A)\n links = ''.join(oe.get_symbol(lookup[i]) for i in links)\n #print(A.shape, links)\n args.append(links)\n str_args.append(links)\n assert len(links) == len(A.shape)\n\n free = ''.join(oe.get_symbol(lookup[i]) for i in free)\n args.append(free)\n str_args = ','.join(str_args) + '->' + free\n\n #v = oe.contract(*args)\n #print(str_args)\n path, path_info = oe.contract_path(str_args, *As)\n #print(path_info)\n sz = path_info.largest_intermediate\n print(\"(size=%d)\" % (sz,), end='', flush=True)\n\n# if sz>33554432:\n if sz>268435456:\n assert 0, \"ugh, too big\"\n\n v = oe.contract(str_args, *As)\n #print(\"contract_oe\", v.shape)\n\n return v\n\n def decode(self, p, err_op, argv=None, verbose=False, **kw):\n code = self.code\n Hz = code.Hz\n Tx = code.Tx\n Hx = code.Hx\n Lx = code.Lx\n n = code.n\n mx = code.mx\n\n T = code.get_T(err_op)\n\n #if T.sum()==0:\n # return T\n\n dist = self.get_dist(p, T, verbose=verbose)\n\n #print(dist.shape)\n #print(dist)\n\n best_idx = None\n best_v = 0.\n for idx in genidx(dist.shape):\n val = dist[idx]\n if val > best_v:\n best_idx = idx\n best_v = val\n\n #print(\"best:\", best_idx)\n\n best_idx = array2(best_idx)\n op = dot2(best_idx, Lx)\n op = (T+op) % 2\n\n return op\n\n\n\n\n\nif __name__==\"__main__\":\n\n test_mps()\n test_net()\n #test_gauge()\n\n\n\n", "repo_name": "punkdit/qupy", "sub_path": "qupy/ldpc/mps.py", "file_name": "mps.py", "file_ext": "py", "file_size_in_byte": 74297, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "math.sqrt", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 24, 
"usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "qupy.abstract.genidx", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.inner", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.linalg.svd", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 146, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 185, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.linalg.qr", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 289, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 290, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 322, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 326, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 351, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 362, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 384, "usage_type": "attribute"}, {"api_name": "qupy.dense.is_close", "line_number": 390, "usage_type": "call"}, {"api_name": "qupy.dense.is_close", "line_number": 396, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.dot2", "line_number": 439, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.zeros2", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 449, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 451, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 452, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 453, "usage_type": "call"}, {"api_name": 
"qupy.ldpc.solve.dot2", "line_number": 462, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 465, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 469, "usage_type": "call"}, {"api_name": "qupy.argv.argv.get", "line_number": 480, "usage_type": "call"}, {"api_name": "qupy.argv.argv", "line_number": 480, "usage_type": "name"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 483, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 523, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 524, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 535, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 564, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.dot2", "line_number": 590, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.shortstr", "line_number": 592, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 598, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 601, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 605, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 607, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 617, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 621, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 626, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.shortstr", "line_number": 631, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 637, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.shortstr", "line_number": 653, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.shortstr", "line_number": 656, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 662, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 712, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 713, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 714, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 714, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 788, "usage_type": "call"}, {"api_name": "operator.mul", "line_number": 788, "usage_type": "argument"}, {"api_name": "random.shuffle", "line_number": 999, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 1008, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 1441, "usage_type": "call"}, {"api_name": "numpy.tensordot", "line_number": 1459, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1508, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1528, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 1574, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1588, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 1589, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 1631, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1645, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 1646, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 1708, "usage_type": "call"}, {"api_name": "opt_einsum.contract_path", "line_number": 1712, "usage_type": "call"}, {"api_name": "opt_einsum.contract", "line_number": 1720, "usage_type": "call"}, 
{"api_name": "numpy.random.normal", "line_number": 1733, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1733, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 1742, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1742, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 1751, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1751, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 1755, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 1759, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 1761, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 1761, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 1762, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 1784, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 1786, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1807, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 1815, "usage_type": "attribute"}, {"api_name": "qupy.ldpc.gcolor.Lattice", "line_number": 1816, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1833, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1836, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1839, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1849, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1851, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1855, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 1857, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1865, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1868, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1871, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 1873, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 1880, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1882, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1886, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 1936, "usage_type": "call"}, {"api_name": "operator.mul", "line_number": 1936, "usage_type": "argument"}, {"api_name": "numpy.abs", "line_number": 1937, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.span", "line_number": 1964, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1983, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 1984, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 1988, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2011, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.span", "line_number": 2060, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2088, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2090, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2106, "usage_type": "call"}, {"api_name": "opt_einsum.contract_path", "line_number": 2114, "usage_type": "call"}, {"api_name": "opt_einsum.contract_expression", "line_number": 2123, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2158, "usage_type": "call"}, {"api_name": "opt_einsum.contract", "line_number": 2160, 
"usage_type": "call"}, {"api_name": "time.time", "line_number": 2177, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2188, "usage_type": "call"}, {"api_name": "time.time", "line_number": 2194, "usage_type": "call"}, {"api_name": "time.time", "line_number": 2198, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.span", "line_number": 2216, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2238, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2239, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2243, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2257, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2258, "usage_type": "call"}, {"api_name": "qupy.ldpc.tool.write", "line_number": 2388, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2423, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2437, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2438, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2442, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2458, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2459, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2474, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2475, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2509, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2534, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2540, "usage_type": "call"}, {"api_name": "opt_einsum.contract_path", "line_number": 2546, "usage_type": "call"}, {"api_name": "opt_einsum.contract", "line_number": 2554, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 2604, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2620, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 2621, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2625, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2634, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2660, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2685, "usage_type": "call"}, {"api_name": "opt_einsum.get_symbol", "line_number": 2691, "usage_type": "call"}, {"api_name": "opt_einsum.contract_path", "line_number": 2697, "usage_type": "call"}, {"api_name": "opt_einsum.contract", "line_number": 2706, "usage_type": "call"}, {"api_name": "qupy.abstract.genidx", "line_number": 2732, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.array2", "line_number": 2740, "usage_type": "call"}, {"api_name": "qupy.ldpc.solve.dot2", "line_number": 2741, "usage_type": "call"}]} +{"seq_id": "40731854854", "text": "import requests\nimport json\nfrom ncclient import manager\nimport xmltodict\nimport xml.dom.minidom\nfrom pprint import pprint as pp\nfrom rich import print\n\nios_xe_host = \"172.16.100.146\"\nios_xe_port = 830\nios_xe_username = \"cisco\"\nios_xe_password = \"cisco\"\n\nwith manager.connect(\n host=ios_xe_host,\n port=ios_xe_port,\n username=ios_xe_username,\n password=ios_xe_password,\n hostkey_verify=False,\n look_for_keys=False\n) as m:\n # m = manager.connect(\n # host=ios_xe_host,\n # port=ios_xe_port,\n # username=ios_xe_username,\n # password=ios_xe_password,\n # hostkey_verify=False,\n # look_for_keys=False\n # )\n\n 
netconf_filter = \"\"\"\n \n \n \n GigabitEthernet1\n \n \n \"\"\"\n\n for capability in m.server_capabilities:\n print(\"*\" * 50)\n print(capability)\n print('Connected')\n interface_netconf = m.get_config('running', netconf_filter)\n print('getting running config')\n xmlDom = xml.dom.minidom.parseString(str(interface_netconf))\n print(xmlDom.toprettyxml(indent=\" \"))\n print('*' * 25 + \"Break \" + \"*\" * 50)\n interface_python = xmltodict.parse(interface_netconf.xml)['rpc-reply']['data']\n name = interface_python['interfaces']['interface']['name']['#text']\n intf_ip = interface_python['interfaces']['interface']['ipv4']['address']['ip']\n subnet = interface_python['interfaces']['interface']['ipv4']['address']['netmask']\n print(f\"[yellow]{name}[/yellow] ====> {intf_ip} {subnet}\")\n# m.close_session()", "repo_name": "aramidetosin/ciscoDevnet", "sub_path": "netconfdemo.py", "file_name": "netconfdemo.py", "file_ext": "py", "file_size_in_byte": 1692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ncclient.manager.connect", "line_number": 14, "usage_type": "call"}, {"api_name": "ncclient.manager", "line_number": 14, "usage_type": "name"}, {"api_name": "rich.print", "line_number": 41, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 42, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 43, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 45, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom.minidom.parseString", "line_number": 46, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom", "line_number": 46, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom", "line_number": 46, "usage_type": "name"}, {"api_name": "rich.print", "line_number": 47, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 48, "usage_type": "call"}, {"api_name": "xmltodict.parse", "line_number": 49, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "20653506651", "text": "#\n# Arquivo de exemplo para uso da classe timedeltas\n#\n\nfrom datetime import date\nfrom datetime import time\nfrom datetime import datetime\nfrom datetime import timedelta\n\ndef quantosFaltam(ano, mes, dia):\n hoje = date.today()\n dataProcurada = date(ano,mes,dia)\n if dataProcurada > hoje:\n data = dataProcurada - hoje\n mensagem = \"Faltam \"+ str(data).replace(\"days, 0:00:00\",\"\")+\"dias\"\n return mensagem\n elif dataProcurada < hoje:\n d = dataProcurada - hoje\n data = str(d).replace(\"days, 0:00:00\", \"\")\n mensagem = \"Passaram \" + data.replace(\"-\",\"\")+\"dias\"\n return mensagem\n else:\n mensagem = \"A data é hoje!\"\n return mensagem\n \nprint(quantosFaltam(2022,4,13))\n", "repo_name": "dbdjr/PythonBasics", "sub_path": "Cap. 
03/exemploDeltas_start.py", "file_name": "exemploDeltas_start.py", "file_ext": "py", "file_size_in_byte": 739, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.date.today", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 11, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "71438552484", "text": "import os.path\nfrom os import listdir\nimport pandas as pd\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\ndef add_age(df):\n \"\"\"Add an age column to the pandas DataFrame, based on date of birth.\"\"\"\n new_df = df.copy()\n ref_date = pd.to_datetime(2021, format=\"%Y\")\n new_df[\"AGE\"] = (ref_date - pd.to_datetime(df.DOB)).astype('timedelta64[Y]')\n return new_df\n\nclass BloodSmearDataset(Dataset):\n \"\"\"This dataset includes the preprocessed blood smears of a list of subjects.\"\"\"\n \n def __init__(self,\n img_dir,\n data_df,\n transform=None,\n age_stats=(68.77, 17.60),\n lymph_count_stats=(26.42, 46.64)\n ):\n \"\"\"\n Args:\n img_dir: (str) path to the images directory.\n data_df: (DataFrame) list of subjects / sessions used.\n transform: Optional, transformations applied to the images.\n age_stats: (tuple) Optional, mean and std of age for normalisation.\n lymph_count_stats: (tuple) Optional, mean and std of lymphocytes count for normalisation.\n \"\"\"\n if 'AGE' not in data_df.columns:\n data_df = add_age(data_df)\n self.img_dir = img_dir\n self.data_df = data_df\n self.transform = transform\n self.age_stats = age_stats\n self.lymph_count_stats = lymph_count_stats\n self.gender_code = {'F': 1, 'f': 1, 'M': -1, 'm': -1}\n \n def load_images(self, patient_id):\n patient_dir = os.path.join(self.img_dir, patient_id)\n num_images = len(os.listdir(patient_dir))\n filenames = [os.path.join(patient_dir, '%06d.jpg' % i) for i in range(num_images)]\n images = [Image.open(filename).convert('RGB') for filename in filenames]\n return images\n \n def __len__(self):\n return len(self.data_df)\n \n def __getitem__(self, idx):\n \"\"\"\n Args:\n idx: (int) the index of the subject/session whose data is loaded.\n Returns:\n sample: (dict) corresponding data described by the following keys:\n images: (list) list of blood smear images after applying transform\n label: (int) the diagnosis code (0 for healthy or 1 for lymphocytosis)\n patient_id: (str) ID of the patient (format P...)\n gender: (int) gender of the patient (0 for male or 1 for female)\n age: (int) age of the patient\n lymph_count: (float) absolute number of lymphocytes found in the patient\n \"\"\"\n label = self.data_df.iloc[idx].LABEL\n patient_id = self.data_df.iloc[idx].ID\n gender_str = self.data_df.iloc[idx].GENDER\n gender = self.gender_code[gender_str]\n age = self.data_df.iloc[idx].AGE\n lymph_count = self.data_df.iloc[idx].LYMPH_COUNT\n images = self.load_images(patient_id)\n\n # Applying transforms\n if self.transform is not None:\n images = [self.transform(image) for image in images]\n if self.age_stats is not None:\n age = (age - self.age_stats[0]) / self.age_stats[1]\n if self.lymph_count_stats is not None:\n lymph_count = (lymph_count - self.lymph_count_stats[0]) / self.lymph_count_stats[1]\n\n sample = {'images': images,\n 'label': label,\n 'patient_id': patient_id,\n 'gender': gender,\n 'age': age,\n 'lymph_count': lymph_count}\n return sample\n\n def train(self):\n \"\"\"Put all the transforms of the dataset in 
training mode\"\"\"\n self.transform.train()\n\n def eval(self):\n \"\"\"Put all the transforms of the dataset in evaluation mode\"\"\"\n self.transform.eval()\n", "repo_name": "lcsdn/MIL-medical-imaging", "sub_path": "MILcode/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 3758, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.to_datetime", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.listdir", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 44, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "40235036063", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name=\"income\"),\n path('add-income', views.add_income, name=\"add-income\"),\n path('edit-income/', views.income_edit, name=\"income-edit\"),\n path('income-delete/', views.delete_income, name=\"income-delete\"),\n path('income_source_summary', views.income_source_summary, name=\"income_source_summary\"),\n path('income_stats', views.stats_view, name=\"income-stats\"),\n path('export_csv', views.export_csv, name=\"income-export-csv\"),\n]\n", "repo_name": "Shakil-Mahmud/income-expense-record", "sub_path": "userincome/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 550, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 5, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "30880359320", "text": "from bbs.models import Post\nfrom bbs.forms import PostForm\nfrom django.shortcuts import render, redirect, get_object_or_404\n\n\ndef p_list(request):\n posts = Post.objects.all().order_by('-id')\n return render(request, 'bbs/list.html',\n {'posts': posts})\n\n\ndef p_create(request):\n if request.method == 'POST':\n post_form = PostForm(request.POST)\n\n if post_form.is_valid():\n post_form.save()\n return redirect('bbs:p_list')\n\n if request.method == 'GET':\n post_form = PostForm()\n return render(request, 'bbs/create.html', {'post_form': post_form})\n\n\n\n\n\n\n", "repo_name": "sw-baek/Cafedb_modification_web_project", "sub_path": "bbs/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 626, "program_lang": "python", 
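# A hypothetical usage sketch for the BloodSmearDataset defined above; the annotation CSV name,
# the image directory layout (<img_dir>/<patient_id>/000000.jpg, ...), and the torchvision
# transform are assumptions for illustration, not taken from the record.
import pandas as pd
from torchvision import transforms

tf = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
df = pd.read_csv("clinical_annotation.csv")   # hypothetical file with ID/DOB/GENDER/LYMPH_COUNT/LABEL columns
dataset = BloodSmearDataset("images/", df, transform=tf)
sample = dataset[0]                           # one patient's "bag" of blood-smear images
print(sample["patient_id"], sample["label"], len(sample["images"]), sample["age"])
# Note: dataset.train()/dataset.eval() expect a transform object exposing train()/eval(),
# so a plain Compose like the one above only supports direct __getitem__ access.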
"lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bbs.models.Post.objects.all", "line_number": 7, "usage_type": "call"}, {"api_name": "bbs.models.Post.objects", "line_number": 7, "usage_type": "attribute"}, {"api_name": "bbs.models.Post", "line_number": 7, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 8, "usage_type": "call"}, {"api_name": "bbs.forms.PostForm", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 18, "usage_type": "call"}, {"api_name": "bbs.forms.PostForm", "line_number": 21, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "28290559041", "text": "import pygame\n\nfrom ui.component import Component\nfrom ui.container import Container\nfrom ui.factory import Factory\nfrom ui.menu.menu import Menu\nfrom ui.layout.gridlayout import GridLayout\nfrom util.keys import V_ALIGN_TOP, USER_EVENT_TYPE, SUB_TYPE_KEYBOARD\nfrom util.config import NAME, COLORS, COLOR_BRIGHT, SCREEN_INFO, WIDTH, HEIGHT\n\nIMAGE_SCALE = 0.49\n\nclass Popup(Container):\n \"\"\" Popup Menu class \"\"\"\n\n def __init__(self, items, util, bounding_box, update_parent, callback, default_selection=None):\n \"\"\" Initializer\n\n :param items: list of item names\n :param util: utility object\n :param bounding_box: bounding box\n :param update_parent: redraw parent function\n :param callback: menu selection callback\n \"\"\"\n Container.__init__(self, util, bounding_box, (0, 0, 0))\n self.util = util\n self.factory = Factory(util)\n self.config = util.config\n self.update_parent = update_parent\n self.callback = callback\n self.popup = True\n\n c = Component(self.util)\n w = self.config[SCREEN_INFO][WIDTH]\n h = self.config[SCREEN_INFO][HEIGHT]\n c.content = pygame.Rect(0, 0, w, h)\n c.content_x = 0\n c.content_y = 0\n c.bounding_box = c.content\n c.bgr = (0, 0, 0, 0)\n c.name = \"popup.overlay.bgr\"\n c.handle_event = self.handle_outside_event\n self.add_component(c)\n\n c = Component(self.util)\n c.content = pygame.Rect(bounding_box.x, bounding_box.y, bounding_box.w, bounding_box.h - 1)\n c.content_x = 0\n c.content_y = 0\n c.bounding_box = c.content\n c.bgr = self.config[COLORS][COLOR_BRIGHT]\n c.name = \"popup.bgr\"\n self.add_component(c)\n\n self.cols = 1\n self.rows = len(items)\n\n m = self.create_popup_menu_button\n b = pygame.Rect(bounding_box.x, bounding_box.y, bounding_box.w, bounding_box.h - 2)\n self.menu = Menu(util, None, b, self.rows, self.cols, create_item_method=m)\n \n layout = GridLayout(self.menu.bb)\n layout.set_pixel_constraints(self.rows, self.cols, 1, 1)\n bounding_box = layout.get_next_constraints()\n self.modes = self.util.load_menu(items, NAME, [], V_ALIGN_TOP, bb=bounding_box, scale=IMAGE_SCALE)\n\n if not default_selection:\n selection = self.modes[items[0]]\n else:\n selection = self.modes[default_selection]\n\n self.menu.set_items(self.modes, 0, self.select_item, False)\n self.menu.visible = False\n self.menu.item_selected(selection)\n self.add_component(self.menu)\n\n self.redraw_observer = None\n self.clicked = False\n self.visible = False\n\n def create_popup_menu_button(self, s, constr, action, scale, font_size=0):\n \"\"\" Create Popup Menu button\n\n :param s: button state\n :param constr: scaling constraints\n :param action: button event listener\n :param scale: True - scale images, False - don't scale images\n\n :return: home menu button\n \"\"\"\n return 
self.factory.create_menu_button(s, constr, action, scale=True, show_label=False, ignore_bgr_opacity=True)\n\n def handle_outside_event(self, event):\n \"\"\" Handle popup event\n \n :param event: the event to handle\n \"\"\"\n if not self.visible: return\n\n mouse_events = [pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN]\n \n if event.type in mouse_events and event.button == 1 and not self.menu.bb.collidepoint(event.pos):\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.clicked = True\n elif event.type == pygame.MOUSEBUTTONUP and self.clicked:\n self.clicked = False\n self.set_visible(False)\n self.update_parent()\n if self.redraw_observer:\n self.redraw_observer()\n elif event.type == USER_EVENT_TYPE:\n valid_keys = [pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT]\n if event.sub_type == SUB_TYPE_KEYBOARD and event.keyboard_key not in valid_keys and event.action == pygame.KEYUP:\n self.set_visible(False)\n self.update_parent()\n\n def select_item(self, state):\n \"\"\" Select menu item\n\n :param state: button state\n \"\"\"\n self.set_visible(False)\n self.update_parent()\n self.callback(state)\n\n def update_popup(self, state):\n if not self.visible: return\n\n self.clean_draw_update()\n\n def add_menu_observers(self, update_observer, redraw_observer):\n \"\"\" Add menu observer\n \n :param update_observer: observer for updating menu\n :param redraw_observer: observer to redraw the whole screen\n \"\"\"\n for b in self.menu.buttons.values():\n b.add_press_listener(update_observer)\n b.add_release_listener(redraw_observer)\n\n self.menu.add_move_listener(redraw_observer)\n self.menu.add_listener(redraw_observer)\n\n self.redraw_observer = redraw_observer\n", "repo_name": "GregoryWest501/Peppy", "sub_path": "ui/menu/popup.py", "file_name": "popup.py", "file_ext": "py", "file_size_in_byte": 5109, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "ui.container.Container", "line_number": 13, "usage_type": "name"}, {"api_name": "ui.container.Container.__init__", "line_number": 25, "usage_type": "call"}, {"api_name": "util.keys", "line_number": 25, "usage_type": "argument"}, {"api_name": "ui.container.Container", "line_number": 25, "usage_type": "name"}, {"api_name": "util.keys", "line_number": 26, "usage_type": "name"}, {"api_name": "ui.factory.Factory", "line_number": 27, "usage_type": "call"}, {"api_name": "util.keys", "line_number": 27, "usage_type": "argument"}, {"api_name": "util.keys.config", "line_number": 28, "usage_type": "attribute"}, {"api_name": "util.keys", "line_number": 28, "usage_type": "name"}, {"api_name": "ui.component.Component", "line_number": 33, "usage_type": "call"}, {"api_name": "util.config.SCREEN_INFO", "line_number": 34, "usage_type": "name"}, {"api_name": "util.config.WIDTH", "line_number": 34, "usage_type": "name"}, {"api_name": "util.config.SCREEN_INFO", "line_number": 35, "usage_type": "name"}, {"api_name": "util.config.HEIGHT", "line_number": 35, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 36, "usage_type": "call"}, {"api_name": "ui.component.Component", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.Rect", "line_number": 46, "usage_type": "call"}, {"api_name": "util.config.COLORS", "line_number": 50, "usage_type": "name"}, {"api_name": "util.config.COLOR_BRIGHT", "line_number": 50, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 58, "usage_type": "call"}, {"api_name": "ui.menu.menu.Menu", "line_number": 59, "usage_type": 
"call"}, {"api_name": "util.keys", "line_number": 59, "usage_type": "argument"}, {"api_name": "ui.layout.gridlayout.GridLayout", "line_number": 61, "usage_type": "call"}, {"api_name": "util.config.NAME", "line_number": 64, "usage_type": "argument"}, {"api_name": "util.keys.V_ALIGN_TOP", "line_number": 64, "usage_type": "argument"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 104, "usage_type": "attribute"}, {"api_name": "util.keys.USER_EVENT_TYPE", "line_number": 110, "usage_type": "name"}, {"api_name": "pygame.K_UP", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 111, "usage_type": "attribute"}, {"api_name": "util.keys.SUB_TYPE_KEYBOARD", "line_number": 112, "usage_type": "name"}, {"api_name": "pygame.KEYUP", "line_number": 112, "usage_type": "attribute"}]} +{"seq_id": "37409451909", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nn = 45\n\n# 存放讀入數據\nx = []\ny = []\n\n# 存放次方數值\nxtmp = []\nytmp = []\n\nerr = []\n\n# 讀入 x y 數據\ndef SetUp():\n file = open(\"xyfile.txt\")\n for i in range(0, 45):\n text = file.readline()\n x.append(float(text.split(\" \", 2)[0]))\n xtmp.append(1)\n y.append(float(text.split(\" \", 2)[1].replace(\"\\n\", \"\")))\n ytmp.append(0)\n file.close()\n\ndef GetMatrix():\n # a 為二維 np array,b 為一維 np array\n a = np.zeros((n,n))\n b = np.array([])\n\n # 決定這回合填入 knn 次填入 np array\n knn = 1\n ky = 0\n\n # 總共要走 k 次對角線(91)\n for k in range(0, 88): # 做 k+1 * k+1 就代表右下角 i+j == k\n\n # 上三角與下三角的起始值不同,但方向同為右上到左下\n if(k < 45):\n i = 0\n j = k - i\n else:\n j = 44\n i = k - j\n\n sumx = 0\n sumy = 0\n\n # 針對每個點計算總和\n for nn in range(0, 45):\n sumy = sumy + y[nn] * xtmp[nn] # sigma yx^ky\n sumx = sumx + xtmp[nn] # sigma x^k\n xtmp[nn] = xtmp[nn] * x[nn]\n \n # 將 sumy 寫入 b np array\n if(ky < 45):\n b = np.append(b, sumy)\n ky = ky + 1\n \n # 將 sumx 斜著放入 a np array\n for kn in range(0, knn):\n a[i][j] = sumx\n i = i + 1\n j = j - 1\n\n # 第 k 回合中,要將數值填入 knn 次\n if(k < 44):\n knn = knn + 1\n else:\n knn = knn - 1\n\n np.savetxt(\"A_output\", a)\n np.savetxt(\"B_output\", b)\n return a, b\n\ndef GetSolve(m, a, b):\n aa = np.zeros((m+1, m+1))\n bb = np.array([])\n for i in range(0, m+1):\n for j in range(0, m+1):\n aa[i][j] = a[i][j]\n bb = np.append(bb, b[i])\n \n xx = np.linalg.solve(aa, bb)\n return xx\n\n# 計算 P(X),m 代表維度,即最高次方\ndef GetPxAns(m, x, xx):\n yy = []\n\n error = 0\n # 計算每一個 x 點在 m 維度情況下的數值\n for i in range(0, len(x)):\n sum = 0\n # p(x) = a0 + a1x + a2^2\n for j in range(0, m+1):\n sum = sum + xx[j] * pow(x[i], j)\n\n yy.append(sum)\n\n error = error + pow((sum - y[i]), 2)\n \n CountError(m, error)\n\n plt.figure()\n plt.grid(True)\n plt.plot(x, y, '-o', color='green', alpha = 0.5, label='Basic F(x)', Markersize=3)\n plt.plot(x, yy, '-o', color='red', alpha = 0.5, label='P(x)', Markersize = 3)\n plt.title(\"m = \" + str(m))\n plt.xlabel('x')\n plt.ylabel('f(x)')\n plt.legend()\n plt.savefig(\"./IMG/m = \" + str(m) + \" P(x).png\", dpi=300)\n\n\ndef CountError(m, e):\n sigma = math.sqrt(e/(n-m))\n err.append(sigma)\n\ndef BestChoise():\n f = open(\"Error.txt\", \"w\")\n max = 100000000\n 
best = 0\n for i in range(0, len(err)):\n f.write(\"m = \" + str(i+2) + \", err is : \" + str(err[i]) + \"\\n\")\n if(max > err[i]):\n max = err[i]\n best = i + 2\n f.close()\n return best\n\n\n#### main ####\n\nSetUp()\nA, B = GetMatrix()\n# m 代表 P(x) 維度\nfor m in range(2, 45):\n # 求得在 m 維度下的係數 XX\n XX = GetSolve(m, A, B)\n GetPxAns(m, x, XX)\n\nprint(\"Best Choise is \" + str(BestChoise()))\n", "repo_name": "Demi871023/FJU", "sub_path": "Numerical Methods/406262216 劉品萱 數值方法作業五/hw5.py", "file_name": "hw5.py", "file_ext": "py", "file_size_in_byte": 3370, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.linalg.solve", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 86, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "32349352181", "text": "import base64\nimport email\nfrom apiclient import errors\n\n\n#######################################################################\ndef ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id,\n q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return 
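# A compact, self-contained illustration of the least-squares fit that hw5.py above assembles
# diagonal-by-diagonal: the same normal equations A a = b, built here from a Vandermonde matrix
# and solved with numpy.linalg.solve.  The sample data is an assumption for demonstration only.
import numpy

def polyfit_normal_equations(x, y, m):
    X = numpy.vander(x, m + 1, increasing=True)   # column j holds x**j
    A = X.T @ X                                   # entries are sums of x**(i+j) (the moment matrix)
    b = X.T @ y                                   # entries are sums of y * x**j
    return numpy.linalg.solve(A, b)               # coefficients a0..am of P(x)

x = numpy.linspace(0.0, 1.0, 45)
y = 1.0 + 2.0 * x + 3.0 * x ** 2
print(polyfit_normal_equations(x, y, 2))          # approximately [1. 2. 3.]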
messages\n except errors.HttpError:\n print( 'An error occurred: %s')\n\n##############################################################################\ndef GetMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n #print( 'Message snippet: %s' % message['snippet'])\n\n return message\n except errors.HttpError:\n print ('An error occurred: %s')\n\n##############################################################################\n\ndef GetAttachments(service, user_id, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n print(message['payload']['mimeType'])\n for part in message['payload']['parts']:\n if part['filename']:\n print(\"attachmnet is heres\")\n attachment = service.users().messages().attachments().get(userId='me', messageId=message['id'], id=part['body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n\n path = ''.join([store_dir, part['filename']])\n\n f = open(path,'wb')\n f.write(file_data)\n f.close()\n\n except errors.HttpError:\n print ('An error occurred in messaage attactment' )", "repo_name": "RahulRajput1999/Gmail-attchment-Download", "sub_path": "html-page/GmailMethod.py", "file_name": "GmailMethod.py", "file_ext": "py", "file_size_in_byte": 1947, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "apiclient.errors.HttpError", "line_number": 22, "usage_type": "attribute"}, {"api_name": "apiclient.errors", "line_number": 22, "usage_type": "name"}, {"api_name": "apiclient.errors.HttpError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "apiclient.errors", "line_number": 33, "usage_type": "name"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 46, "usage_type": "call"}, {"api_name": "apiclient.errors.HttpError", "line_number": 54, "usage_type": "attribute"}, {"api_name": "apiclient.errors", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "18563502901", "text": "#!/usr/bin/env python\n\nfrom unittest import TestCase\nimport os.path as op\nimport json\nimport os\n\nfrom clowdr import __file__ as cfile\nfrom clowdr.controller import metadata\n\n\nclass TestMetadataGen(TestCase):\n\n cdir = op.abspath(op.join(op.dirname(cfile), op.pardir))\n descriptor = op.join(cdir, \"examples/bids-example/descriptor_d.json\")\n invocation1 = op.join(cdir, \"examples/bids-example/invocation.json\")\n invocation2 = op.join(cdir, \"examples/bids-example/invocation_ses.json\")\n invocation3 = op.join(cdir, \"examples/bids-example/\"\n \"invocation_ses_nopart.json\")\n invocation4 = op.join(cdir, \"examples/bids-example/invocs/\")\n invocation5 = op.join(cdir, \"examples/bids-example/invocation_sweep.json\")\n provdir = op.join(cdir, \"examples/bids-example/task/\")\n dataloc1 = \"localhost\"\n dataloc2 = \"s3://mybucket/path/\"\n\n def test_metadata_single_invoc(self):\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation1,\n self.provdir,\n self.dataloc1,\n verbose=True,\n bids=False)\n self.assertTrue(len(tasks) == len(invocs) == 1)\n\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation1,\n self.provdir,\n self.dataloc1,\n verbose=True,\n bids=True)\n with open(self.invocation1) as f:\n participants = len(json.load(f)[\"participant_label\"])\n self.assertTrue(len(tasks) == len(invocs) == participants)\n\n [tasks, invocs] = 
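# Hypothetical wiring for the Gmail helpers above: the record never shows how the `service`
# object is built, so the discovery call below and the assumption that OAuth credentials have
# already been obtained (e.g. via google-auth) are illustrative, not from the repo.
from googleapiclient.discovery import build

def download_all_attachments(creds, query, store_dir="./"):
    service = build("gmail", "v1", credentials=creds)
    for msg in ListMessagesMatchingQuery(service, "me", query):   # helper defined above
        GetAttachments(service, "me", msg["id"], store_dir)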
metadata.consolidateTask(self.descriptor,\n self.invocation2,\n self.provdir,\n self.dataloc1,\n verbose=True,\n bids=True)\n with open(self.invocation2) as f:\n dat = json.load(f)\n total = len(dat[\"participant_label\"]) * len(dat[\"session_label\"])\n self.assertTrue(len(tasks) == len(invocs) == total)\n\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation3,\n self.provdir,\n self.dataloc1,\n verbose=True,\n bids=True)\n with open(self.invocation3) as f:\n dat = json.load(f)\n total = len(dat[\"session_label\"])\n self.assertTrue(len(tasks) == len(invocs) == total)\n\n def test_metadata_directory_invocs(self):\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation4,\n self.provdir,\n self.dataloc1,\n verbose=True,\n bids=False)\n self.assertTrue(len(tasks) == len(invocs) and\n len(tasks) == len(os.listdir(self.invocation4)))\n\n def test_metadata_sweep(self):\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation5,\n self.provdir,\n self.dataloc1,\n verbose=True,\n sweep=[\"participant_label\",\n \"analysis_level\"],\n setup=True)\n\n with open(self.invocation5) as fhandle:\n dat = json.load(fhandle)\n total = len(dat[\"participant_label\"]) * len(dat[\"analysis_level\"])\n self.assertTrue(len(tasks) == len(invocs) == total)\n\n def test_metadata_to_remote(self):\n [tasks, invocs] = metadata.consolidateTask(self.descriptor,\n self.invocation1,\n self.provdir,\n self.dataloc2,\n verbose=True,\n bids=False)\n\n metadata.prepareForRemote(tasks, self.provdir, self.dataloc2)\n", "repo_name": "clowdr/clowdr", "sub_path": "clowdr/tests/test_metadata_gen.py", "file_name": "test_metadata_gen.py", "file_ext": "py", "file_size_in_byte": 5018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 10, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "clowdr.__file__", "line_number": 14, "usage_type": "argument"}, {"api_name": "os.path.pardir", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 27, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 27, "usage_type": "name"}, {"api_name": 
"clowdr.controller.metadata.consolidateTask", "line_number": 35, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 35, "usage_type": "name"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 45, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 45, "usage_type": "name"}, {"api_name": "json.load", "line_number": 52, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 56, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 56, "usage_type": "name"}, {"api_name": "json.load", "line_number": 63, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 68, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 68, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 78, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 78, "usage_type": "name"}, {"api_name": "json.load", "line_number": 88, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata.consolidateTask", "line_number": 93, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 93, "usage_type": "name"}, {"api_name": "clowdr.controller.metadata.prepareForRemote", "line_number": 100, "usage_type": "call"}, {"api_name": "clowdr.controller.metadata", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "41111214793", "text": "from django.db import models, transaction\nfrom django.db.models import F, Max\n\nclass ItemManager(models.Manager):\n\n\tdef move(self, obj, new_order):\n\t\tqueryset = self.get_queryset()\n\n\t\twith transaction.atomic():\n\t\t\t# Move down\n\t\t\tif obj.order > int(new_order):\n\t\t\t\tqueryset.filter(\n\t\t\t\t\tlist=obj.list,\n\t\t\t\t\torder__lt=obj.order,\n\t\t\t\t\torder__gte=new_order\n\t\t\t\t).exclude(\n\t\t\t\t\tpk=obj.pk\n\t\t\t\t).update(\n\t\t\t\t\torder=F('order') + 1\n\t\t\t\t)\n\t\t\telse:\n\t\t\t# Move up\n\t\t\t\tqueryset.filter(\n\t\t\t\t\tlist=obj.list,\n\t\t\t\t\torder__lte=new_order,\n\t\t\t\t\torder__gt=obj.order\n\t\t\t\t).exclude(\n\t\t\t\t\tpk=obj.pk\n\t\t\t\t).update(\n\t\t\t\t\torder=F('order') - 1\n\t\t\t\t)\n\n\t\t\tobj.order = new_order\n\t\t\tobj.save()\n\n\tdef create(self, **kwargs):\n\t\tinstance = self.model(**kwargs)\n\n\t\twith transaction.atomic():\n\t\t\t# Get our current max order number\n\t\t\tresults = self.filter(list=instance.list).aggregate(Max('order'))\n\n\t\t\tcurrent_order = results['order__max']\n\t\t\t\n\t\t\tif current_order is None:\n\t\t\t\tcurrent_order = 0\n\n\t\t\tvalue = current_order + 1\n\t\t\tinstance.order = value\n\t\t\tinstance.save()\n\n\t\t\treturn instance", "repo_name": "ergusto/list", "sub_path": "applications/items/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 1052, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.models.Manager", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.transaction.atomic", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.F", 
"line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.Max", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "75112449763", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 14 16:15:03 2022\r\n\r\n@author: Acer\r\n\"\"\"\r\n\r\n# Importing necessary libraries\r\nimport pandas as pd # deals with data frame \r\nimport numpy as np # deals with numerical values\r\nimport seaborn as sns\r\nhouse = pd.read_csv(\"C:\\\\Users\\\\Acer\\\\Downloads\\\\50_Startups (2)\\\\50_Startups.csv\",encoding='latin1')\r\nhouse2 = house[['Price', 'Age_08_04','KM', 'HP','cc', 'Doors','Gears', 'Quarterly_Tax', 'Weight']]\r\n\r\nwcat = pd.read_csv(\"C:\\\\Users\\Acer\\Downloads\\SLR Assignment\\Problem Statement (1)\\delivery_time.csv\")\r\ncalories = pd.read_csv(\"C:\\\\Users\\Acer\\Downloads\\SLR Assignment\\Problem Statement (1)\\calories_consumed.csv\")\r\nchurn = pd.read_csv(\"C:\\\\Users\\Acer\\Downloads\\SLR Assignment\\Problem Statement (1)\\emp_data.csv\")\r\nhike = pd.read_csv(\"C:\\\\Users\\Acer\\Downloads\\SLR Assignment\\Problem Statement (1)\\Salary_Data.csv\")\r\nhouse.columns = ['research', 'admin','marketing','state','profit']\r\n\r\n# Exploratory data analysis:\r\n# 1. Measures of central tendency\r\n# 2. Measures of dispersion\r\n# 3. Third moment business decision\r\n# 4. Fourth moment business decision\r\n# 5. Probability distributions of variables \r\n# 6. Graphical representations (Histogram, Box plot, Dot plot, Stem & Leaf plot, Bar plot, etc.)\r\n\r\nwcat.describe().columns\r\nwcat.columns = ['delivery', 'sorting']\r\ncalories.columns = ['gain', 'calories']\r\nchurn.columns = ['salary', 'churn']\r\nhike.columns = ['yearexp', 'salary']\r\n#Graphical Representation\r\nimport matplotlib.pyplot as plt # mostly used for visualization purposes \r\n\r\n# AT\r\nplt.bar(height = wcat.AT, x = np.arange(1,110,1)) #sbb data ada 109 row\r\nplt.hist(wcat.AT) #histogram\r\nplt.boxplot(wcat.AT) #boxplot\r\n\r\n# WC\r\nplt.bar(height = wcat.Waist, x = np.arange(1,109+1,1))\r\nplt.hist(wcat.Waist) #histogram\r\nplt.boxplot(wcat.Waist) #boxplot\r\n\r\n\r\n# Scatter plot\r\nplt.scatter(x=wcat['sorting'], y=wcat['delivery'], color='green') \r\nplt.xlabel(\"sorting \")\r\nplt.ylabel(\"delivery\")\r\nplt.show\r\n\r\nplt.scatter(x=hike['yearexp'], y=hike['salary'], color='green') \r\nplt.xlabel(\"yearexp \")\r\nplt.ylabel(\"salary\")\r\nplt.show\r\n# correlation\r\nnp.corrcoef(wcat.Waist, wcat.AT) \r\nsns.regplot(x=wcat['sorting'],y=wcat['delivery'])\r\n# Import library\r\nimport statsmodels.formula.api as smf\r\nhouse['norm_research'] = np.log(house.research+1)\r\nhouse['norm_marketing'] = np.log(house.marketing+1)\r\nhouse['norm_research'].hist()\r\nhouse['norm_marketing'].hist()\r\n# Simple Linear Regression --line of best fit with smallest distance between points and line\r\nmodel = smf.ols('profit ~ (marketing)', data = house).fit()\r\nmodel.summary()\r\n\r\npred1 = model.predict(pd.DataFrame(wcat['Waist']))\r\npred1 #dpt nilai y based in model y=mx+c\r\nplt.scatter(x=wcat['Weight'], y=wcat['Price'], color='blue') \r\nx = wcat.Waist\r\ny = pred1\r\nplt.plot(x, y)\r\n# Error calculation\r\nres1 = wcat.AT - pred1 #nilai error utk y baru compare dgn y actual \r\nres_sqr1 = res1*res1\r\nmse1 = np.mean(res_sqr1)\r\nrmse1 = 
np.sqrt(mse1)\r\nrmse1\r\n\r\nhouse2['age_poly'] = house2.Age_08_04*house2.Age_08_04\r\n######### Model building on Transformed Data\r\n# Log Transformation\r\n# x = log(waist); y = at -- jadikan x ada log\r\nplt.scatter(x=(house['Marketing Spend']),y=house['Profit'],color='brown')\r\nnp.corrcoef(np.log(house2.Weight), house2.Price) #correlation\r\n# dptkan equation model baru based on transformed log.Waist\r\nmodel2 = smf.ols('salary ~ np.log(yearexp)',data = hike).fit()\r\nmodel2.summary()\r\n# prediction baru utk y-value ikut equation yg dpt dari log.Waist\r\npred2 = model2.predict(pd.DataFrame(wcat['Waist']))\r\nx = wcat.Waist\r\ny = pred2\r\nplt.plot(x, y)\r\n #value dpt makin dekat dgn actual\r\n# Error calculation\r\nres2 = wcat.AT - pred2\r\nres_sqr2 = res2*res2\r\nmse2 = np.mean(res_sqr2)\r\nrmse2 = np.sqrt(mse2)\r\nrmse2\r\n\r\n\r\n#### Exponential transformation\r\n# x = waist; y = log(at) --log dkat y-axis\r\n\r\nplt.scatter(x=wcat['Waist'], y=np.log(wcat['AT']),color='orange')\r\nnp.corrcoef(wcat.Waist, np.log(wcat.AT)) #correlation\r\n\r\nmodel3 = smf.ols('np.log(salary) ~ yearexp',data = hike).fit()\r\nmodel3.summary()\r\n\r\npred3 = model3.predict(pd.DataFrame(wcat['Waist']))\r\npred3_at = np.exp(pred3)\r\npred3_at\r\n\r\nx = wcat.Waist\r\ny = pred3_at\r\nplt.plot(x, y)\r\n# value y prediction jadi overfit pulak\r\n# Error calculation\r\nres3 = wcat.AT - pred3_at\r\nres_sqr3 = res3*res3\r\nmse3 = np.mean(res_sqr3)\r\nrmse3 = np.sqrt(mse3)\r\nrmse3\r\n\r\n\r\n#### Polynomial transformation\r\n# x = waist; x^2 = waist*waist; y = log(at)\r\n\r\nmodel4 = smf.ols('(profit) ~ (marketing)', data = house).fit()\r\nmodel4.summary()\r\n\r\npred4 = model4.predict(pd.DataFrame(wcat))\r\npred4_at = np.exp(pred4)\r\npred4_at\r\nx = wcat.Waist\r\ny = pred4_at\r\nplt.plot(x, y)\r\n# Error calculation\r\nres4 = wcat.AT - pred4_at\r\nres_sqr4 = res4*res4\r\nmse4 = np.mean(res_sqr4)\r\nrmse4 = np.sqrt(mse4)\r\nrmse4\r\n\r\n# Choose the best model using RMSE\r\ndata = {\"MODEL\":pd.Series([\"SLR\", \"Log model\", \"Exp model\", \"Poly model\"]), \"RMSE\":pd.Series([rmse1, rmse2, rmse3, rmse4])}\r\ntable_rmse=pd.DataFrame(data)\r\ntable_rmse\r\n\r\n###################\r\n# The best model\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ntrain, test = train_test_split(wcat, test_size = 0.2)\r\n\r\n# fit model dekat training data \r\nfinalmodel = smf.ols('np.log(AT) ~ Waist + I(Waist*Waist)', data = train).fit()\r\nfinalmodel.summary()\r\n\r\n# Predict on test data\r\ntest_pred = finalmodel.predict(pd.DataFrame(test))\r\npred_test_AT = np.exp(test_pred)\r\npred_test_AT\r\n\r\n# Model Evaluation on Test data\r\ntest_res = test.AT - pred_test_AT\r\ntest_sqrs = test_res * test_res\r\ntest_mse = np.mean(test_sqrs)\r\ntest_rmse = np.sqrt(test_mse)\r\ntest_rmse\r\n\r\n\r\n# Prediction on train data\r\ntrain_pred = finalmodel.predict(pd.DataFrame(train))\r\npred_train_AT = np.exp(train_pred)\r\npred_train_AT\r\n\r\n# Model Evaluation on train data\r\ntrain_res = train.AT - pred_train_AT\r\ntrain_sqrs = train_res * train_res\r\ntrain_mse = np.mean(train_sqrs)\r\ntrain_rmse = np.sqrt(train_mse)\r\ntrain_rmse\r\n\r\n", "repo_name": "haseena97/mini-project-2", "sub_path": "SLR+MLR Assignment/MLR Assignment/50_Startups (2)/linear regression.py", "file_name": "linear regression.py", "file_ext": "py", "file_size_in_byte": 5859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 12, 
"usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 52, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 59, "usage_type": "call"}, {"api_name": "seaborn.regplot", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 64, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 68, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 68, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": 
"numpy.mean", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 89, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 91, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 91, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 111, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 113, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 113, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 128, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 135, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 135, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 148, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 160, "usage_type": "call"}, {"api_name": "statsmodels.formula.api.ols", "line_number": 163, "usage_type": "call"}, {"api_name": "statsmodels.formula.api", "line_number": 163, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 175, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 188, "usage_type": 
"call"}]} +{"seq_id": "21524759990", "text": "import numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom itertools import chain\n\nfrom sklearn.model_selection import GroupKFold\nfrom rankpy.queries import Queries\nfrom rankpy.models import LambdaMART as LambdaMARTModel\n\nclass LambdaMART(object):\n def __init__(self, metric='NDCG', n_estimators=100, max_depth=None,\n max_leaf_nodes=7, max_features=None, min_samples_split=2,\n min_samples_leaf=1, shrinkage=0.1, use_newton_method=True,\n use_random_forest=0, random_thresholds=False, subsample=1.0,\n use_logit_boost=False, use_ada_boost=False, estopping=50,\n min_n_estimators=1, base_model=None, n_jobs=1, random_state=None):\n self.feature_names = None\n\n self.params = {\n 'metric': metric,\n 'n_estimators': n_estimators,\n 'max_depth': max_depth,\n 'max_leaf_nodes': max_leaf_nodes,\n 'max_features': max_features,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'shrinkage': shrinkage,\n 'use_newton_method': use_newton_method,\n 'use_random_forest': use_random_forest,\n 'random_thresholds': random_thresholds,\n 'subsample': subsample,\n 'use_logit_boost': use_logit_boost,\n 'use_ada_boost': use_ada_boost,\n 'estopping': estopping,\n 'min_n_estimators': min_n_estimators,\n 'base_model': base_model,\n 'n_jobs': n_jobs,\n 'random_state': random_state,\n }\n\n\n def __str__(self):\n return self.__repr__()\n\n def __repr(self):\n return (\"%s(metric='%s', n_estimators=%d, max_depth=%d, max_leaf_nodes=%d,\\n\"\n \"max_features=%d, min_samples_split=%d, min_samples_leaf=%d,\\n\"\n \"shrinkage=%f, use_newton_method=%s, use_random_forest=%d,\\n\"\n \"random_thresholds=%s, subsample=%f, use_logit_boost=%s, use_ada_boost=%s,\\n\"\n \"estopping=%d, min_n_estimators=%d, n_jobs=%d, random_state=%s,\\n\"\n \"base_model=%s)\" % (\n self.__class__.__name__,\n self.param[\"metric\"],\n self.params[\"n_estimators\"],\n self.params[\"max_depth\"],\n self.params[\"max_leaf_nodes\"],\n self.params[\"max_features\"],\n self.params[\"min_samples_split\"],\n self.params[\"min_samples_leaf\"],\n self.params[\"shrinkage\"],\n self.params[\"use_newton_method\"],\n self.params[\"use_random_forest\"],\n self.params[\"random_thresholds\"],\n self.params[\"subsample\"],\n self.params[\"use_logit_boost\"],\n self.params[\"use_ada_boost\"],\n self.params[\"estopping\"],\n self.params[\"min_n_estimators\"],\n self.params[\"n_jobs\"],\n self.params[\"random_state\"],\n str(self.params[\"base_model\"]),\n ))\n\n def _build_query_indptr(self, ids):\n \"\"\"\n The query index pointer into the feature_vectors and relevance_scores\n array, i.e. the document feature vectors,\n ``feature_vectors[query_indptr[i]:query_indptr[i + 1]]``, and the\n corresponding relevance scores,\n ``relevance_scores[query_indptr[i]:query_indptr[i + 1]]``,\n are the feature vectors and relevance scores for the i-th\n query documents.\n \"\"\"\n query_indptr = [0]\n query_ids = []\n prev_qid = None\n for qid in ids:\n if qid == prev_qid:\n query_indptr[-1] += 1\n else:\n query_ids.append(qid)\n query_indptr.append(query_indptr[-1] + 1)\n prev_qid = qid\n return query_indptr, query_ids\n\n def _build_queries(self, X, y, ids, w):\n query_indptr, query_ids = self._build_query_indptr(ids)\n q = Queries(X, y, query_indptr, query_ids=query_ids)\n # weights as per query instead of per-row ... 
just guess\n wn = [np.mean(w[query_indptr[i]:query_indptr[i+1]]) for i in range(len(query_indptr)-1)]\n wn = [w[i] for i in query_indptr[:-1]]\n return q, np.ascontiguousarray(wn, dtype='float64')\n\n def fit(self, X, y, ids, weight=None, feature_names=None):\n self.feature_names = feature_names\n # Unfortunately rankpy only works with integer labels...\n # This is far from perfect, but works as a first try\n y = (np.asanyarray(y) * 5).astype(np.intc)\n # Split out a 10% validation set\n splitter = GroupKFold(10)\n train, valid = next(splitter.split(X, None, ids))\n\n X_train, X_valid, y_train, y_valid, ids_train, ids_valid, w_train, w_valid = chain.from_iterable(\n ((a[train], a[valid]) for a in [X, y, ids, weight]))\n\n q_train, w_train = self._build_queries(X_train, y_train, ids_train, w_train)\n q_valid, w_valid = self._build_queries(X_valid, y_valid, ids_valid, w_valid)\n\n self.model = LambdaMARTModel(**self.params)\n self.model.fit(q_train, w_train, q_valid, w_valid)\n return self\n\n def predict(self, X, ids, weight, feature_names=None):\n self.feature_names = feature_names\n query_indptr, query_ids = self._build_query_indptr(ids)\n # We wont be using this, but Queries wont instantiate without it\n y = np.zeros(X.shape[0])\n q = Queries(X, y, query_indptr, query_ids=query_ids)\n y_pred = self.model.predict(q, n_jobs=self.params['n_jobs'])\n return y_pred\n\n def plot_importance(self):\n if self.feature_names is None:\n raise Exception('No feature names available')\n\n importance = self.model.feature_importances(self.params['n_jobs'])\n\n # stolen from xgboost\n tuples = zip(self.feature_names, importance)\n tuples = sorted(tuples, key=lambda x: x[1])\n labels, values = tuples\n\n self.save_topn_features(labels, values)\n\n _, ax = plt.subplots(1, 1)\n ylocs = np.arange(len(values))\n ax.barh(ylocs, values, align='center', height=0.2)\n for x, y in zip(values, yloc):\n ax.text(x + 1, y, x, va='center')\n ax.set_yticks(ylocs)\n ax.set_yticklabels(labels)\n\n xlim = (0, max(values) * 1.1)\n ax.set_xlim(xlim)\n\n ylim = (-1, len(importance))\n ax.set_ylim(ylim)\n\n ax.grid()\n return ax\n\n def save_topn_features(self, labels, values, fname=\"LambdaMART_topn_features.txt\", topn=-1):\n if topn == -1:\n topn = len(labels)\n else:\n topn = min(topn, len(labels))\n with open(fname, \"w\") as f:\n for i in range(topn):\n f.write(\"%s = %f\" % (labels[i], values[i]))\n", "repo_name": "ebernhardson/l2r", "sub_path": "code/utils/rankpy_utils.py", "file_name": "rankpy_utils.py", "file_ext": "py", "file_size_in_byte": 6867, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "rankpy.queries.Queries", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.ascontiguousarray", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.asanyarray", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.intc", "line_number": 110, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.GroupKFold", "line_number": 112, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 115, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 115, "usage_type": "name"}, {"api_name": "rankpy.models.LambdaMART", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, 
{"api_name": "rankpy.queries.Queries", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "39153650598", "text": "import requests\nimport pandas as pd\nimport json\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom datetime import datetime\nfrom tqdm import tqdm\n\nimport creds\n\nlogin_url = 'https://contact.co.nz/contact/account/signin'\nbase_url = 'https://api.contact-digital-prod.net'\nAPI_TOKEN = \"kbIthASA7e1M3NmpMdGrn2Yqe0yHcCjL4QNPSUij\" # This is public\n\n\n\nclass Contact_Energy_API():\n \n def __init__(self):\n \n self.session = requests.session() \n self.headers = {\"x-api-key\": API_TOKEN}\n print(\"Session Started...\")\n\n\n def login(self):\n \n login_request = self.session.post(login_url, data=creds.payload)\n\n if login_request.json()[\"IsSuccessful\"]:\n print(\"Login Successful\")\n else:\n print(\"Login Failed\")\n\n self.auth_token = str(login_request.json()['Data']['Token'])\n self.headers[\"authorization\"] = self.auth_token\n\n\n def query(self, start=\"2022-03-01\", end=\"2022-08-31\", interval=\"monthly\"):\n \"\"\" start / end -> yyyy-mm-dd \"\"\"\n\n data_query = f'https://api.contact-digital-prod.net/usage/{creds.ID}?interval={interval}&from={start}&to={end}'\n\n return self.session.post(data_query, headers=self.headers)\n\n \n def hourly_power(self, start=\"2022-03-01\", end=\"2022-03-08\"):\n \"\"\" API limits to 7 days for hourly (says it does)\n but it actually only returns data for a day...\n \"\"\"\n hourly_usage = {}\n\n periods = pd.period_range(start=start,\n end=end,\n freq='D')\n\n for period in tqdm(periods.values):\n start = period.to_timestamp().strftime('%Y-%m-%d')\n end = period.to_timestamp(how=\"E\").strftime('%Y-%m-%d')\n \n queries = self.query(start, end, interval=\"hourly\").json()\n \n for q in queries:\n hourly_usage[q[\"date\"]] = q[\"value\"]\n \n\n return hourly_usage\n \n\n\nclass Stats():\n\n def __init__(self):\n pass\n\n def hourly_usage_df(self, json):\n\n df = pd.read_json(json, orient='index')\n df = df.reset_index(level=0)\n df = df.set_axis(['Datetime', 'kWH'], axis=1, inplace=False)\n\n #print(df['Datetime'].apply(lambda v: isinstance(v, datetime)).sum())\n \n df[['Datetime', 'Timezone']] = df['Datetime'].astype(str).str.rsplit('+', n=1, expand=True)\n df['Datetime'] = pd.to_datetime(df['Datetime'], utc=True)\n df['Timezone'] = pd.to_datetime(df['Timezone'])\n\n\n df['Hour'] = df['Datetime'].dt.strftime('%H')\n df['Day'] = df['Datetime'].dt.strftime('%Y-%m-%d')\n\n\n hour_df = df.groupby(df['Hour']).sum()\n hour_df[\"%\"] = ((hour_df / hour_df.sum()) * 100).round(decimals=2)\n\n return df, hour_df\n\n\n\n\n\nif __name__ == \"__main__\":\n\n stats = Stats()\n\n #api = Contact_Energy_API()\n #api.login()\n #api.query()\n\n #hourly_usage = api.hourly_power(start=\"2021-11-17\", end=\"2022-08-05\")\n\n #last_week = api.hourly_power(start=\"2022-08-01\", end=\"2022-08-06\")\n\n #with open('hourly_usage.json', 'w') as outfile:\n # json.dump(hourly_usage, outfile, indent = 4)\n\n\n df, hour_df = stats.hourly_usage_df('hourly_usage.json')\n\n current_plan_fixed = 1.059 \n current_plan_cost_per_kWh = 0.2050\n current_plan_discount = 0.98 # 2percent\n\n good_night_fixed = 1.969\n good_night_cost_per_kwh = 0.2105\n\n print(\"Current:\")\n print((df.shape[0]/24 * 
current_plan_fixed + hour_df[\"kWH\"].sum() * current_plan_cost_per_kWh)) # * current_plan_discount)\n\n print('Free Hour:')\n print((df.shape[0]/24 * good_night_fixed + hour_df[\"kWH\"][0:21].sum() * good_night_cost_per_kwh))\n\n\n\n\n", "repo_name": "harrydobbs/contact-energy", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3732, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.session", "line_number": 22, "usage_type": "call"}, {"api_name": "creds.payload", "line_number": 29, "usage_type": "attribute"}, {"api_name": "creds.ID", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pandas.period_range", "line_number": 54, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.read_json", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "24346454319", "text": "# importing the required module\nimport datetime\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib.dates as mdates\nfrom Base_de_donnée import *\n\ndef printGraph():\n with connection_DBase() as conn:\n c = conn.cursor()\n Temp_First = ''' SELECT Temperature FROM DataHistory ORDER BY Temperature ASC LIMIT 1;'''\n c.execute(Temp_First)\n TempFirst = c.fetchall()\n\n Humid_First = ''' SELECT Humidity FROM DataHistory ORDER BY Humidity ASC LIMIT 1;'''\n c.execute(Humid_First)\n HumidFirst = c.fetchone()\n\n Temp_Last = ''' SELECT * FROM DataHistory ORDER BY Temperature DESC LIMIT 1;'''\n c.execute(Temp_Last)\n TempLast = c.fetchone()\n\n Himd_Last = ''' SELECT * FROM DataHistory ORDER BY Humidity DESC LIMIT 1 ;'''\n c.execute(Himd_Last)\n HimdLast = c.fetchone()\n\n Date_First = ''' SELECT Date FROM DataHistory ORDER BY Date ASC LIMIT 1;'''\n #Date_First = ''' SELECT STRFTIME ('%d-%m-%Y, %H:%M',Date) FROM DataHistory ;'''\n c.execute(Date_First)\n DateFirst = c.fetchone()\n\n Date_cursor = ''' SELECT * FROM DataHistory ORDER BY Date ASC LIMIT 1;'''\n c.execute(Date_First)\n Datecursor = c.fetchall()\n\n Date_Last = ''' SELECT * FROM DataHistory ORDER BY Date DESC LIMIT 1;'''\n c.execute(Date_Last)\n DateLast = c.fetchone()\n\n #print(\"Temp first\",TempFirst[0])\n #print(\"Temp last\",TempLast[1])\n #print(\"Humidity first\",HumidFirst[0])\n #print(\"Humidity Last\",HimdLast[2])\n print(\"Date first:\",DateFirst[0])\n print(\"Date Last:\",DateLast[3])\n\n Start = datetime.datetime.strptime('{}',\"%d-%m-%Y %H:%M:%S\").format(datetime(DateFirst))\n end = datetime.datetime.strptime('{}',\"%d-%m-%Y %H:%M:%S\").format(DateLast)\n date_generated = [Start+ datetime.timedelta(days=i) for i in range(0,(end-Start).days)]\n\n for dte in date_generated:\n print(dte.strptime(\"%d-%m-%Y\"))\n return\n\nprintGraph()\n\"\"\"# x axis values\nx = [1, 2, 3]\n# corresponding y axis values\ny = [2, 4, 1]\n\n# plotting the points\nplt.plot(x, y)\n\n# naming the x axis\nplt.xlabel('x - axis')\n# naming the y axis\nplt.ylabel('y - axis')\n\n# giving a title to my graph\nplt.title('Dashboard')\n\n# function to show the plot\nplt.show()\n\"\"\"\n", "repo_name": "abdelhak-dev/Stage-Gestion-d-alerte", "sub_path": "app/Dashboard.py", "file_name": "Dashboard.py", "file_ext": "py", "file_size_in_byte": 2333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "38091746286", "text": "import torchvision\nfrom torch import nn\nfrom torch.nn import init\nfrom ReIDModules.AIM_CCReID.models.utils import pooling\n \n\nclass ResNet50(nn.Module):\n def __init__(self, config, **kwargs):\n super().__init__()\n\n resnet50 = torchvision.models.resnet50(pretrained=True)\n if config.MODEL.RES4_STRIDE == 1:\n resnet50.layer4[0].conv2.stride=(1, 1)\n resnet50.layer4[0].downsample[0].stride=(1, 1)\n\n self.conv1 = resnet50.conv1\n self.bn1 = resnet50.bn1\n self.relu = resnet50.relu\n self.maxpool = resnet50.maxpool\n\n self.layer1 = resnet50.layer1\n self.layer2 = resnet50.layer2\n self.layer3 = resnet50.layer3\n self.layer4 = resnet50.layer4\n if config.MODEL.POOLING.NAME == 'avg':\n self.globalpooling = nn.AdaptiveAvgPool2d(1)\n elif config.MODEL.POOLING.NAME == 'max':\n self.globalpooling = nn.AdaptiveMaxPool2d(1)\n elif config.MODEL.POOLING.NAME == 'gem':\n self.globalpooling = pooling.GeMPooling(p=config.MODEL.POOLING.P)\n elif config.MODEL.POOLING.NAME == 'maxavg':\n self.globalpooling = pooling.MaxAvgPooling()\n else:\n raise KeyError(\"Invalid pooling: '{}'\".format(config.MODEL.POOLING.NAME))\n\n self.bn = nn.BatchNorm1d(config.MODEL.FEATURE_DIM)\n init.normal_(self.bn.weight.data, 1.0, 0.02)\n init.constant_(self.bn.bias.data, 0.0)\n \n def forward(self, x):\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n base_f = self.layer4(x)\n f = self.globalpooling(base_f)\n f = f.view(f.size(0), -1)\n f = self.bn(f)\n\n return base_f, f", "repo_name": "bar371/ReFace", "sub_path": "ReIDModules/AIM_CCReID/models/img_resnet.py", "file_name": "img_resnet.py", "file_ext": "py", "file_size_in_byte": 1820, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.models.resnet50", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveMaxPool2d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "ReIDModules.AIM_CCReID.models.utils.pooling.GeMPooling", "line_number": 30, "usage_type": "call"}, {"api_name": "ReIDModules.AIM_CCReID.models.utils.pooling", "line_number": 30, "usage_type": "name"}, {"api_name": "ReIDModules.AIM_CCReID.models.utils.pooling.MaxAvgPooling", "line_number": 32, "usage_type": "call"}, {"api_name": "ReIDModules.AIM_CCReID.models.utils.pooling", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": 
"torch.nn.init.normal_", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "18332956448", "text": "from flask import Flask, flash, make_response, render_template, request\nfrom werkzeug.utils import secure_filename\n\nfrom datetime import timedelta\n\nimport os\nimport re\nimport sys\n\nimport chardet\n\n\napp = Flask(__name__)\napp.secret_key = 'hoQf4^(xlos6@,mc/AfkoY7!p{;dLgd vfV1etvSu6*JcqzP'\n\ndef allowed_file(file):\n \"\"\"\n Check if the Flask file isn't too large,\n see if its extension is valid, and return bool.\n \"\"\"\n pos = file.tell() # Save the current position\n file.seek(0, 2) # Seek to the end of the file\n length = file.tell() # The current position is the length\n file.seek(pos) # Return to the saved position\n # print(file.tell())\n # print(length)\n if length > 150000: # >150kB is too large\n return False\n\n filename = file.filename\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ['srt', 'vtt']\n\ndef string_content(file):\n \"\"\"Returns the contents of the Flask file as string.\"\"\"\n # \"file.stream.read()\" returns bytes, \".decode(\"utf-8\")\" converts them\n # to an \"utf-8\" encoded string; however not every file is in unicode.\n\n # We need a way of determining the character encoding;\n # the flask file object does not have this functionality.\n # Try-except is not useful either because a stream only returns data once.\n # This means a second read (in except clause) would be empty ...\n # -> We need chardet to detect the encoding of the file!\n file_contents = file.stream.read()\n\n result = chardet.detect(file_contents)\n enc = result['encoding']\n # print('\\n\\n' + str(enc) + '\\n\\n')\n if not enc:\n # When chardet can't detect the character encoding, which will happen\n # for non-text files, enc will be None. In this case we return None,\n # and handle it in the caller.\n return None\n else:\n file_contents = file_contents.decode(enc, errors='replace')\n\n return file_contents\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route('/convert', methods=['POST'])\ndef upload_convert():\n # import pdb; pdb.set_trace()\n file = request.files.get('subtitlefile')\n if not file:\n flash('No file is selected.')\n return home()\n\n plusmin = float(request.form.get('plusmin'))\n seconds = request.form.get('seconds')\n if not seconds:\n flash('No seconds are entered.')\n return home()\n seconds = float(seconds)\n seconds *= plusmin\n\n if not allowed_file(file):\n flash('Select either an .srt or .vtt file.')\n return home()\n\n else:\n inputfile = secure_filename(file.filename)\n from_ext = inputfile.rsplit('.', 1)[1].lower()\n to_ext = request.form.get('to')\n if not to_ext in ['srt', 'vtt']:\n flash('Only converting to .srt or .vtt is supported.')\n return home()\n\n change_ext = False\n if from_ext != to_ext:\n change_ext = True\n outputfile = name_output(inputfile, seconds, change_ext)\n\n string_contents = string_content(file)\n if not string_contents:\n flash('Unknown character encoding. 
Only select valid subtitle files.')\n return home()\n elif inputfile.endswith('.srt'):\n result = convert_srt(string_contents, seconds, change_ext)\n else:\n result = convert_vtt(string_contents, seconds, change_ext)\n response = make_response(result)\n\n response_str = 'attachment; filename={}'.format(outputfile)\n response.headers['Content-Disposition'] = response_str\n return response\n\n\ndef name_output(inputfile, seconds, change_ext):\n \"\"\"\n Determines the name of the outputfile based on the inputfile and seconds;\n the name of the new file is identical to the old one, but prepended with '{+x.xx_Sec}_'.\n \n However, if the file has already been processed by submod before, we simply change\n the 'increment number' x, instead of prepending '{+x.xx_Sec}_' a second time.\n This way we can conveniently process files multiple times, and still have sensible names.\n \n \"\"\"\n # import pdb; pdb.set_trace()\n # Regex to check if the inputfile was previously processed by submod\n proc_regex = '\\{[+-]\\d+\\.\\d+_Sec\\}_'\n proc = re.compile(proc_regex)\n processed = proc.match(inputfile)\n \n # The inputfile prefix as a string format\n input_prefix = '{{{0:.2f}_Sec}}_'\n \n # inputfile was processed by submod previously\n if processed:\n \n # Regex for extracting the increment number from the inputfile:\n number = re.compile('[+-]\\d+\\.\\d+')\n match = number.search(inputfile)\n \n incr = float(match.group())\n incr += seconds\n \n # Prepare a placeholder for string formatting;\n # in the string 'inputfile', the first occurrence of the 'proc_regex' pattern\n # is substituted with the 'input_prefix' string. \n placeholder = re.sub(proc_regex, input_prefix, inputfile, 1)\n \n # the inputfile has not been processed by submod before \n else:\n incr = seconds\n placeholder = input_prefix + inputfile\n \n if incr >= 0:\n placeholder = '{{+' + placeholder[2:]\n \n # Determine the name of the outputfile by replacing\n # the increment number with the new one:\n outputfile = placeholder.format(incr)\n\n if change_ext:\n if outputfile.endswith('.srt'):\n outputfile = outputfile.rsplit('.', 1)[0] + '.vtt'\n else:\n outputfile = outputfile.rsplit('.', 1)[0] + '.srt'\n \n return outputfile\n\n\ndef convert_srt(file_contents, seconds, change_ext):\n \"\"\"\n Loops through the given inputfile, modifies the lines consisting of the time encoding,\n and writes everything back to the 'new_content' string.\n \n This function is identical to convert_vtt,\n except that it uses ',' for the seconds field's decimal space.\n \n The subtitle files consist of a repetition of the following 3 lines:\n \n - Index-line: integer count indicating line number\n - Time-line: encoding the duration for which the subtitle appears\n - Sub-line: the actual subtitle to appear on-screen (1 or 2 lines)\n \n Example .srt (Note: ',' for decimal spaces):\n \n 1\n 00:00:00,243 --> 00:00:02,110\n Previously on ...\n \n 2\n 00:00:03,802 --> 00:00:05,314\n Etc.\n \n \"\"\"\n # import pdb; pdb.set_trace()\n content_list = []\n skip = False\n time_line = re.compile('\\d\\d:\\d\\d:\\d\\d,\\d\\d\\d')\n\n for line in file_contents.splitlines(True):\n # Time-line: This is the line we need to modify\n if time_line.match(line):\n # We need '.' 
instead of ',' for floats!\n line = line.replace(',', '.')\n new_line = process_line(line, seconds)\n if new_line == '(DELETED)\\n\\n':\n skip = True\n elif not change_ext:\n # Convert back to '.srt' style:\n new_line = new_line.replace('.', ',')\n \n else:\n # When skip = True, subtitles are shifted too far back into the past,\n # (before the start of the movie), so they are deleted:\n if skip == True:\n # Subtitles can be 1 or 2 lines; only turn of skip on empty line:\n if line == '\\n' or line == '\\r\\n':\n skip = False\n continue\n \n # All other lines are simply copied:\n else:\n new_line = line\n\n content_list.append(new_line)\n\n new_content = ''.join(content_list)\n\n return new_content\n\n\ndef convert_vtt(file_contents, seconds, change_ext):\n # import pdb; pdb.set_trace()\n content_list = []\n skip = False\n time_line = re.compile('\\d\\d:\\d\\d:\\d\\d.\\d\\d\\d')\n\n for line in file_contents.splitlines(True):\n # Time-line: This is the line we need to modify\n if time_line.match(line):\n new_line = process_line(line, seconds)\n if new_line == '(DELETED)\\n\\n':\n skip = True\n elif change_ext:\n new_line = new_line.replace('.', ',')\n\n else:\n # When skip = True, subtitles are shifted too far back into the past,\n # (before the start of the movie), so they are deleted:\n if skip == True:\n # Subtitles can be 1 or 2 lines; only turn of skip on empty line:\n if line == '\\n' or line == '\\r\\n':\n skip = False\n continue\n \n # All other lines are simply copied:\n else:\n new_line = line\n\n content_list.append(new_line)\n\n new_content = ''.join(content_list)\n\n return new_content\n\n\ndef process_line(line, seconds):\n \"\"\"\n Process the given line by adding seconds to start and end time.\n (subtracting if seconds is negative)\n \n Example line: '00:00:01.913 --> 00:00:04.328'\n Index: 01234567890123456789012345678\n Index by tens: (0) 10 20 (28)\n\n \"\"\" \n start = line[0:12]\n start = process_time(start, seconds)\n \n end = line[17:29]\n end = process_time(end, seconds)\n \n if start == '(DELETED)\\n\\n':\n if end == '(DELETED)\\n\\n':\n line = '(DELETED)\\n\\n'\n else:\n line = '00:00:00.000 --> ' + end + '\\n'\n \n else: \n line = start + ' --> ' + end + '\\n'\n \n return line\n\n \ndef process_time(time_string, incr):\n \"\"\"\n Increment the given time_string by 'incr' seconds\n \n The time-string has the form '00:00:00.000',\n and converts to the following format string:\n '{0:02d}:{1:02d}:{2:06.3f}'\n \n \"\"\"\n hrs = int(time_string[0:2])\n mins = int(time_string[3:5])\n secs = float(time_string[6:12])\n \n time = timedelta(hours=hrs, minutes=mins, seconds=secs)\n incr = timedelta(seconds=incr)\n \n # incr can be negative, so the new time can be too:\n time = time + incr\n time = time.total_seconds()\n \n if time >= 0:\n # Since time is a float, hrs and mins need to be converted back to int for the string format\n hrs = int(time // 3600)\n mins = int((time % 3600) // 60)\n secs = (time % 3600) % 60\n \n time_string = '{0:02d}:{1:02d}:{2:06.3f}'.format(hrs, mins, secs)\n \n else:\n # time < 0: the subtitles are now scheduled before the start of the movie,\n # so we can delete them\n time_string = '(DELETED)\\n\\n'\n \n return time_string\n\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)", "repo_name": "davidde/subserver_flask", "sub_path": "subserver.py", "file_name": "subserver.py", "file_ext": "py", "file_size_in_byte": 10629, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": 
[{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "chardet.detect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request.files.get", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 80, "usage_type": "call"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.form.get", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 104, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 124, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 134, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 143, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 194, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 232, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 304, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 305, "usage_type": "call"}]} +{"seq_id": "74871416163", "text": "import torch.nn as nn\nfrom torchvision import models\n\n\ndef get_vgg():\n # загрузим модель\n vgg = models.vgg19(pretrained=True)\n\n # отключим градиенты\n for param in vgg.parameters():\n param.requires_grad = False\n\n for i, layer in enumerate(vgg.features):\n if isinstance(layer, nn.MaxPool2d):\n vgg.features[i] = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)\n\n return vgg\n", "repo_name": "andrei-ermakov/bot_nst_and_gan", "sub_path": "handler/Class_NST/vgg_model.py", "file_name": "vgg_model.py", "file_ext": "py", "file_size_in_byte": 445, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torchvision.models.vgg19", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "202466831", "text": "#!/usr/bin/env python\n#python test_22.py -i de.txt -o de_single.txt\nimport argparse\nimport sys\nimport re\nparser = argparse.ArgumentParser(description=\"This is just 
a test\")\nparser.add_argument(\"-i\",\"--input\",help=\"the first argument\")\nparser.add_argument(\"-o\",\"--output\",help=\"the second argument\")\nargs = parser.parse_args()\nfile1=open(args.input,'r')\nfile2=open(args.output,'w')\neachline=''\neach=[]\nname=''\nseq=''\nfor eachline in file1:\n eachline=eachline.strip('\\n')\n each=eachline.split('\\t')\n if each[2].find(','):\n for name in each[2].split(','):\n each[2]=name\n seq='\\t'.join(each)\n file2.writelines(seq+'\\n')\nfile1.close()\nfile2.close()\n", "repo_name": "BioXiao/LncRNA-project", "sub_path": "Python/test_22.py", "file_name": "test_22.py", "file_ext": "py", "file_size_in_byte": 696, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "12990673118", "text": "import numpy as np\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_moons\nfrom sklearn.metrics import zero_one_loss\nfrom sklearn.model_selection import train_test_split, KFold\n\n__date__ = '11/10/2021'\n__author__ = 'Eng Hock Lee'\n__email__ = 'leex7132@umn.edu'\n\ndef HistogramOfProjections(dist, ytrue, title=None, numbin=20, savefig=False):\n ######\n # Histogram of Projection function\n # argument dist [numpy [n,]]: euclidean distance from decision boundary for n data points\n\n # argement ytrue [numpy [n,]]: binary label for n data points.\n # The index of data points should corresponding to\n # the same data points in arguement dist\n\n # numbin [int]: number of bin (default = 20)\n ######\n\n assert np.unique(ytrue).shape[0] == 2 # assert binary class label\n\n cls1, cls2 = np.unique(ytrue)\n cls1_index = np.where(ytrue==cls1)\n cls2_index = np.where(ytrue==cls2)\n\n cls1_dist = dist[cls1_index]\n cls2_dist = dist[cls2_index]\n\n # Perform histograming\n cls1_binedge = np.linspace(np.min(cls1_dist), np.max(cls1_dist), numbin)\n cls2_binedge = np.linspace(np.min(cls2_dist), np.max(cls2_dist), numbin)\n cls1_hist, cls1_hist_dist = np.histogram(cls1_dist,cls1_binedge)\n cls2_hist, cls2_hist_dist = np.histogram(cls2_dist,cls2_binedge)\n cls1_hist_dist = cls1_hist_dist + np.abs(np.abs(cls1_hist_dist[0]) - np.abs(cls1_hist_dist[1]))/2\n cls1_hd = cls1_hist_dist[:-1].copy()\n cls2_hist_dist = cls2_hist_dist + np.abs(np.abs(cls2_hist_dist[0]) - np.abs(cls2_hist_dist[1]))/2\n cls2_hd = cls2_hist_dist[:-1].copy()\n\n # Plot figure\n plt.figure(figsize=(8,6))\n max_y = np.amax(np.maximum(cls1_hist,cls2_hist))\n\n plt.scatter(dist, np.ones(dist.shape[0])*max_y/2, c=ytrue, s=30, cmap=plt.cm.Paired)\n plt.plot(cls1_hd, cls1_hist, cls2_hd, cls2_hist)\n plt.plot(np.zeros(100), np.linspace(-10,max_y+20,100),'k')\n plt.plot(np.zeros(100)-1, np.linspace(-10,max_y+20,100),'k--')\n plt.plot(np.zeros(100)+1, np.linspace(-10,max_y+20,100),'k--')\n\n plt.ylim((0, max_y+20))\n if title == None:\n plt.title('Histogram of Projections', fontsize='xx-large')\n\n if savefig:\n plt.savefig('hop.png')\n else:\n plt.title(title, fontsize='xx-large')\n\n if savefig:\n plt.savefig(title+'.png')\n\n\ndef plot_HOP(model, X, y, title=None, savefig=False):\n ## Plot histogram of projection\n # Input Argument:\n # model [Sklearn.SVM class] = trained SVM model\n # X [numpy array [n,d]] = input X\n # y [numpy array [n,]] = output y\n # title [string] = title for HOP\n\n dist_ypred = model.decision_function(X)\n HistogramOfProjections(dist_ypred, y, title=title, savefig=savefig)\n\n\ndef 
plot_decision_boundary(X, y, model):\n # plot decision boundary\n\n # get the separating hyperplane\n w = model.coef_[0]\n a = -w[0] / w[1]\n xx = np.linspace(X[:,0].min(), X[:,0].max())\n yy = a * xx - (model.intercept_[0]) / w[1]\n margin = 1 / np.sqrt(np.sum(model.coef_ ** 2))\n yy_down = yy - np.sqrt(1 + a ** 2) * margin\n yy_up = yy + np.sqrt(1 + a ** 2) * margin\n\n plt.figure(figsize=(8, 6))\n plt.clf()\n plt.plot(xx, yy, \"k-\")\n plt.plot(xx, yy_down, \"k--\")\n plt.plot(xx, yy_up, \"k--\")\n plt.title('Decision Boundary', fontsize=16)\n\n plt.scatter(X[:,0], X[:,1], c=y)\n plt.savefig('boundary.png')\n\n\ndef train_linear_SVC(X, y, Cs, k_fold):\n # linear SVC with single resampling technique\n\n # Input Argument:\n # X [numpy array [n,d]] = input X\n # y [numpy array [n,]] = output y\n # Cs [list] = list of parameter C\n # k_fold [int] = number of k fold cv\n\n # Output:\n # opt_model [sklearn.svc class] = optimal model\n # trn_error [float] = training error\n # tst_error [float] = test error\n # optC [float] = optimal C parameter\n\n # setup variables\n val_errors = np.zeros((len(Cs),))\n\n # Split data into training and test set\n trnX, tstX, trny, tsty = train_test_split(X, y, test_size=0.2)\n\n # perform single resampling technique for model selection\n kf = KFold(n_splits=k_fold)\n for lrn_idx, val_idx in kf.split(trnX):\n\n # split into learning and validation set\n lrnX, valX = trnX[lrn_idx,:], trnX[val_idx, :]\n lrny, valy = trny[lrn_idx], trny[val_idx]\n\n # for each parameter C\n for i, C in enumerate(Cs):\n\n # model fitting\n model = SVC(C=C, kernel='linear')\n model.fit(lrnX, lrny)\n\n # model validating\n ypred = model.predict(valX)\n val_errors[i] += zero_one_loss(valy, ypred)\n\n # divide val_errors by the number of k fold\n val_errors /= k_fold\n\n # find optimal parameters with smallest validation error\n optC_idx = np.where(val_errors == val_errors.min())\n optC = Cs[optC_idx[0][0]]\n\n # train optimal model\n opt_model = SVC(C=optC, kernel='linear')\n opt_model.fit(trnX, trny)\n\n # get training Error\n trn_ypred = opt_model.predict(trnX)\n trn_error = zero_one_loss(trny, trn_ypred)\n\n # get test Error\n tst_ypred = opt_model.predict(tstX)\n tst_error = zero_one_loss(tsty, tst_ypred)\n\n # generate training and test HOP figure\n plot_HOP(opt_model, trnX, trny, title = 'Training HOP', savefig=True)\n plot_HOP(opt_model, tstX, tsty, title = 'Test HOP', savefig=True)\n\n return opt_model, trn_error, tst_error, optC\n\n\ndef main():\n\n # generate randopm data for binary classification\n X, y = make_moons(n_samples=1000, noise=0.1)\n\n # parameters for linear SVC\n Cs = [10**i for i in range(-3,4)]\n\n # number of k fold for resampling\n k_fold = 5\n\n # perform linear SVC\n opt_model, trn_error, tst_error, optC = train_linear_SVC(X, y, Cs, k_fold)\n print('Optimal C: ', optC, ' Training Error: ', trn_error, ' Test Error: ', tst_error)\n\n # plot decision boundary\n plot_decision_boundary(X, y, opt_model)\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "enghock1/dump", "sub_path": "Linear SVC with Single Resampling/example_linear_svc.py", "file_name": "example_linear_svc.py", "file_ext": "py", "file_size_in_byte": 6016, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.unique", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 27, "usage_type": "call"}, 
{"api_name": "numpy.where", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "numpy.amax", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 47, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.sqrt", 
"line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.metrics.zero_one_loss", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 145, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.metrics.zero_one_loss", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.metrics.zero_one_loss", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_moons", "line_number": 170, "usage_type": "call"}]} +{"seq_id": "4940258914", "text": "import pandas as pd\nimport numpy as np\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\nfrom gensim.test.utils import datapath\nfrom sqlalchemy import create_engine\nimport scispacy\nimport spacy\nfrom pprint import pprint\nimport pyLDAvis\nimport pyLDAvis.gensim_models\nfrom wordcloud import WordCloud\nfrom pathlib import Path\nimport nltk\nimport os\nfrom background_task import background\nfrom django.conf import settings\nimport os\n\nNUM_TOPICS = 25\n\n# lda_model path\nMODEL_PATH = \"/app/app/lda_model/\"\nLDA_MODEL_PATH = MODEL_PATH + \"lda_model\"\nWORD_DICT_PATH = MODEL_PATH + \"word_dictionart\"\n\nid2word = corpora.Dictionary.load_from_text(WORD_DICT_PATH)\nlda_model = gensim.models.ldamodel.LdaModel.load(LDA_MODEL_PATH)\n\n# nltk stopwords\nnltk.download('stopwords')\nnltk.download('punkt')\nfrom nltk.corpus import stopwords\nstop_words = stopwords.words('english')\nstop_words.extend(['background', 'methods', 'introduction', 'conclusions', 'results', \n 'purpose', 'materials', 'discussions','methodology', 'abstract', 'section', 'text'])\n\nnlp = spacy.load('en_core_sci_sm', disable=['parser', 'ner'])\n\n@background(schedule=5)\ndef tokenize():\n postgres_str = ('postgresql://{username}:{password}@{ipaddress}:{port}/{dbname}'.format(\n 
username=os.environ.get('RDS_DATABASE_USER'),\n password=os.environ.get('RDS_DATABASE_PASSWORD'),\n ipaddress=os.environ.get('RDS_DATABASE_HOST'),\n port='5432',\n dbname=os.environ.get('RDS_DATABASE_NAME')))\n # Create the connection\n\n cnx = create_engine(postgres_str)\n data = pd.read_sql_query('''SELECT * FROM app_article''', con=cnx)\n print(len(data))\n print(str(data[\"Tokenized\"]))\n\n for index, row in data.iterrows():\n if row['Title'].startswith(\"OrderedDict\") or row['Abstract'].startswith(\"OrderedDict\"):\n continue\n list_of_text = row.Title + \" \" + row.Abstract\n for item in row.Keywords:\n list_of_text = list_of_text + \" \" + item\n # print(list_of_text)\n list_of_words = list(gensim.utils.simple_preprocess(str(list_of_text), deacc=True))\n # print(list_of_words)\n cleaned_words = [word for word in simple_preprocess(str(list_of_words)) if word not in stop_words]\n row.Tokenized = cleaned_words\n \n print(data.head(5))\n\n data.to_sql('app_cleanarticle', con=cnx, if_exists='replace', index=False)\n\n\ndef lda_to_output_labels(lda_result, class_count):\n \"\"\" \n This function returns the output vector of a given LDA result\n For class count of 10 for example, (3, 0.97) becomes [0, 0, 0, 0.97, 0, 0, 0, 0, 0, 0]\n \"\"\"\n output = np.zeros(class_count)\n for res in lda_result:\n output[res[0]] = res[1]\n return output\n\n\ndef lemmatization(tokens, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n\n \"\"\"https://spacy.io/api/annotation\"\"\"\n doc = nlp(\" \".join(tokens))\n return [token.lemma_ for token in doc if token.pos_ in allowed_postags]\n\ndef analyze(sentence):\n # tokenize sentence\n tokenized_words = gensim.utils.simple_preprocess(str(sentence), deacc=True)\n\n # remove stop words\n clean_words = [word for word in simple_preprocess(str(tokenized_words)) if word not in stop_words]\n\n tokens = [word for word in clean_words if word in id2word.token2id.keys()]\n \n # lemmatize clean worlds\n data_lemmatized = lemmatization(tokens, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n print(\"tokens:\")\n print(data_lemmatized)\n \n corpus = id2word.doc2bow(data_lemmatized)\n lda_output = lda_to_output_labels(lda_model[corpus][0], NUM_TOPICS)\n class_number = np.argmax(lda_output)\n print(\"class number\" + str(class_number))\n return class_number\n", "repo_name": "ofhasirci/swe-573", "sub_path": "backend/app/analysis.py", "file_name": "analysis.py", "file_ext": "py", "file_size_in_byte": 3795, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gensim.corpora.Dictionary.load_from_text", "line_number": 29, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 29, "usage_type": "attribute"}, {"api_name": "gensim.corpora", "line_number": 29, "usage_type": "name"}, {"api_name": "gensim.models.ldamodel.LdaModel.load", "line_number": 30, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 30, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 33, "usage_type": "call"}, {"api_name": "nltk.download", "line_number": 34, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 36, "usage_type": "name"}, {"api_name": "spacy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, 
{"api_name": "os.environ.get", "line_number": 46, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 47, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 49, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.read_sql_query", "line_number": 53, "usage_type": "call"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 64, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 64, "usage_type": "attribute"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 66, "usage_type": "call"}, {"api_name": "background_task.background", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 93, "usage_type": "call"}, {"api_name": "gensim.utils", "line_number": 93, "usage_type": "attribute"}, {"api_name": "gensim.utils.simple_preprocess", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "41393543656", "text": "from coinpy.model.constants.bitcoin import TARGET_SPACING, PROOF_OF_WORK_LIMIT,\\\n TARGET_INTERVAL, TARGET_TIMESPAN\nfrom coinpy.lib.blocks.difficulty import compact_difficulty, uint256_difficulty\nfrom coinpy.model.protocol.structures.uint256 import Uint256\nfrom coinpy.tools.functools import first, nth\nimport itertools\nimport collections\nfrom coinpy.model.protocol.runmode import TESTNET, TESTNET3, MAIN\n\n#See GetNextWorkRequired: main.cpp:819\n\nclass DifficultyIterator(object):\n \"\"\" time(int) + target(Uint256) organized by heigth.\n \n This allows to test blockchain difficulty algorithms without\n requiring full blockheaders.\n \"\"\"\n Difficulty = collections.namedtuple(\"Difficulty\", \"time bits\")\n def get_item(self, n):\n pass\n \ndef retarget(time_2weekago, \n time_now, \n target_timespan,\n current_target, # compact format\n proof_of_work_limit):\n actual_timespan = time_now - time_2weekago\n # Limit adjustment step\n if actual_timespan < target_timespan/4:\n actual_timespan = target_timespan/4;\n if actual_timespan > target_timespan*4:\n actual_timespan = target_timespan*4;\n # Retarget\n new_target = Uint256.from_bignum(uint256_difficulty(current_target).get_bignum() * actual_timespan / target_timespan)\n if new_target > proof_of_work_limit:\n new_target = proof_of_work_limit\n return compact_difficulty(new_target)\n\ndef adjust(height, difficulty_history, current_time, proof_of_work_limit):\n \"\"\" Testnet>1329264000 and Testnet3 difficulty adjusting algorithm.\n \n If there is not block during 2*TARGET_SPACING, reset difficulty to min-difficulty\n \"\"\"\n prevblocktime = difficulty_history.get_item(height-1).time\n if (current_time - prevblocktime > TARGET_SPACING * 2 or \n current_time < prevblocktime):\n #reset difficulty to min-difficulty\n return compact_difficulty(proof_of_work_limit)\n else:\n #keep the last non-special difficulty\n h = height - 1\n d = difficulty_history.get_item(h).bits\n while (h % TARGET_INTERVAL != 0 and d == compact_difficulty(proof_of_work_limit)):\n h -= 1\n d = difficulty_history.get_item(h).bits\n return d\n\ndef get_retargeted_difficulty(height, difficulty_history, runmode):\n # 
Note the \"off-by-one\" bug (2015 instead of 2016) \n # E.g. 2015 differences in block times, but using a TARGET_TIMESPAN of 2016\n return retarget(difficulty_history.get_item(height-TARGET_INTERVAL).time, \n difficulty_history.get_item(height-1).time, \n TARGET_TIMESPAN,\n difficulty_history.get_item(height-1).bits,\n PROOF_OF_WORK_LIMIT[runmode])\n\ndef normal_difficulty(height, \n difficulty_history,\n runmode):\n if height % TARGET_INTERVAL!= 0:\n return difficulty_history.get_item(height-1).bits\n return get_retargeted_difficulty(height, difficulty_history, runmode)\n\ndef adjusting_difficulty(height, \n difficulty_history,\n current_blocktime,\n runmode):\n if height % TARGET_INTERVAL != 0:\n return adjust(height, difficulty_history, current_blocktime, PROOF_OF_WORK_LIMIT[runmode])\n return get_retargeted_difficulty(height, difficulty_history, runmode)\n\ndef get_work_required(height,\n difficulty_history,\n current_blocktime,\n runmode):\n if (runmode == TESTNET and current_blocktime > 1329264000 or\n runmode == TESTNET3):\n return adjusting_difficulty(height, difficulty_history, current_blocktime, runmode)\n return normal_difficulty(height, difficulty_history, runmode)\n\n\n", "repo_name": "sirk390/coinpy", "sub_path": "coinpy-lib/src/coinpy/lib/blockchain/bockchain_work.py", "file_name": "bockchain_work.py", "file_ext": "py", "file_size_in_byte": 3806, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "collections.namedtuple", "line_number": 18, "usage_type": "call"}, {"api_name": "coinpy.model.protocol.structures.uint256.Uint256.from_bignum", "line_number": 34, "usage_type": "call"}, {"api_name": "coinpy.model.protocol.structures.uint256.Uint256", "line_number": 34, "usage_type": "name"}, {"api_name": "coinpy.lib.blocks.difficulty.uint256_difficulty", "line_number": 34, "usage_type": "call"}, {"api_name": "coinpy.lib.blocks.difficulty.compact_difficulty", "line_number": 37, "usage_type": "call"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_SPACING", "line_number": 45, "usage_type": "name"}, {"api_name": "coinpy.lib.blocks.difficulty.compact_difficulty", "line_number": 48, "usage_type": "call"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_INTERVAL", "line_number": 53, "usage_type": "name"}, {"api_name": "coinpy.lib.blocks.difficulty.compact_difficulty", "line_number": 53, "usage_type": "call"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_TIMESPAN", "line_number": 63, "usage_type": "argument"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_INTERVAL", "line_number": 61, "usage_type": "name"}, {"api_name": "coinpy.model.constants.bitcoin.PROOF_OF_WORK_LIMIT", "line_number": 65, "usage_type": "name"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_INTERVAL", "line_number": 70, "usage_type": "name"}, {"api_name": "coinpy.model.constants.bitcoin.TARGET_INTERVAL", "line_number": 78, "usage_type": "name"}, {"api_name": "coinpy.model.constants.bitcoin.PROOF_OF_WORK_LIMIT", "line_number": 79, "usage_type": "name"}, {"api_name": "coinpy.model.protocol.runmode.TESTNET", "line_number": 86, "usage_type": "name"}, {"api_name": "coinpy.model.protocol.runmode.TESTNET3", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "8654766852", "text": "from email import message\nimport turtle\nimport sys, time, os\n\nmessage = \"Hello Mother F####\"\n\ndef typewritter(message):\n for char in message:\n sys.stdout.write(char)\n sys.stdout.flush()\n\n if char !=\"\\n\":\n time.sleep(0.1)\n 
else:\n time.sleep(1)\n\nos.system(\"cls\")\ntypewritter(message)\n\nt = turtle.Turtle()\nwn = turtle.Screen()\nwn.bgcolor(\"black\")\nt.shape(\"turtle\")\nt.color(\"white\",\"white\")\nt.left(90)\nt.forward(40)\nt.right(90)\nt.circle(40,90)\nt.forward(80)\nt.circle(20,180)\nt.right(180)\nt.circle(20,180)\nt.right(180)\nt.forward(80)\nt.circle(20,180)\nt.forward(80)\nt.right(180)\nt.circle(20,180)\nt.forward(80)\nt.right(180)\nt.penup()\nt.forward(40)\nt.pendown()\nt.circle(20,180)\nt.left(50)\nt.forward(50)\nt.right(50)\nt.circle(40,90)\nt.right(90)\nt.forward(40)\nt.left(90)\nt.forward(80)\n\nturtle.done()", "repo_name": "RasinthaDilshanJayarathne/Fractal_Tree", "sub_path": "middle_finger.py", "file_name": "middle_finger.py", "file_ext": "py", "file_size_in_byte": 840, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "email.message", "line_number": 5, "usage_type": "name"}, {"api_name": "email.message", "line_number": 8, "usage_type": "name"}, {"api_name": "sys.stdout.write", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 10, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "os.system", "line_number": 17, "usage_type": "call"}, {"api_name": "email.message", "line_number": 18, "usage_type": "argument"}, {"api_name": "turtle.Turtle", "line_number": 20, "usage_type": "call"}, {"api_name": "turtle.Screen", "line_number": 21, "usage_type": "call"}, {"api_name": "turtle.done", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "33179230626", "text": "from collections import Counter\nfrom rich import print\nfrom unstructured.partition.html import partition_html\nfrom unstructured.cleaners.core import clean_ligatures, clean_non_ascii_chars\nimport re\n\nBASE_DIR = \"../data/html\"\nfile_name = \"inference-pipelines.html\"\nfile_path = f\"{BASE_DIR}/{file_name}\"\n\nelements = partition_html(\n filename=file_path, chunking_strategy=\"by_title\", skip_header_and_footers=True\n)\n\nprint(Counter(type(element) for element in elements))\n\nextracted_text = \"\\n\".join([element.text for element in elements])\n\n# Remove common text that appears at the beginning of a page\npattern1 = r\"^AWS.*?Developer Guide\"\nextracted_text = re.sub(pattern1, \"\", extracted_text, flags=re.DOTALL)\n# Remove junk text from end of page\npattern2 = r\"Javascript is disabled.*?make the documentation better\\.\"\nextracted_text = re.sub(pattern2, \"\", extracted_text, flags=re.DOTALL)\n\n# Clean text using cleaning bricks\nextracted_text = clean_ligatures(extracted_text)\nextracted_text = clean_non_ascii_chars(extracted_text)\n\n\n# Extracting link_urls from element's metadata\nextracted_urls = []\nfor element in elements:\n urls = element.metadata.link_urls\n if urls is not None:\n extracted_urls.extend(urls)\n\nprint(f\"Extracted: URLs:\\n{extracted_urls}\")\nprint(f\"Extracted Text: {extracted_text}\")\n", "repo_name": "praveenc/data-extraction", "sub_path": "unstructured/parse_html_partitions.py", "file_name": "parse_html_partitions.py", "file_ext": "py", "file_size_in_byte": 1308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"unstructured.partition.html.partition_html", "line_number": 11, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 15, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 24, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "unstructured.cleaners.core.clean_ligatures", "line_number": 27, "usage_type": "call"}, {"api_name": "unstructured.cleaners.core.clean_non_ascii_chars", "line_number": 28, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 38, "usage_type": "call"}, {"api_name": "rich.print", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "38356991052", "text": "from os import sep\nfrom git import Git\nimport pandas as pd\nimport numpy as np \nimport streamlit as st\nfrom pycaret.classification import load_model, predict_model\nfrom pycaret import *\nimport PIL\nfrom PIL import Image\nfrom sklearn.metrics import *\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom pycaret.utils import check_metric\nfrom streamlit_lottie import st_lottie\nfrom streamlit_echarts import st_echarts\nimport requests\n# remove warnings do streamlit\nst.set_option('deprecation.showPyplotGlobalUse', False)\n\n\ndef load_lottieurl(url: str):\n r = requests.get(url)\n if r.status_code != 200:\n return None\n return r.json()\nico = Image.open('ico.ico')\nst.set_page_config(\n page_title=\"SVM Team\",\n page_icon= ico, \n layout=\"wide\", #centered\",\n initial_sidebar_state='auto',\n menu_items=None)\npaginas = ['Home','Análise python','Análise de Churn BI', 'Data Science', \"Demonstação\",\"Filtro\", \"Predição de Churn\",\"Consulta Cliente\", \"Dashbord comparativo\"]\nsite = \"\"\nsite_pred = \"\"\n###### SIDE BAR ######\ncol1, col2, col3 = st.sidebar.columns([0.5, 1, 1])\nwith col2:\n image1 = Image.open('logo_size.jpg')\n st.image(image1, width=120)\n pagina = st.sidebar.selectbox(\"Navegação\", paginas)\n###### PAGINA INICIAL ######\nif pagina == 'Home':\n lottie_1 = load_lottieurl('https://assets2.lottiefiles.com/packages/lf20_3FIGvm.json')\n st_lottie(lottie_1, speed=0.3, height=150, key=\"initial\") \n col1,col2,col3 = st.columns([1,2,3])\n site = \"https://docs.google.com/presentation/d/e/2PACX-1vRuiyA_eVhRcdNymAubZKRNTewo3f2zpg1KZbqrMu2nBhkh7C_XBeBHyp74Efost0X0jsMKCxLULA1_/embed?start=false&loop=false&delayms=3000\"\n st.components.v1.iframe(site, width=960, height=569) \n###### PAGINA Análise python ######\nif pagina == 'Análise python':\n lottie_2 = load_lottieurl('https://assets9.lottiefiles.com/packages/lf20_m9zragkd.json')\n st_lottie(lottie_2, speed=0.5, height=150, key=\"initial\")\n st.subheader(\"Problema do Negócio\")\n HtmlFile = open(\"Analise_churn.html\", 'r', encoding='utf-8')\n source_code = HtmlFile.read() \n #print(source_code)\n st.components.v1.html(source_code,height = 27500) \n\n\n###### BI ######\nif pagina == 'Análise de Churn BI':\n st.subheader(\"Análise de Churn power BI\") \n col1,col2,col3 = st.columns([1,2,3])\n site = \"https://app.powerbi.com/view?r=eyJrIjoiZGVmMDc4N2ItMWYyYi00MDE3LWFkYzItOTc0YzQ4NThlNzZhIiwidCI6Ijg5ZmI0ZjFmLTg1NjctNDEyOC1iMWUzLWNhM2IyZTVhYmRmOCJ9\"\n st.components.v1.iframe(site, width=960, height=600, scrolling=True) \n \n###### PAGINA Data Science ######\nif pagina == 'Data Science':\n lottie_3 = 
load_lottieurl('https://assets9.lottiefiles.com/packages/lf20_q5qeoo3q.json')\n st_lottie(lottie_3, speed=0.5, height=150, key=\"initial\")\n st.subheader(\"Proposta de solução usando Machine Learning\")\n HtmlFile = open(\"DataScience.html\", 'r', encoding='utf-8')\n ds_code = HtmlFile.read() \n #print(source_code)\n st.components.v1.html(ds_code,height = 34000) \n\n###### Demonstação do modelo de machine learning ######\nif pagina == 'Demonstação':\n st.sidebar.write(\"\"\"Nesse exemplo: o cliente irá carregar a outra parte da base de dados, essa base deve estar no mesmo formato da primeira base.\n obs.: caso desejado poderá utilizar um arquivo de exemplo 'validação_base.csv' basta copiar esse link: [validação_base.csv](https://raw.githubusercontent.com/Jcnok/Stack_Labs_Churn/main/Data/valida%C3%A7%C3%A3o_base.csv) ao clicar em Browse files cole o caminho e clique em abrir.\n \n \"\"\")\n\n st.markdown(\"### Carregue a base de dados no formato .csv contendo o restante da base de dados dos Clientes\")\n st.markdown(\"---\")\n uploaded_file = st.file_uploader(\"escolha o arquivo *.csv\")\n if uploaded_file is not None:\n dados = pd.read_csv(uploaded_file)\n #dados = pd.read_csv(uploaded_file)\n st.write(dados.head()) # checar a saída no terminal\n \n if st.button('CLIQUE AQUI PARA EXECUTAR O MODELO'):\n modelo = load_model('./lgbm_tune_pycaret') \n pred = predict_model(modelo, data = dados) \n classe_churn = pred.query('Exited == 1')\n classe_label_churn = pred.query('Label == 1')['Label'].count()\n count_pred = (pred['Label'] == 1).sum()\n count_pred_Label = (classe_churn['Label']==1).sum()\n count_total = (classe_churn['Exited']).count()\n recal = check_metric(pred['Exited'], pred['Label'], metric='Recall')\n result = f'''\n Em um total de {classe_label_churn} chutes, o modelo foi capaz de identificar {count_pred_Label} clientes, dos {count_total} que realmente saíram por algum motivo. Dentro da lista, o modelo encontrou {round((recal * 100),2)}% de todos os clientes que deram churn. Lembrando que apesar de um bom resultado, essa é apenas uma demonstração. 
Podemos facilmente melhorar essa precisão.'''\n st.subheader(result)\n st.markdown(\"---\")\n st.markdown('### Caso desejado, você pode realizar o download do resultado no formato .csv clicando logo abaixo!')\n @st.cache\n def convert_df(df):\n # IMPORTANT: Cache the conversion to prevent computation on every rerun \n return df.to_csv().encode('utf-8') \n csv = convert_df(pred) \n st.download_button(\n label=\"Download do aquivo .CSV\",\n data=csv,\n file_name='predict.csv',\n mime='text/csv',\n )\n####### Filtro para o modelo #############\nif pagina == 'Filtro':\n dados = pd.read_csv('https://raw.githubusercontent.com/Jcnok/Stack_Labs_Churn/main/Data/valida%C3%A7%C3%A3o_base.csv')\n modelo = load_model('./lgbm_tune_pycaret') \n pred = predict_model(modelo, data = dados) \n lista_score = (pred.Score.unique()).round(2) \n lista_score = sorted(lista_score)\n minimo, maximo = st.sidebar.select_slider('Selecione o filtro desejado:',\n lista_score,value=[min(lista_score),max(lista_score)])\n lista = (pred.query(f'Score <= {maximo} and Score >= {minimo}'))\n st.write(lista.tail())\n \n # Função para o conjunto de validação.\n def test_score_report(data_unseen, predict_unseen):\n accuracy = accuracy_score(data_unseen[\"Exited\"], predict_unseen[\"Label\"])\n roc_auc = roc_auc_score(data_unseen[\"Exited\"], predict_unseen[\"Label\"])\n precision = precision_score(data_unseen[\"Exited\"], predict_unseen[\"Label\"])\n recall = recall_score(data_unseen[\"Exited\"], predict_unseen[\"Label\"])\n f1 = f1_score(data_unseen[\"Exited\"], predict_unseen[\"Label\"])\n shape = data_unseen.shape[0]\n\n df_unseen = pd.DataFrame({\n \"Acurácia\" : [accuracy],\n \"AUC\" : [roc_auc],\n \"Recall\" : [recall],\n \"Precisão\" : [precision],\n \"F1 Score\" : [f1],\n \"Tamanho do Conjunto\":[shape]\n })\n return df_unseen\n # Confusion Matrix\n def conf_mat(data_unseen, predict_unseen):\n unique_label = data_unseen[\"Exited\"].unique()\n cmtx = pd.DataFrame(\n confusion_matrix(data_unseen[\"Exited\"],\n predict_unseen[\"Label\"],\n labels=unique_label), \n index=['{:}'.format(x) for x in unique_label], \n columns=['{:}'.format(x) for x in unique_label]\n )\n ax = sns.set(rc={'figure.figsize':(4,2)})\n ax = sns.heatmap(cmtx, annot=True, fmt=\"d\", cmap=\"YlGnBu\")\n ax.set_ylabel('Predito')\n ax.set_xlabel('Real')\n ax.set_title(\"Matriz de Confusão do conjunto de Validação\", size=10)\n return st.pyplot() \n st.write(test_score_report(lista, lista)) \n conf_mat(lista, lista)\n @st.cache\n def convert_df(df): \n return df.to_csv(sep=';',decimal='.',index=False).encode('utf-8')\n\n list_class_1 = lista.query('Label == 1')\n st.markdown('### Análise estatística dos Clientes classificados como Churn.')\n st.write(list_class_1[['CreditScore','Age','Tenure','NumOfProducts','Balance','EstimatedSalary']].describe()) \n st.markdown('### Soma do Saldo e do Salarário dos clientes classificados como Churn.')\n st.write(list_class_1[['Balance','EstimatedSalary']].sum())\n st.markdown('### Opção para salvar o filtro da lista dos clientes!')\n csv = convert_df(list_class_1) \n st.download_button(\n label=\"Download do aquivo .CSV\",\n data=csv,\n file_name='churn_high.csv',\n mime='text/csv',\n )\n \n \n###### Modelo de predição ######\nif pagina == 'Predição de Churn':\n st.markdown('### Selecione as opções de acordo com os dados dos Clientes e execute o modelo!')\n st.sidebar.write(\"\"\"Aqui o cliente consegue selecionar os dados do perfil do cliente de forma individual.\n O modelo irá informar se esse perfil tem ou não uma 
tendência maior ao Churn.\n \"\"\")\n \n st.markdown('---') \n sexo = st.radio('Selecione o Sexo',['MASCULINO', 'FEMININO'])\n idade = np.int64(st.slider('Entre com a idade:', 18, 92, 38))\t\n pais = st.selectbox('Informe o País:',['França', 'Alemanha', 'Espanha'])\n qtd_produtos = st.selectbox('Quantidade de produtos:',[1,2,3,4])\n tenure = st.selectbox('Tempo de permanência:', [0,1,2,3,4,5,6,7,8,9,10])\n tem_cartao = st.radio('Possui cartão de Crédito:', ['Sim','Não'])\n membro_ativo = st.radio('É membro ativo:', ['Sim','Não'])\n score = np.int64(st.slider('Crédito Score:', 350,850,650))\n salario = np.int64(st.slider('Selecione o Salário estimado:', 10,200000,100000)) \n saldo = np.float64(st.slider('Selecione o Saldo em conta:',0,251000, 76500))\n \n st.markdown('---')\n \n dic = {'Gender': [sexo], 'Age': [idade], 'Geography': [pais],'NumOfProducts': [qtd_produtos],\n 'Tenure': [tenure], 'HasCrCard': [tem_cartao],'IsActiveMember':[membro_ativo],\n 'CreditScore':[score],'Balance':[saldo],'EstimatedSalary': [salario]} \n teste = pd.DataFrame(dic)\n\n teste[\"HasCrCard\"] = teste[\"HasCrCard\"].map({ 0 : 'Não', 1 : 'Sim'})\n teste[\"IsActiveMember\"] = teste[\"IsActiveMember\"].map({ 0 : 'Não', 1 : 'Sim'})\n teste[\"Gender\"] = teste[\"Gender\"].map({ 'Male' : 'MASCULINO', 'Female' : 'FEMININO'}) \n\n\n if st.button('CLIQUE AQUI PARA EXECUTAR O MODELO'):\n modelo = load_model('./lgbm_tune_pycaret') \n pred_test = predict_model(modelo, data = teste)\n #prob = list(pred_test.Score.round(2)*100)\n value = ((pred_test.Score.astype('float')[0])*100).round(2)\n \n if pred_test.Label.values == 1:\n ##função Js para Grafico de Gauge.\n color = [[0.25, '#ffa173'],[0.5, '#fa6644'],[0.75, '#f52c15'],[1, '#900000']]\n option = {\n \"series\": [\n {\n \"type\": 'gauge',\n \"startAngle\": 180,\n \"endAngle\": 0,\n \"min\": 50,\n \"max\": 100,\n \"splitNumber\": 8,\n \"axisLine\": {\n \"lineStyle\": {\n \"width\": 6,\n \"color\": color \n }\n },\n \"pointer\": {\n \"icon\": 'path://M12.8,0.7l12,40.1H0.7L12.8,0.7z',\n \"length\": \"12%\",\n \"width\": 20,\n \"offsetCenter\": [0, '-60%'],\n \"itemStyle\": {\n \"color\": 'auto'\n }\n },\n \"axisTick\": {\n \"length\": 12,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 2\n }\n },\n \"splitLine\": {\n \"length\": 20,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 5\n }\n },\n \"axisLabel\": {\n \"color\": '#464646',\n \"fontSize\": 15,\n \"distance\": -60\n \n },\n \"title\": {\n \"offsetCenter\": [0, '-20%'],\n \"fontSize\": 20\n },\n \"detail\": {\n \"fontSize\": 20,\n \"offsetCenter\": [0, '0%'],\n \"valueAnimation\": \"true\",\n \"formatter\":value, \n \"color\": 'auto'\n },\n \"data\": [\n {\n \"value\": value,\n \"name\": 'Churn Rating'\n }\n ]\n }\n ]\n }; \n st.markdown(f'### Probabilidade do cliente Cancelar o serviço: {value}%.') \n ##Plot Gauge\n st_echarts(options=option, width=\"100%\", key=value) \n \n else:\n ##função Js para Grafico de Gauge.\n color = [[0.25, '#7eab70'],[0.5, '#659259'],[0.75, '#4d7841'],[1, '#056003']]\n option = {\n \"series\": [\n {\n \"type\": 'gauge',\n \"startAngle\": 180,\n \"endAngle\": 0,\n \"min\": 50,\n \"max\": 100,\n \"splitNumber\": 8,\n \"axisLine\": {\n \"lineStyle\": {\n \"width\": 6,\n \"color\": color \n }\n },\n \"pointer\": {\n \"icon\": 'path://M12.8,0.7l12,40.1H0.7L12.8,0.7z',\n \"length\": \"12%\",\n \"width\": 20,\n \"offsetCenter\": [0, '-60%'],\n \"itemStyle\": {\n \"color\": 'auto'\n }\n },\n \"axisTick\": {\n \"length\": 12,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 2\n }\n 
},\n \"splitLine\": {\n \"length\": 20,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 5\n }\n },\n \"axisLabel\": {\n \"color\": '#464646',\n \"fontSize\": 15,\n \"distance\": -60\n \n },\n \"title\": {\n \"offsetCenter\": [0, '-20%'],\n \"fontSize\": 20\n },\n \"detail\": {\n \"fontSize\": 20,\n \"offsetCenter\": [0, '0%'],\n \"valueAnimation\": \"true\",\n \"formatter\":value, \n \"color\": 'auto'\n },\n \"data\": [\n {\n \"value\": value,\n \"name\": 'Churn Rating'\n }\n ]\n }\n ]\n }\n st.markdown(f'### Probabilidade do cliente permanecer com o serviço: {value}%.') \n ##Plot Gauge\n st_echarts(options=option, width=\"100%\", key=value)\n\n###### Consulta Cliente ########\nif pagina == \"Consulta Cliente\" :\n df = pd.read_csv(\"https://raw.githubusercontent.com/Jcnok/Stack_Labs_Churn/main/Data/Churn_Modelling.csv\")\n ids = df.CustomerId.unique() \n modelo = load_model('./lgbm_tune_pycaret')\n st.markdown(\"### Sugestão de Ids para consulta:\")\n st.write(df['CustomerId'].sample(5))\n id = st.number_input(\"Informe o ID do Cliente\",ids.min(),ids.max())\n if id in ids:\n filtro = df.query(f'CustomerId=={id}')\n st.dataframe(filtro)\n pred_filtro = predict_model(modelo,data=filtro)\n value = round((pred_filtro.Score.astype('float').to_list()[0])*100,2)\n if pred_filtro.Label.values == 1:\n ##função Js para Grafico de Gauge.\n color = [[0.25, '#ffa173'],[0.5, '#fa6644'],[0.75, '#f52c15'],[1, '#900000']]\n option = {\n \"series\": [\n {\n \"type\": 'gauge',\n \"startAngle\": 180,\n \"endAngle\": 0,\n \"min\": 50,\n \"max\": 100,\n \"splitNumber\": 8,\n \"axisLine\": {\n \"lineStyle\": {\n \"width\": 6,\n \"color\": color \n }\n },\n \"pointer\": {\n \"icon\": 'path://M12.8,0.7l12,40.1H0.7L12.8,0.7z',\n \"length\": \"12%\",\n \"width\": 20,\n \"offsetCenter\": [0, '-60%'],\n \"itemStyle\": {\n \"color\": 'auto'\n }\n },\n \"axisTick\": {\n \"length\": 12,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 2\n }\n },\n \"splitLine\": {\n \"length\": 20,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 5\n }\n },\n \"axisLabel\": {\n \"color\": '#464646',\n \"fontSize\": 15,\n \"distance\": -60\n \n },\n \"title\": {\n \"offsetCenter\": [0, '-20%'],\n \"fontSize\": 20\n },\n \"detail\": {\n \"fontSize\": 20,\n \"offsetCenter\": [0, '0%'],\n \"valueAnimation\": \"true\",\n \"formatter\":value, \n \"color\": 'auto'\n },\n \"data\": [\n {\n \"value\": value,\n \"name\": 'Churn Rating'\n }\n ]\n }\n ]\n }; \n st.markdown(f'### Probabilidade do cliente Cancelar o serviço: {value}%.') \n ##Plot Gauge\n st_echarts(options=option, width=\"100%\", key=value) \n else:\n ##função Js para Grafico de Gauge.\n color = [[0.25, '#7eab70'],[0.5, '#659259'],[0.75, '#4d7841'],[1, '#056003']]\n option = {\n \"series\": [\n {\n \"type\": 'gauge',\n \"startAngle\": 180,\n \"endAngle\": 0,\n \"min\": 50,\n \"max\": 100,\n \"splitNumber\": 8,\n \"axisLine\": {\n \"lineStyle\": {\n \"width\": 6,\n \"color\": color \n }\n },\n \"pointer\": {\n \"icon\": 'path://M12.8,0.7l12,40.1H0.7L12.8,0.7z',\n \"length\": \"12%\",\n \"width\": 20,\n \"offsetCenter\": [0, '-60%'],\n \"itemStyle\": {\n \"color\": 'auto'\n }\n },\n \"axisTick\": {\n \"length\": 12,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 2\n }\n },\n \"splitLine\": {\n \"length\": 20,\n \"lineStyle\": {\n \"color\": 'auto',\n \"width\": 5\n }\n },\n \"axisLabel\": {\n \"color\": '#464646',\n \"fontSize\": 15,\n \"distance\": -60\n \n },\n \"title\": {\n \"offsetCenter\": [0, '-20%'],\n \"fontSize\": 20\n },\n \"detail\": {\n \"fontSize\": 
20,\n \"offsetCenter\": [0, '0%'],\n \"valueAnimation\": \"true\",\n \"formatter\":value, \n \"color\": 'auto'\n },\n \"data\": [\n {\n \"value\": value,\n \"name\": 'Churn Rating'\n }\n ]\n }\n ]\n }\n st.markdown(f'### Probabilidade do cliente permanecer com o serviço: {value}%.') \n ##Plot Gauge\n st_echarts(options=option, width=\"100%\", key=value)\n else:\n st.markdown(\"### Cliente inexistente, informe um id válido!\") \n\n###### Dashboard Compartivo ######\nif pagina == 'Dashbord comparativo': \n st.subheader(\"Dashboard compartivo entre o resultado real Vs resultado do modelo\") \n col1,col2,col3 = st.columns([1,2,3])\n site = \"https://app.powerbi.com/view?r=eyJrIjoiZDBlNTgzMzktOGJkYy00NTkxLWExMDYtODJmOTU1MmVjODE5IiwidCI6Ijg5ZmI0ZjFmLTg1NjctNDEyOC1iMWUzLWNhM2IyZTVhYmRmOCJ9\"\n st.components.v1.iframe(site, width=960, height=600, scrolling=True)\n\n st.sidebar.write(\"\"\"O Dashbord é interativo, posicione o mouse sobre os gráficos para obter o comparativo de acertos. \n \"\"\")\n \n \n\n \n", "repo_name": "Jcnok/app_svm", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 24586, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.set_option", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 26, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 26, "usage_type": "name"}, {"api_name": "streamlit.set_page_config", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.sidebar.columns", "line_number": 37, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 37, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 41, "usage_type": "attribute"}, {"api_name": "streamlit_lottie.st_lottie", "line_number": 45, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 46, "usage_type": "call"}, {"api_name": "streamlit.components.v1.iframe", "line_number": 48, "usage_type": "call"}, {"api_name": "streamlit.components", "line_number": 48, "usage_type": "attribute"}, {"api_name": "streamlit_lottie.st_lottie", "line_number": 52, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 53, "usage_type": "call"}, {"api_name": "streamlit.components.v1.html", "line_number": 57, "usage_type": "call"}, {"api_name": "streamlit.components", "line_number": 57, "usage_type": "attribute"}, {"api_name": "streamlit.subheader", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.components.v1.iframe", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.components", "line_number": 65, "usage_type": "attribute"}, {"api_name": "streamlit_lottie.st_lottie", "line_number": 70, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 71, "usage_type": "call"}, {"api_name": "streamlit.components.v1.html", "line_number": 75, "usage_type": "call"}, {"api_name": "streamlit.components", "line_number": 75, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", 
"line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 79, "usage_type": "attribute"}, {"api_name": "streamlit.markdown", "line_number": 84, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 88, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 90, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 92, "usage_type": "call"}, {"api_name": "pycaret.classification.load_model", "line_number": 93, "usage_type": "call"}, {"api_name": "pycaret.classification.predict_model", "line_number": 94, "usage_type": "call"}, {"api_name": "pycaret.utils.check_metric", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 103, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 105, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 106, "usage_type": "attribute"}, {"api_name": "streamlit.download_button", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 119, "usage_type": "call"}, {"api_name": "pycaret.classification.load_model", "line_number": 120, "usage_type": "call"}, {"api_name": "pycaret.classification.predict_model", "line_number": 121, "usage_type": "call"}, {"api_name": "streamlit.sidebar.select_slider", "line_number": 124, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 124, "usage_type": "attribute"}, {"api_name": "streamlit.write", "line_number": 127, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 150, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 157, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 158, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 162, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 163, "usage_type": "call"}, {"api_name": "streamlit.cache", "line_number": 165, "usage_type": "attribute"}, {"api_name": "streamlit.markdown", "line_number": 170, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 171, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 172, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 173, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 174, "usage_type": "call"}, {"api_name": "streamlit.download_button", "line_number": 176, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 186, "usage_type": "call"}, {"api_name": "streamlit.sidebar.write", "line_number": 187, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 187, "usage_type": "attribute"}, {"api_name": "streamlit.markdown", "line_number": 191, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 193, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 193, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 194, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 195, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 196, "usage_type": "call"}, 
{"api_name": "streamlit.radio", "line_number": 197, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 199, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 200, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 201, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 201, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 208, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 215, "usage_type": "call"}, {"api_name": "pycaret.classification.load_model", "line_number": 216, "usage_type": "call"}, {"api_name": "pycaret.classification.predict_model", "line_number": 217, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 288, "usage_type": "call"}, {"api_name": "streamlit_echarts.st_echarts", "line_number": 290, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 359, "usage_type": "call"}, {"api_name": "streamlit_echarts.st_echarts", "line_number": 361, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 365, "usage_type": "call"}, {"api_name": "pycaret.classification.load_model", "line_number": 367, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 368, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 369, "usage_type": "call"}, {"api_name": "streamlit.number_input", "line_number": 370, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 373, "usage_type": "call"}, {"api_name": "pycaret.classification.predict_model", "line_number": 374, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 443, "usage_type": "call"}, {"api_name": "streamlit_echarts.st_echarts", "line_number": 445, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 513, "usage_type": "call"}, {"api_name": "streamlit_echarts.st_echarts", "line_number": 515, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 517, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 521, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 522, "usage_type": "call"}, {"api_name": "streamlit.components.v1.iframe", "line_number": 524, "usage_type": "call"}, {"api_name": "streamlit.components", "line_number": 524, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.write", "line_number": 526, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 526, "usage_type": "attribute"}]} +{"seq_id": "30104353408", "text": "from Parser import MONTHS\nfrom math import ceil\nfrom termcolor import colored\n\nfrom Calculations import Calculations\n\n\nclass Reports:\n def __init__(self):\n self.calculations = Calculations()\n\n def annual_report(self, year):\n min_temp, max_temp, max_humidity = self.calculations.annual_calculation(year)\n print( \"Highest: \", max_temp[\"max_temperature\"], \"C on \", MONTHS[max_temp[\"max_temp_month\"] - 1], max_temp[\"max_temp_day\"] )\n print( \"Lowest: \", min_temp[\"min_temperature\"], \"C on \", MONTHS[min_temp[\"min_temp_month\"] - 1], min_temp[\"min_temp_day\"] )\n print( \"Humidity: \", max_humidity[\"max_humidity\"], \"% on \", 
MONTHS[max_humidity[\"max_humidity_month\"] - 1], max_humidity[\"max_humidity_day\"] )\n\n return True\n\n def monthly_report(self, year, month):\n min_temps, max_temps, mean_humidities = self.calculations.monthly_calculation(year, month)\n print(\"Highest Average: \", ceil(max_temps[\"avg_temp_max\"]), \"C\")\n print(\"Lowest Average: \", ceil(min_temps[\"avg_temp_min\"]), \"C\")\n print( \"Average Mean Humidity: \", ceil(mean_humidities[\"avg_humidity_mean\"]), \"%\")\n\n return True\n\n def bar_chart_report(self, year, month):\n min_temperature, max_temperature = self.calculations.bar_chart_calculation(year, month)\n for val in range(min_temperature):\n text = colored(\"-\", \"red\", attrs=[\"reverse\", \"blink\"])\n print(text, end=\"\")\n print(min_temperature, \"C\")\n\n for val in range(max_temperature):\n text = colored(\"+\", \"blue\", attrs=[\"reverse\", \"blink\"])\n print(text, end=\"\")\n print(max_temperature, \"C\")\n\n return\n", "repo_name": "mosafdarr/my_repo", "sub_path": "Reports.py", "file_name": "Reports.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Calculations.Calculations", "line_number": 10, "usage_type": "call"}, {"api_name": "Parser.MONTHS", "line_number": 14, "usage_type": "name"}, {"api_name": "Parser.MONTHS", "line_number": 15, "usage_type": "name"}, {"api_name": "Parser.MONTHS", "line_number": 16, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 22, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 23, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 24, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 31, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "41468489748", "text": "import sys\nimport webbrowser\nimport pytest\n\nfrom PySide6 import QtCore\nfrom PySide6 import QtGui, QtWidgets\n\n# SV imports\nfrom sasdata.dataloader.loader import Loader\nfrom sas.qtgui.MainWindow.DataManager import DataManager\nfrom sas.qtgui.Plotting.PlotterData import Data1D\nfrom sas.qtgui.Plotting.PlotterData import Data2D\n\n# Tested module\nfrom sas.qtgui.Utilities.GuiUtils import *\n\n\nclass GuiUtilsTest:\n '''Test the GUI Utilities methods'''\n\n def testDefaults(self):\n \"\"\"\n Test all the global constants defined in the file.\n \"\"\"\n # Should probably test the constants in the file,\n # but this will done after trimming down GuiUtils\n # and retaining only necessary variables.\n pass\n\n def testGetAppDir(self):\n \"\"\"\n \"\"\"\n pass\n\n\n def testCommunicate(self, qapp):\n \"\"\"\n Test the container class with signal definitions\n \"\"\"\n com = Communicate()\n\n # All defined signals\n list_of_signals = [\n 'fileReadSignal',\n 'fileDataReceivedSignal',\n 'statusBarUpdateSignal',\n 'updatePerspectiveWithDataSignal',\n 'updateModelFromPerspectiveSignal',\n 'plotRequestedSignal',\n 'progressBarUpdateSignal',\n 'activeGraphName',\n 'sendDataToPanelSignal',\n 'updateModelFromDataOperationPanelSignal'\n ]\n\n # Assure all signals are defined.\n for signal in list_of_signals:\n assert signal in dir(com)\n\n def testupdateModelItem(self, qapp):\n \"\"\"\n Test the generic QModelItem update method\n \"\"\"\n test_item = QtGui.QStandardItem()\n test_list = ['aa', 4, True, ]\n name = \"Black Sabbath\"\n\n # update the item\n updateModelItem(test_item, test_list, name)\n\n # Make sure test_item got all data 
added\n assert test_item.child(0).text() == name\n list_from_item = test_item.child(0).data()\n assert isinstance(list_from_item, list)\n assert list_from_item[0] == test_list[0]\n assert list_from_item[1] == test_list[1]\n assert list_from_item[2] == test_list[2]\n\n @pytest.mark.xfail(reason=\"2022-09 already broken\")\n def testupdateModelItemWithPlot(self, qapp):\n \"\"\"\n Test the QModelItem checkbox update method\n \"\"\"\n # test_item = QtGui.QStandardItem()\n # test_list = ['aa','11']\n # update_data = test_list\n # name = \"Black Sabbath\"\n\n # # update the item\n # updateModelItemWithPlot(test_item, update_data, name)\n\n test_item = QtGui.QStandardItem()\n update_data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0])\n name = \"Black Sabbath\"\n update_data.id = '[0]data0'\n update_data.name = 'data0'\n # update the item\n updateModelItemWithPlot(test_item, update_data, name)\n\n # Make sure test_item got all data added\n assert test_item.child(0).text() == name\n assert test_item.child(0).isCheckable()\n data_from_item = test_item.child(0).child(0).data()\n assert isinstance(data_from_item, Data1D)\n assert list(data_from_item.x) == [1.0, 2.0, 3.0]\n assert list(data_from_item.y) == [10.0, 11.0, 12.0]\n assert test_item.rowCount() == 1\n\n # add another dataset (different from the first one)\n update_data1 = Data1D(x=[1.1, 2.1, 3.1], y=[10.1, 11.1, 12.1])\n update_data1.id = '[0]data1'\n update_data1.name = 'data1'\n name1 = \"Black Sabbath1\"\n # update the item and check number of rows\n updateModelItemWithPlot(test_item, update_data1, name1)\n\n assert test_item.rowCount() == 2\n\n # add another dataset (with the same name as the first one)\n # check that number of rows was not changed but data have been updated\n update_data2 = Data1D(x=[4.0, 5.0, 6.0], y=[13.0, 14.0, 15.0])\n update_data2.id = '[1]data0'\n update_data2.name = 'data0'\n name2 = \"Black Sabbath2\"\n updateModelItemWithPlot(test_item, update_data2, name2)\n assert test_item.rowCount() == 2\n\n data_from_item = test_item.child(0).child(0).data()\n assert list(data_from_item.x) == [4.0, 5.0, 6.0]\n assert list(data_from_item.y) == [13.0, 14.0, 15.0]\n\n\n def testPlotsFromCheckedItems(self, qapp):\n \"\"\"\n Test addition of a plottable to the model\n \"\"\"\n\n # Mockup data\n test_list0 = \"FRIDAY\"\n test_list1 = \"SATURDAY\"\n test_list2 = \"MONDAY\"\n\n # Main item (\"file\")\n checkbox_model = QtGui.QStandardItemModel()\n checkbox_item = QtGui.QStandardItem(True)\n checkbox_item.setCheckable(True)\n checkbox_item.setCheckState(QtCore.Qt.Checked)\n test_item0 = QtGui.QStandardItem()\n test_item0.setData(test_list0)\n\n # Checked item 1\n test_item1 = QtGui.QStandardItem(True)\n test_item1.setCheckable(True)\n test_item1.setCheckState(QtCore.Qt.Checked)\n object_item = QtGui.QStandardItem()\n object_item.setData(test_list1)\n test_item1.setChild(0, object_item)\n\n checkbox_item.setChild(0, test_item0)\n checkbox_item.appendRow(test_item1)\n\n # Unchecked item 2\n test_item2 = QtGui.QStandardItem(True)\n test_item2.setCheckable(True)\n test_item2.setCheckState(QtCore.Qt.Unchecked)\n object_item = QtGui.QStandardItem()\n object_item.setData(test_list2)\n test_item2.setChild(0, object_item)\n checkbox_item.appendRow(test_item2)\n\n checkbox_model.appendRow(checkbox_item)\n\n # Pull out the \"plottable\" documents\n plot_list = plotsFromCheckedItems(checkbox_model)\n\n # Make sure only the checked data is present\n # FRIDAY IN\n assert test_list0 in plot_list[0]\n # SATURDAY IN\n assert test_list1 in 
plot_list[1]\n # MONDAY NOT IN\n assert test_list2 not in plot_list[0]\n assert test_list2 not in plot_list[1]\n\n @pytest.mark.xfail(reason=\"2022-09 already broken - input file issue\")\n def testInfoFromData(self, qapp):\n \"\"\"\n Test Info element extraction from a plottable object\n \"\"\"\n loader = Loader()\n manager = DataManager()\n\n # get Data1D\n p_file=\"cyl_400_20.txt\"\n output_object = loader.load(p_file)\n new_data = manager.create_gui_data(output_object[0], p_file)\n\n # Extract Info elements into a model item\n item = infoFromData(new_data)\n\n # Test the item and its children\n assert isinstance(item, QtGui.QStandardItem)\n assert item.rowCount() == 5\n assert item.text() == \"Info\"\n assert p_file in item.child(0).text()\n assert \"Run\" in item.child(1).text()\n assert \"Data1D\" in item.child(2).text()\n assert p_file in item.child(3).text()\n assert \"Process\" in item.child(4).text()\n\n def testOpenLink(self, mocker):\n \"\"\"\n Opening a link in the external browser\n \"\"\"\n good_url1 = r\"http://test.test.com\"\n good_url2 = r\"mailto:test@mail.com\"\n good_url3 = r\"https://127.0.0.1\"\n\n bad_url1 = \"\"\n bad_url2 = QtGui.QStandardItem()\n bad_url3 = r\"poop;//**I.am.a.!bad@url\"\n\n mocker.patch.object(webbrowser, 'open')\n openLink(good_url1)\n openLink(good_url2)\n openLink(good_url3)\n assert webbrowser.open.call_count == 3\n\n with pytest.raises(AttributeError):\n openLink(bad_url1)\n with pytest.raises(AttributeError):\n openLink(bad_url2)\n with pytest.raises(AttributeError):\n openLink(bad_url3)\n\n def testRetrieveData1d(self):\n \"\"\"\n \"\"\"\n with pytest.raises(AttributeError):\n retrieveData1d(\"BOOP\")\n\n #data = Data1D()\n #with pytest.raises(ValueError):\n # retrieveData1d(data)\n\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0])\n\n text = retrieveData1d(data)\n\n assert \"Temperature:\" in text\n assert \"Beam_size:\" in text\n assert \"X_min = 1.0: X_max = 3.0\" in text\n assert \"3.0 \\t12.0 \\t0.0 \\t0.0\" in text\n\n def testRetrieveData2d(self):\n \"\"\"\n \"\"\"\n with pytest.raises(AttributeError):\n retrieveData2d(\"BOOP\")\n data = Data2D(image=[1.0, 2.0, 3.0],\n err_image=[0.01, 0.02, 0.03],\n qx_data=[0.1, 0.2, 0.3],\n qy_data=[0.1, 0.2, 0.3])\n\n text = retrieveData2d(data)\n\n assert \"Type: Data2D\" in text\n assert \"I_min = 1.0\" in text\n assert \"I_max = 3.0\" in text\n assert \"2 \\t0.3 \\t0.3 \\t3.0 \\t0.03 \\t0.0 \\t0.0\" in text\n\n def testOnTXTSave(self):\n \"\"\"\n Test the file writer for saving 1d/2d data\n \"\"\"\n path = \"test123\"\n save_path = path + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n if os.path.isfile(save_path):\n os.remove(save_path)\n\n # Broken data\n data = Data1D(x=[1.0, 2.0, 3.0], y=[])\n # Expect a raise\n with pytest.raises(IndexError):\n onTXTSave(data, path)\n\n # Good data - no dX/dY\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0])\n onTXTSave(data, path)\n\n assert os.path.isfile(save_path)\n with open(save_path,'r') as out:\n data_read = out.read()\n expected = \\\n \" \\n\"+\\\n \"1.000000000000000e+00 1.000000000000000e+01\\n\" +\\\n \"2.000000000000000e+00 1.100000000000000e+01\\n\" +\\\n \"3.000000000000000e+00 1.200000000000000e+01\\n\"\n\n assert expected == data_read\n\n if os.path.isfile(save_path):\n os.remove(save_path)\n\n # Good data - with dX/dY\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0],\n dx=[0.1, 0.2, 0.3], dy=[0.1, 0.2, 0.3])\n\n onTXTSave(data, path)\n with open(save_path,'r') as out:\n data_read = out.read()\n assert \" 
\\n\" in data_read\n assert \"1.000000000000000e+00 1.000000000000000e+01 1.000000000000000e-01 1.000000000000000e-01\\n\" in data_read\n assert \"2.000000000000000e+00 1.100000000000000e+01 2.000000000000000e-01 2.000000000000000e-01\\n\" in data_read\n assert \"3.000000000000000e+00 1.200000000000000e+01 3.000000000000000e-01 3.000000000000000e-01\\n\" in data_read\n\n if os.path.isfile(save_path):\n os.remove(save_path)\n\n def testSaveAnyData(self, qapp, caplog, mocker):\n \"\"\"\n Test the generic GUIUtils.saveAnyData method\n \"\"\"\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0],\n dx=[0.1, 0.2, 0.3], dy=[0.1, 0.2, 0.3])\n\n # Test the .txt format\n file_name = \"test123_out\"\n file_name_save = \"test123_out.txt\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.txt\"\n self.genericFileSaveTest(data, file_name, file_name_save, \"ASCII\", caplog=caplog)\n\n data = Data2D(image=[1.0, 2.0, 3.0],\n err_image=[0.01, 0.02, 0.03],\n qx_data=[0.1, 0.2, 0.3],\n qy_data=[0.1, 0.2, 0.3])\n\n # Test the .txt format\n file_name = \"test123_out\"\n file_name_save = \"test123_out.dat\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.dat\"\n self.genericFileSaveTest(data, file_name, file_name_save, \"IGOR\", caplog=caplog)\n\n def testSaveData1D(self, qapp, caplog, mocker):\n \"\"\"\n Test the 1D file save method\n \"\"\"\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0],\n dx=[0.1, 0.2, 0.3], dy=[0.1, 0.2, 0.3])\n\n # Test the .txt format\n file_name = \"test123_out.txt\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.txt\"\n self.genericFileSaveTest(data, file_name)\n\n # Test the .xml format\n file_name = \"test123_out.xml\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.xml\"\n self.genericFileSaveTest(data, file_name)\n\n # Test the wrong format\n file_name = \"test123_out.mp3\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.mp3\"\n self.genericFileSaveTest(data, file_name, file_name, \"ASCII\", \"1D\", caplog=caplog)\n\n def testSaveData2D(self, qapp, caplog, mocker):\n \"\"\"\n Test the 1D file save method\n \"\"\"\n data = Data2D(image=[1.0, 2.0, 3.0],\n err_image=[0.01, 0.02, 0.03],\n qx_data=[0.1, 0.2, 0.3],\n qy_data=[0.1, 0.2, 0.3])\n\n # Test the .txt format\n file_name = \"test123_out.dat\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.dat\"\n self.genericFileSaveTest(data, file_name)\n\n # Test the wrong format\n file_name = \"test123_out.mp3\"\n mocker.patch.object(QtWidgets.QFileDialog, 'getSaveFileName', return_value=(file_name,''))\n data.filename = \"test123.mp3\"\n self.genericFileSaveTest(data, file_name, file_name, \"IGOR\", \"2D\", caplog=caplog)\n\n def genericFileSaveTest(self, data, name, name_full=\"\", file_format=\"ASCII\", level=None, caplog=False):\n if level == '1D':\n saveMethod = saveData1D\n elif level == \"2D\":\n saveMethod = saveData2D\n else:\n saveMethod = saveAnyData\n\n name_full = name if name_full == \"\" else name_full\n\n if caplog:\n with caplog.at_level(logging.WARNING):\n saveMethod(data)\n #assert len(cm.output) == 1\n assert (f\"Unknown file type specified when saving {name}.\"\n + f\" Saving in 
{file_format} format.\") in caplog.text\n else:\n saveMethod(data)\n assert os.path.isfile(name_full)\n os.remove(name_full)\n assert not os.path.isfile(name_full)\n\n def testXYTransform(self, qapp):\n \"\"\" Assure the unit/legend transformation is correct\"\"\"\n data = Data1D(x=[1.0, 2.0, 3.0], y=[10.0, 11.0, 12.0],\n dx=[0.1, 0.2, 0.3], dy=[0.1, 0.2, 0.3])\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"y\")\n assert xLabel == \"()\"\n assert xscale == \"linear\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x^(2)\", yLabel=\"1/y\")\n assert xLabel == \"^{2}(()^{2})\"\n assert yLabel == \"1/(()^{-1})\"\n assert xscale == \"linear\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x^(4)\", yLabel=\"ln(y)\")\n assert xLabel == \"^{4}(()^{4})\"\n assert yLabel == \"\\\\ln{()}()\"\n assert xscale == \"linear\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"ln(x)\", yLabel=\"y^(2)\")\n assert xLabel == \"\\\\ln{()}()\"\n assert yLabel == \"^{2}(()^{2})\"\n assert xscale == \"linear\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"log10(x)\", yLabel=\"y*x^(2)\")\n assert xLabel == \"()\"\n assert yLabel == \" \\\\ \\\\ ^{2}(()^{2})\"\n assert xscale == \"log\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"log10(x^(4))\", yLabel=\"y*x^(4)\")\n assert xLabel == \"^{4}(()^{4})\"\n assert yLabel == \" \\\\ \\\\ ^{4}(()^{16})\"\n assert xscale == \"log\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"1/sqrt(y)\")\n assert yLabel == \"1/\\\\sqrt{}(()^{-0.5})\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"log10(y)\")\n assert yLabel == \"()\"\n assert yscale == \"log\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"ln(y*x)\")\n assert yLabel == \"\\\\ln{( \\\\ \\\\ )}()\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"ln(y*x^(2))\")\n assert yLabel == \"\\\\ln ( \\\\ \\\\ ^{2})(()^{2})\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"ln(y*x^(4))\")\n assert yLabel == \"\\\\ln ( \\\\ \\\\ ^{4})(()^{4})\"\n assert yscale == \"linear\"\n\n xLabel, yLabel, xscale, yscale = xyTransform(data, xLabel=\"x\", yLabel=\"log10(y*x^(4))\")\n assert yLabel == \" \\\\ \\\\ ^{4}(()^{4})\"\n assert yscale == \"log\"\n\n def testReplaceHTMLwithUTF8(self):\n ''' test single character replacement '''\n s = None\n with pytest.raises(AttributeError):\n result = replaceHTMLwithUTF8(s)\n\n s = \"\"\n assert replaceHTMLwithUTF8(s) == s\n\n s = \"aaaa\"\n assert replaceHTMLwithUTF8(s) == s\n\n s = \"Å ∞ ±\"\n assert replaceHTMLwithUTF8(s) == \"Å ∞ ±\"\n\n def testReplaceHTMLwithASCII(self):\n ''' test single character replacement'''\n s = None\n with pytest.raises(AttributeError):\n result = replaceHTMLwithASCII(s)\n\n s = \"\"\n assert replaceHTMLwithASCII(s) == s\n\n s = \"aaaa\"\n assert replaceHTMLwithASCII(s) == s\n\n s = \"Å ∞ ±\"\n assert replaceHTMLwithASCII(s) == \"Ang inf +/-\"\n\n def testrstToHtml(self):\n ''' test rst to html conversion'''\n s = None\n with pytest.raises(TypeError):\n result = rstToHtml(s)\n\n s = \".. 
|Ang| unicode:: U+212B\"\n assert rstToHtml(s) == ('Ang', 'Å')\n s = \".. |Ang^-1| replace:: |Ang|\\ :sup:`-1`\"\n assert rstToHtml(s) == ('Ang^-1', 'Å-1')\n s = \".. |1e-6Ang^-2| replace:: 10\\ :sup:`-6`\\ |Ang|\\ :sup:`-2`\"\n assert rstToHtml(s) == ('1e-6Ang^-2', '10-6 Å-2')\n s = \".. |cm^-1| replace:: cm\\ :sup:`-1`\"\n assert rstToHtml(s) == ('cm^-1', 'cm-1')\n s = \".. |deg| unicode:: U+00B0\"\n assert rstToHtml(s) == ('deg', '°')\n s = \".. |cdot| unicode:: U+00B7\"\n assert rstToHtml(s) == ('cdot', '·')\n s = \"bad string\"\n assert rstToHtml(s) == (None, None)\n\n\n def testConvertUnitToHTML(self):\n ''' test unit string replacement'''\n s = None\n assert convertUnitToHTML(s) is \"\"\n\n s = \"\"\n assert convertUnitToHTML(s) == s\n\n s = \"aaaa\"\n assert convertUnitToHTML(s) == s\n\n s = \"1/A\"\n assert convertUnitToHTML(s) == \"Å-1\"\n\n s = \"Ang\"\n assert convertUnitToHTML(s) == \"Å\"\n\n s = \"1e-6/Ang^2\"\n assert convertUnitToHTML(s) == \"10-62\"\n\n s = \"inf\"\n assert convertUnitToHTML(s) == \"∞\"\n s = \"-inf\"\n\n assert convertUnitToHTML(s) == \"-∞\"\n\n s = \"1/cm\"\n assert convertUnitToHTML(s) == \"cm-1\"\n\n s = \"degrees\"\n assert convertUnitToHTML(s) == \"°\"\n\n def testParseName(self):\n '''test parse out a string from the beinning of a string'''\n # good input\n value = \"_test\"\n assert parseName(value, '_') == 'test'\n value = \"____test____\"\n assert parseName(value, '_') == '___test____'\n assert parseName(value, '___') == '_test____'\n assert parseName(value, 'test') == '____test____'\n # bad input\n with pytest.raises(TypeError):\n parseName(value, None)\n with pytest.raises(TypeError):\n parseName(None, '_')\n value = []\n with pytest.raises(TypeError):\n parseName(value, '_')\n value = 1.44\n with pytest.raises(TypeError):\n parseName(value, 'p')\n value = 100\n with pytest.raises(TypeError):\n parseName(value, 'p')\n\n @pytest.mark.xfail(reason=\"2022-09 already broken\")\n def testToDouble(self):\n '''test homemade string-> double converter'''\n #good values\n value = \"1\"\n assert toDouble(value) == 1.0\n value = \"1.2\"\n # has to be AlmostEqual due to numerical rounding\n assert toDouble(value) == pytest.approx(1.2, abs=1e-6)\n value = \"2,1\"\n assert toDouble(value) == pytest.approx(2.1, abs=1e-6)\n\n # bad values\n value = None\n with pytest.raises(TypeError):\n toDouble(value)\n value = \"MyDouble\"\n with pytest.raises(TypeError):\n toDouble(value)\n value = [1,2.2]\n with pytest.raises(TypeError):\n toDouble(value)\n\n\nclass DoubleValidatorTest:\n \"\"\" Test the validator for floats \"\"\"\n @pytest.fixture(autouse=True)\n def validator(self, qapp):\n '''Create/Destroy the validator'''\n v = DoubleValidator()\n yield v\n\n def testValidateGood(self, validator):\n \"\"\"Test a valid float \"\"\"\n QtCore.QLocale.setDefault(QtCore.QLocale('en_US'))\n float_good = \"170\"\n assert validator.validate(float_good, 1)[0] == QtGui.QValidator.Acceptable\n float_good = \"170.11\"\n ## investigate: a double returns Invalid here!\n ##assert self.validator.validate(float_good, 1)[0] == QtGui.QValidator.Acceptable\n float_good = \"17e2\"\n assert validator.validate(float_good, 1)[0] == QtGui.QValidator.Acceptable\n\n def testValidateBad(self, validator):\n \"\"\"Test a bad float \"\"\"\n float_bad = None\n assert validator.validate(float_bad, 1)[0] == QtGui.QValidator.Intermediate\n float_bad = [1]\n with pytest.raises(TypeError):\n validator.validate(float_bad, 1)\n float_bad = \"1,3\"\n assert validator.validate(float_bad, 1)[0] == 
QtGui.QValidator.Invalid\n\n def notestFixup(self, validator):\n \"\"\"Fixup of a float\"\"\"\n float_to_fixup = \"1,3\"\n validator.fixup(float_to_fixup)\n assert float_to_fixup == \"13\"\n\n\nclass FormulaValidatorTest:\n \"\"\" Test the formula validator \"\"\"\n @pytest.fixture(autouse=True)\n def validator(self, qapp):\n '''Create/Destroy the validator'''\n v = FormulaValidator()\n yield v\n\n def testValidateGood(self, validator):\n \"\"\"Test a valid Formula \"\"\"\n formula_good = \"H24O12C4C6N2Pu\"\n assert validator.validate(formula_good, 1)[0] == QtGui.QValidator.Acceptable\n\n formula_good = \"(H2O)0.5(D2O)0.5\"\n assert validator.validate(formula_good, 1)[0] == QtGui.QValidator.Acceptable\n\n @pytest.mark.xfail(reason=\"2022-09 already broken\")\n def testValidateBad(self, validator):\n \"\"\"Test an invalid Formula \"\"\"\n formula_bad = \"H24 %%%O12C4C6N2Pu\"\n pytest.raises(validator.validate(formula_bad, 1)[0])\n assert validator.validate(formula_bad, 1)[0] == QtGui.QValidator.Intermediate\n\n formula_bad = [1]\n assert self.validator.validate(formula_bad, 1)[0] == QtGui.QValidator.Intermediate\n\nclass HashableStandardItemTest:\n \"\"\" Test the reimplementation of QStandardItem \"\"\"\n @pytest.fixture(autouse=True)\n def item(self, qapp):\n '''Create/Destroy the HashableStandardItem'''\n i = HashableStandardItem()\n yield i\n\n def testHash(self, item):\n '''assure the item returns hash'''\n assert item.__hash__() == 0\n\n def testIndexing(self, item):\n '''test that we can use HashableSI as an index'''\n dictionary = {}\n dictionary[item] = \"wow!\"\n assert dictionary[item] == \"wow!\"\n\n def testClone(self, item):\n '''let's see if we can clone the item'''\n item_clone = item.clone()\n assert item_clone.__hash__() == 0\n\n def testGetConstraints(self):\n '''test the method that reads constraints from a project and returns\n a dict with the constraints'''\n # create a project dict with constraints\n constraint1 = ['scale', 'scale', 'M2.scale', True, 'M2.scale']\n constraint2 = ['scale', 'scale', 'M1.scale', True, 'M1.scale']\n fit_params1 = {'tab_name': ['M1'], 'scale': [True, '1.0', None,\n '0.0', 'inf',\n constraint1], 'foo': 'bar'}\n fit_params2 = {'tab_name': ['M2'], 'scale': [True, '1.0', None,\n '0.0', 'inf',\n constraint2], 'foo': 'bar'}\n fit_page1 = {'fit_data': None, 'fit_params': [fit_params1]}\n fit_page2 = {'fit_data': None, 'fit_params': [fit_params2]}\n fit_project = {'dataset1': fit_page1, 'dataset2': fit_page2}\n # get the constraint_dict\n constraint_dict = getConstraints(fit_project)\n # we have two constraints on different fit pages\n assert len(constraint_dict) == 2\n # we have one constraint per fit page\n assert len(constraint_dict['M1']) == 1\n assert len(constraint_dict['M2']) == 1\n # check the constraints in the constraint_dict\n assert constraint_dict['M1'][0] == constraint1\n assert constraint_dict['M2'][0] == constraint2\n", "repo_name": "SasView/sasview", "sub_path": "src/sas/qtgui/Utilities/UnitTesting/GuiUtilsTest.py", "file_name": "GuiUtilsTest.py", "file_ext": "py", "file_size_in_byte": 25676, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 44, "dataset": "github-code", "pt": "52", "api": [{"api_name": "PySide6.QtGui.QStandardItem", "line_number": 64, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 64, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 92, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 92, "usage_type": "name"}, {"api_name": 
"sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 93, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 104, "usage_type": "argument"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 110, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 121, "usage_type": "call"}, {"api_name": "pytest.mark.xfail", "line_number": 79, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 79, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui.QStandardItemModel", "line_number": 144, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 144, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 145, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 145, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 147, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 147, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 148, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 148, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 152, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 152, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 154, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 154, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 155, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 155, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 163, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 163, "usage_type": "name"}, {"api_name": "PySide6.QtCore.Qt", "line_number": 165, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 165, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 166, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 166, "usage_type": "name"}, {"api_name": "sasdata.dataloader.loader.Loader", "line_number": 190, "usage_type": "call"}, {"api_name": "sas.qtgui.MainWindow.DataManager.DataManager", "line_number": 191, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 202, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 202, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 185, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 185, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui.QStandardItem", "line_number": 220, "usage_type": "call"}, {"api_name": "PySide6.QtGui", "line_number": 220, "usage_type": "name"}, {"api_name": "webbrowser.open", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 229, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 231, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 233, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 239, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 246, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 258, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data2D", "line_number": 260, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 284, "usage_type": "call"}, {"api_name": 
"pytest.raises", "line_number": 286, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 290, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 308, "usage_type": "call"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 326, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 332, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 332, "usage_type": "name"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data2D", "line_number": 336, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 344, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 344, "usage_type": "name"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 352, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 357, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 357, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 363, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 363, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 369, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 369, "usage_type": "name"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data2D", "line_number": 377, "usage_type": "call"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 384, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 384, "usage_type": "name"}, {"api_name": "PySide6.QtWidgets.QFileDialog", "line_number": 390, "usage_type": "attribute"}, {"api_name": "PySide6.QtWidgets", "line_number": 390, "usage_type": "name"}, {"api_name": "sas.qtgui.Plotting.PlotterData.Data1D", "line_number": 418, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 483, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 498, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 513, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 574, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 576, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 579, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 582, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 585, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 596, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 598, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 602, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 605, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 608, "usage_type": "call"}, {"api_name": "pytest.mark.xfail", "line_number": 588, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 588, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 614, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QLocale.setDefault", "line_number": 622, "usage_type": "call"}, {"api_name": "PySide6.QtCore.QLocale", "line_number": 622, "usage_type": "attribute"}, {"api_name": "PySide6.QtCore", "line_number": 622, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 624, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 624, "usage_type": "name"}, {"api_name": 
"PySide6.QtGui.QValidator", "line_number": 629, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 629, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 634, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 634, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 636, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 639, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 639, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 650, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 659, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 659, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 662, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 662, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 668, "usage_type": "call"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 669, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 669, "usage_type": "name"}, {"api_name": "PySide6.QtGui.QValidator", "line_number": 672, "usage_type": "attribute"}, {"api_name": "PySide6.QtGui", "line_number": 672, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 664, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 664, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 676, "usage_type": "call"}]} +{"seq_id": "72550621924", "text": "import random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import Player, Device\n\n\nclass Greedy(Player):\n def __init__(self, coins: int):\n super().__init__(coins)\n\n def logic(self) -> Device:\n best_dev = sorted(self.devices, key=lambda x:x.estimate_mean, reverse=True)[0]\n return best_dev\n \nclass E_Greedy(Player):\n def __init__(self, coins: int, eps):\n self.eps = eps\n super().__init__(coins)\n\n def logic(self) -> Device:\n p = random.random()\n if self.eps > p:\n self.eps = self.eps * .75\n best_dev = random.choice(self.devices)\n else:\n best_dev = sorted(self.devices, key=lambda x:x.estimate_mean)[0]\n return best_dev\n\nif __name__ == \"__main__\":\n \n greedy = Greedy(100)\n e_greedy = E_Greedy(100,.01)\n greedy.play()\n e_greedy.play()\n stop", "repo_name": "msajad79/Multi-ArmBandit", "sub_path": "greedy.py", "file_name": "greedy.py", "file_ext": "py", "file_size_in_byte": 884, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.Player", "line_number": 7, "usage_type": "name"}, {"api_name": "utils.Device", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.Player", "line_number": 15, "usage_type": "name"}, {"api_name": "random.random", "line_number": 21, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.Device", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "20776662827", "text": "# Future Imports for py2/3 backwards compat.\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom builtins import object\nfrom .xml_utils import get_attribute, get_content_of, get_children_of, create_element, as_string, as_xml, get_element\nfrom future import standard_library\nstandard_library.install_aliases()\n\n\nclass Range(object):\n def __init__(self, start, end):\n self.start = start\n 
self.end = end if end else start\n\n def AsXML(self):\n attributes = {}\n attributes['from'] = self.start\n if self.end != self.start:\n attributes['to'] = self.end\n xml_data = create_element('range', attributes)\n return xml_data\n\n\nclass Host(object):\n def __init__(self, name):\n self.name = name\n\n def AsXML(self):\n xml_data = create_element('host')\n xml_data.text = self.name\n return xml_data\n\n\ndef _host_to_object(host):\n if host.tag == \"host\":\n return Host(host.text)\n if host.tag == \"range\":\n return Range(get_attribute(host, 'from'), get_attribute(host, 'to'))\n raise ValueError('Unknown host type: {0}'.format(host.tag))\n\n\nclass ScanConfiguration(object):\n def __init__(self):\n self.id = 0\n self.name = ''\n self.version = 0\n self.template_id = \"full-audit-without-web-spider\"\n self.engine_id = 0\n\n\nclass SiteBase(object):\n def InitalizeFromXML(self, xml_data):\n self.id = int(get_attribute(xml_data, 'id', self.id))\n self.name = get_attribute(xml_data, 'name', self.name)\n self.short_description = get_attribute(xml_data, 'description', self.short_description)\n self.risk_factor = float(get_attribute(xml_data, 'riskfactor', self.risk_factor))\n\n def __init__(self):\n self.id = 0\n self.name = ''\n self.short_description = '' # newlines are removed by Nexpose, use SiteConfiguration.description instead\n self.risk_factor = 1.0\n\n\nclass SiteSummary(SiteBase):\n @staticmethod\n def CreateFromXML(xml_data):\n summary = SiteSummary()\n summary.InitalizeFromXML(xml_data)\n summary.risk_score = float(get_attribute(xml_data, 'riskscore', summary.risk_score))\n return summary\n\n def __init__(self):\n SiteBase.__init__(self)\n self.risk_score = 0.0\n\n\nclass SiteConfiguration(SiteBase):\n @staticmethod\n def CreateFromXML(xml_data):\n config = SiteConfiguration()\n config.InitalizeFromXML(xml_data)\n config.description = get_content_of(xml_data, 'Description', config.description)\n config.is_dynamic = get_attribute(xml_data, 'isDynamic', config.is_dynamic) in ['1', 'true', True]\n config.hosts = [_host_to_object(host) for host in get_children_of(xml_data, 'Hosts')]\n config.alerting = [alert for alert in get_children_of(xml_data, 'Alerting')]\n config.credentials = [credential for credential in get_children_of(xml_data, 'Credentials')]\n config.users = [user for user in get_children_of(xml_data, 'Users')]\n\n # Use scanconfig elements for the SiteConfiguration\n scanconfig = get_element(xml_data, \"ScanConfig\")\n config.configid = scanconfig.get(\"configID\")\n config.configtemplateid = scanconfig.get(\"templateID\")\n config.configname = scanconfig.get(\"name\")\n config.configversion = scanconfig.get(\"configVersion\")\n config.configengineid = scanconfig.get(\"engineID\")\n config.schedules = [schedule for schedule in get_children_of(scanconfig, 'Schedules')]\n\n return config\n\n @staticmethod\n def Create():\n config = SiteConfiguration()\n config.id = -1\n return config\n\n @staticmethod\n def CreateNamed(name):\n config = SiteConfiguration.Create()\n config.name = name\n return config\n\n def __init__(self):\n SiteBase.__init__(self)\n self.description = ''\n self.is_dynamic = False\n self.hosts = []\n self.credentials = []\n self.alerting = []\n self.scan_configuration = [] # TODO\n self.configid = self.id\n self.configtemplateid = \"full-audit-without-web-spider\"\n self.configname = \"Full audit without Web Spider\"\n self.configversion = 3\n self.configengineid = 3\n self.users = []\n self.schedules = []\n\n def AsXML(self, exclude_id):\n attributes = 
{}\n if not exclude_id:\n attributes['id'] = self.id\n attributes['name'] = self.name\n attributes['description'] = self.short_description\n attributes['isDynamic'] = '1' if self.is_dynamic else '0'\n attributes['riskfactor'] = self.risk_factor\n\n xml_data = create_element('Site', attributes)\n\n xml_description = create_element('Description')\n xml_description.text = self.description\n xml_data.append(xml_description)\n\n xml_hosts = create_element('Hosts')\n for host in self.hosts:\n xml_hosts.append(host.AsXML())\n xml_data.append(xml_hosts)\n\n xml_credentials = create_element('Credentials')\n for credential in self.credentials:\n xml_credentials.append(credential)\n xml_data.append(xml_credentials)\n\n xml_alerting = create_element('Alerting')\n for alert in self.alerting:\n xml_alerting.append(alert)\n xml_data.append(xml_alerting)\n\n xml_users = create_element('Users')\n for user in self.users:\n xml_users.append(user)\n xml_data.append(xml_users)\n\n # Include ScanConfig attributes\n attributes = {}\n attributes['configID'] = self.configid\n attributes['name'] = self.configname\n attributes['templateID'] = self.configtemplateid\n attributes['engineID'] = self.configengineid\n attributes['configVersion'] = self.configversion\n\n xml_scanconfig = create_element('ScanConfig', attributes)\n xml_scheduling = create_element('Schedules')\n for schedule in self.schedules:\n xml_scheduling.append(schedule)\n xml_scanconfig.append(xml_scheduling)\n xml_data.append(xml_scanconfig)\n\n # TODO: implement the xxxPrivileges\n # print(as_string(as_xml(as_string(xml_data))))\n return xml_data\n", "repo_name": "rapid7/nexpose-client-python", "sub_path": "nexpose/nexpose_site.py", "file_name": "nexpose_site.py", "file_ext": "py", "file_size_in_byte": 6310, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 25, "dataset": "github-code", "pt": "52", "api": [{"api_name": "future.standard_library.install_aliases", "line_number": 7, "usage_type": "call"}, {"api_name": "future.standard_library", "line_number": 7, "usage_type": "name"}, {"api_name": "builtins.object", "line_number": 10, "usage_type": "name"}, {"api_name": "xml_utils.create_element", "line_number": 20, "usage_type": "call"}, {"api_name": "builtins.object", "line_number": 24, "usage_type": "name"}, {"api_name": "xml_utils.create_element", "line_number": 29, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 38, "usage_type": "call"}, {"api_name": "builtins.object", "line_number": 42, "usage_type": "name"}, {"api_name": "builtins.object", "line_number": 51, "usage_type": "name"}, {"api_name": "xml_utils.get_attribute", "line_number": 53, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 54, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 55, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 56, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 70, "usage_type": "call"}, {"api_name": "xml_utils.get_content_of", "line_number": 83, "usage_type": "call"}, {"api_name": "xml_utils.get_attribute", "line_number": 84, "usage_type": "call"}, {"api_name": "xml_utils.get_children_of", "line_number": 85, "usage_type": "call"}, {"api_name": "xml_utils.get_children_of", "line_number": 86, "usage_type": "call"}, {"api_name": "xml_utils.get_children_of", "line_number": 87, "usage_type": "call"}, {"api_name": "xml_utils.get_children_of", "line_number": 88, "usage_type": "call"}, {"api_name": 
"xml_utils.get_element", "line_number": 91, "usage_type": "call"}, {"api_name": "xml_utils.get_children_of", "line_number": 97, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 138, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 140, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 144, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 149, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 154, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 159, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 172, "usage_type": "call"}, {"api_name": "xml_utils.create_element", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "12118373367", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 25 11:15:19 2014\n\nNoise figure test without a noise source. \nThere are two sections in this test: 1) Connect an RF source to the input\nof DUT and measure the gain\n2) Terminate the input of DUT with a 50 ohm and measure the output noise\n\n@author: cicek\n\"\"\"\n\nfrom pyvisa import visa\n\ndef PromptUser():\n \"\"\"prompt user to get necessary information: GPIB addresses etc.\"\"\"\n addr = raw_input('Enter GPIB address of Spectrum Analyzer: ') \n if (addr < '0'):\n print(\"GPIB addr cannot be smaller than 0\")\n return None\n else:\n addr = \"GPIB::\" + addr \n spa = visa.instrument(addr)\n \n addr = raw_input('Enter GPIB address of RF Signal Source: ') \n if (addr < '0'):\n print(\"GPIB addr cannot be smaller than 0\")\n return None\n else:\n addr = \"GPIB::\" + addr\n rf = visa.instrument(addr)\n \n addr = raw_input('Enter GPIB address of Power Supply: ') \n if (addr < '0'):\n print(\"GPIB addr cannot be smaller than 0\")\n return None\n else:\n addr = \"GPIB::\" + addr()\n ps = visa.instrument(addr)\n \ndef InitializeInstruments():\n \"\"\"Initialize instruments\"\"\"\n \n", "repo_name": "cicekozkan/python-examples", "sub_path": "noise_figure.py", "file_name": "noise_figure.py", "file_ext": "py", "file_size_in_byte": 1231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pyvisa.visa.instrument", "line_number": 23, "usage_type": "call"}, {"api_name": "pyvisa.visa", "line_number": 23, "usage_type": "name"}, {"api_name": "pyvisa.visa.instrument", "line_number": 31, "usage_type": "call"}, {"api_name": "pyvisa.visa", "line_number": 31, "usage_type": "name"}, {"api_name": "pyvisa.visa.instrument", "line_number": 39, "usage_type": "call"}, {"api_name": "pyvisa.visa", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "17627119283", "text": "import math # For normal distribution\nimport csv # For saving a list of lists\nimport numpy as np # For array operations\nimport matplotlib.pyplot as plt # For plots\n\nimport flt_file as fileop # For paths and error reports\nimport flt_config as scfg # IO directory and file names\n\n\n# Plot constants for figures\nHEIGHT = 8 # Figure height (inches)\nWIDTH = 12 # Figure width (inches)\nLEGEND_FONT = 13 # Font size for figure title (pt)\nTYP_FONT = 12 # Font size for axes numbers (pt)\nRESOLVE = 90 # Figure display resolution (dpi)\nVERY_SMALL = 1.0E-16 # limit on plot values\n\n# Title Plot - Time history series\nLEGEND_SERIES = \"\\n\\n Fault-Flo Results for CO2 Migration through a Fault \\n\"\n\n# Font definitions\nREG_FONT = {'fontname': 'Arial', # X-axis, Y-axis 
font\n 'color': 'black',\n 'size': TYP_FONT}\nLEG_FONT = {'fontname': 'Arial', # Title font\n 'color': 'black',\n 'size': LEGEND_FONT}\n\n# General Statements\nMAJ_IN = \"\\n --> \" # Page + general message start\nLEAD_IN = \" --> \" # General screen message start\nCMD_IN = \" >>> \" # Command screen message start\nERR_IN = \" *** \" # Error message start\nSTART_TXT = LEAD_IN + \"START Realization >\" # Multi-history plot start\nEND_TXT = LEAD_IN + \"END Realization >\" # Multi-history plot end\nCAUTION = (CMD_IN + \"If Paused, \"\n + \"Hit Exit at Top-Right of Plot Window to Proceed ...\")\nAGAIN = \"Please Try Again.\" # Simple repeat request\nAGAIN2 = \"\\n\" + CMD_IN + AGAIN # Repeat request with new line\nEMPTY = (\"\\n\" + CMD_IN + \"Y Value Essentially Zero For Selected Simulations!!\"\n + \"\\n\" + CMD_IN + \"- No Plot; Try Again.\")\n\n# Controls and warnings\nNEGATIVES = ('n', 'N', 'q', 'Q') # Keys for negative answer\nEXIT_COMMAND = ('x', 'X') # Keys that will exit\nDOUBLES = ('nn', 'NN', 'qq', 'QQ', 'xx', 'XX', 'yy', 'YY') # Common errors\nPOSITIVES = ('y', 'Y')\nSTAR_IN = \"\\n ?-> \" # Question start\nCIRC_IN = \" --> \" # Other question/statement\nMORE_IN = \"\\n ?-> \" # Question start + newline\nSERIES_START = MORE_IN + \"Create a Time-Series Plot? ('Q'=quit): \"\nSERIES_END = MORE_IN + \"Create Another Time-Series Plot? ('Q'=quit): \"\nSAVE_FIGURE = True # Save plots to file\n\n\ndef test_data(z_data, max_all_data):\n \"\"\"Examine data to prevent an error in time series plot.\n\n Parameters\n ----------\n z_data = (Numpy array) data to plot\n max_all_data = maximum y value of all data\n\n Returns\n -------\n answer = (bool): True = data OK; False = error / do not plot\n \"\"\"\n # Examine if spread in data - check min/max.\n answer = True\n z_min = np.min(z_data)\n z_max = np.max(z_data)\n\n if z_max <= VERY_SMALL or z_min == z_max:\n # No plot possible!\n answer = False\n elif max_all_data <= VERY_SMALL:\n # No good plot possible!\n answer = False\n\n return answer\n\n\ndef check_series(simulation, final_max):\n \"\"\"Provide error check of series data.\n\n Parameters\n ----------\n simulation = (Numpy array) one simulation from analysis\n final_max = maximum value of all data\n\n Returns\n -------\n match_err = (bool) error in time series\n data_err = (bool) error in data\n \"\"\"\n # Error check time data (use one series to check)!\n # --> Check that the time string is greater than 1.\n leaker, time_match = \\\n setup_time_history_data(simulation)\n\n # --> Data range error - check on data errors.\n err_check = test_data(leaker, final_max)\n if err_check:\n data_err = False\n else:\n data_err = True # Error found\n\n # --> Time series error - check on length of time series.\n if len(time_match) > 1:\n match_err = False\n else:\n match_err = True # Error found\n\n return match_err, data_err\n\n\ndef round_exp(exp_value):\n \"\"\"Round-up exponential to desired degree.\n\n Parameters\n ----------\n exp_value = (float) axis value\n\n Returns\n -------\n target = (float) rounded exponential\n \"\"\"\n # Trap error if = 0.0.\n if exp_value != 0.0:\n # Make the number a decimal.\n core = abs(exp_value)\n level = math.trunc(math.log10(core))\n base = math.pow(10.0, level)\n root_exp = math.ceil(exp_value / base)\n\n # Round and then reconstitute the number.\n # adjust_number = round(root_exp, 1)\n target = root_exp * base # round to zero decimal\n else:\n target = exp_value\n\n return target\n\n\ndef setup_time_history_data(simulation):\n \"\"\"Get time history data from 
file.\n\n Parameters\n ----------\n simulation = (int) simulation number to plot\n\n Returns\n -------\n leakage_data = (NumPy array) CO2 data\n time_data = (NumPy array) time values\n \"\"\"\n # Construct path name to file.\n sim_number = str(simulation)\n file_name = scfg.RESULTS_NAME + sim_number\n subdirectory_path, destination = fileop.get_path_name(scfg.OUTPUT_DIR,\n file_name,\n scfg.EXTENSION_CSV)\n try:\n with open(destination, 'r', encoding=\"utf8\") as csvfile:\n csv_reader = csv.reader(csvfile)\n\n # Skip 2 header lines.\n header = next(csv_reader, None)\n next(csv_reader)\n\n # Do not read file without a header / empty file.\n if header is None:\n # No data.\n fileop.data_error(\"Data Error - Array Header is None! \"\n + \"-> No data found in file.\",\n subdirectory_path, file_name)\n else:\n # Create data array - iterate over each row after the header.\n data_list = []\n for row in csv_reader:\n data_list.append(row)\n\n # Convert list to NumPy array.\n npa = np.asarray(data_list, dtype=np.float64)\n\n # Slice array for data.\n time_data = npa[:, 1]\n leakage_data = npa[:, 2]\n\n except OSError as err:\n fileop.io_snag(err, subdirectory_path, file_name)\n time_data = np.array([]) # for inspection\n leakage_data = np.array([]) # for inspection\n\n return leakage_data, time_data\n\n\ndef simulation_query(max_realizations, start_value=0):\n \"\"\"Get the simulation number to plot.\n\n Parameters\n ----------\n max_realizations = (int) maximum number of realizations\n start_value = (int) minimum number of realizations\n\n Returns\n -------\n simulation = (int) simulation number to plot - integer\n \"\"\"\n # Default values.\n repeat_loop = True\n response = False\n realization = -1\n entered = \"\"\n\n # Loop to get an appropriate response.\n while repeat_loop:\n try:\n # Get input with prompt.\n code = input(STAR_IN + \"Enter Realization Number \"\n + \"to Plot ('Q'=quit): \")\n # Check if user wishes to quit (alpha character).\n if code in NEGATIVES or code in EXIT_COMMAND:\n print(CIRC_IN + \"Exiting Plot Option. \\n\")\n repeat_loop = False\n break\n except ValueError:\n # Parse fails.\n print(ERR_IN + \"Invalid Number. \" + AGAIN2)\n continue\n\n # Check if \"entered\" is a number and then within correct range.\n try:\n entered = int(code)\n if entered > max_realizations:\n print(ERR_IN + f'You typed = {entered}')\n print(ERR_IN + \"This Number Exceeds the Maximum \"\n + f' of {max_realizations}! ', end='')\n print(AGAIN)\n elif entered <= 0:\n print(ERR_IN + f'You typed = {entered}')\n print(ERR_IN + \"The Input is Equal to, or Less than Zero! \",\n end='')\n print(AGAIN)\n elif entered <= start_value:\n print(ERR_IN + f'You typed = {entered}')\n print(ERR_IN + \"The Input is Less Than Starting Value\" +\n f' of {start_value}!', end='')\n print(AGAIN)\n else:\n # Input OK!\n # print(CIRC_IN + f\"Simulation Selected = {entered}\")\n realization = entered\n response = True\n repeat_loop = False\n break\n except ValueError:\n print(ERR_IN + f'You typed = {entered}')\n print(ERR_IN + \"This is Not a Number!! 
\", end='')\n print(AGAIN)\n continue\n\n # end while\n\n return response, realization\n\n\ndef create_history_query(query_stage):\n \"\"\"Check if user wants to plot a time series.\n\n Parameters\n ----------\n query_stage = (int) question queue\n 0 = first time\n 1 = plot again?\n\n Returns\n -------\n reply = answer plot question\n \"\"\"\n # Change message depending on place in query queue.\n if query_stage == 0:\n code = input(SERIES_START)\n else:\n code = input(SERIES_END)\n\n # correct typing error.\n if code in DOUBLES:\n code = code[0]\n\n # Get response.\n if code in POSITIVES:\n reply = True\n elif NEGATIVES or code in EXIT_COMMAND:\n reply = False\n else:\n reply = False # Default is negative!\n\n return reply\n\n\ndef range_query(max_realizations):\n \"\"\"Get simulation number to plot.\n\n Parameters\n ----------\n max_realizations = (int) maximum number of realizations\n\n Returns\n -------\n response = to plot or not - Boolean\n min_simulation = minimum simulation to plot - integer\n max_simulation = maximum simulation to plot - integer\n \"\"\"\n # Default values.\n end_run = 0\n\n # Get start simulation for start of run with prompt.\n print(START_TXT, end='')\n\n # Get simulation number.\n starter = 0\n response, start_run = simulation_query(max_realizations, starter)\n\n # If OK, get end of range of simulations.\n if response:\n print(END_TXT, end=\"\")\n starter = start_run\n response, end_run = simulation_query(max_realizations, starter)\n\n return response, start_run, end_run\n\n\ndef control_time_series(max_realizations, existence):\n \"\"\"Provide overall control to plot a series of simulation from output.\n\n Parameters\n ----------\n max_realizations = (int) upper limit of simulations from analysis\n existence = (dict) existence of fault\n\n Returns\n -------\n N/A\n \"\"\"\n # Loop until finished plotting.\n while True:\n # Query to get simulation numbers (start and end).\n response, start_run, end_run = range_query(max_realizations)\n\n # Plot - if user desires a plot.\n if response:\n # Get data and save NumPy arrays in list.\n # -- adjust counter to catch last run.\n data_list = []\n time_data = []\n final_max = 0.0\n\n # Construct plot data of series.\n existing_run = start_run\n plotted = []\n\n for realization in range(start_run, (end_run + 1)):\n # Only include a plot if fault data exists.\n # --> Existence list starts at 0!\n if existence[realization - 1]:\n leakage_data, time_data = \\\n setup_time_history_data(realization)\n data_list.append(leakage_data)\n instance_max = leakage_data.max()\n existing_run = realization\n if instance_max > final_max:\n final_max = instance_max\n plotted.append(realization)\n\n # Check and plot data.\n run_plot(time_data, data_list, final_max, plotted, existing_run)\n\n # Ask about another plot; exit if not.\n plot_again = create_history_query(1)\n if not plot_again:\n break\n else:\n break\n # end while loop\n\n # return None\n\n\ndef run_plot(time_data, data_list, final_max, plotted, existing_run):\n \"\"\"Check data and plot a series of simulation from output.\n\n Parameters\n ----------\n time_data = (NumPy array) time values\n data_list = (list of arrays) a list of NumPy arrays of leakage\n final_max = (float) maximum of y data\n plotted = (list) of runs\n existing_run = (NumPy array) of initial data array\n\n Returns\n -------\n N/A\n \"\"\"\n # Check data!\n match_err, data_err = check_series(existing_run, final_max)\n\n if len(plotted) > 0:\n print(LEAD_IN + \"Events Plotted with Faults = \", plotted)\n else:\n 
print(LEAD_IN + \"No Simulations in Range with Faults!\")\n\n # Plot data arrays on same plot diagram if data OK.\n if not match_err and not data_err:\n # Data is AOK - Plot!\n plot_time_series(time_data, data_list, final_max, plotted)\n\n elif match_err:\n # Time string error - terminate plotting.\n reason = \"Error: No Time Data for Plot!\"\n fileop.opx_problem(reason, err='')\n else:\n # Value error - continue logic.\n print(CMD_IN + \"Plot Issue! Data Negative Or Series Has \"\n + \"Maximum Equal To Minimum!\")\n print(CMD_IN + \"Plot Attempt Is Discontinued!\")\n\n # return None\n\n\ndef save_figure_to_file():\n \"\"\"Save figure of results to output file.\n\n Parameters\n ----------\n N/A\n\n Returns\n -------\n N/A\n \"\"\"\n # Create filename and then save plot to file.\n file_name = \"Fault_Flo-plot_\" + str(scfg.PLOT_NO)\n _, destination = fileop.get_path_name(scfg.OUTPUT_DIR,\n file_name,\n scfg.EXTENSION_PNG)\n plt.savefig(destination)\n\n # Increase figure number for next plot.\n scfg.PLOT_NO += 1\n\n # return None\n\n\ndef plot_time_series(x_data, data_list, data_maximum, plotted):\n \"\"\"Plot a series of leakage time histories (results).\n\n Parameters\n ----------\n x_data = (array) time data - assumed same for all\n data_list = (list of arrays) a list of NumPy arrays of leakage\n data_maximum = (float) maximum of y data\n plotted = (list) of runs\n\n Returns\n -------\n N/A\n\n Notes\n -----\n 1. Uses matplotlib functions to plot realizations.\n 2. Uses LaTex to format text.\n \"\"\"\n # Establish window size for plotting in inches.\n plt.figure(figsize=(WIDTH, HEIGHT), dpi=RESOLVE,\n num='Fault_Flux Simulations \\n')\n\n # Get exponent for y-axis - to normalize data.\n y_max = round_exp(data_maximum)\n cite = math.trunc(math.log10(abs(y_max)))\n y_max /= math.pow(10.0, cite)\n\n # Plot each \"normalized\" data set as line with label.\n for sim, results_array in enumerate(data_list):\n run_number = plotted[sim]\n describe = \"Simulation #\" + str(run_number)\n y_data = results_array / math.pow(10.0, cite)\n plt.plot(x_data, y_data, linestyle='solid', linewidth=1.0,\n label=describe)\n\n # Set axes limits to limit white space along axes.\n x_min_plot = math.floor(np.min(x_data))\n x_max_plot = math.ceil(np.max(x_data))\n plt.xlim(x_min_plot, x_max_plot)\n plt.ylim(0.0, y_max)\n\n # Hide axis title.\n pivot = plt.gca()\n pivot.yaxis.offsetText.set_visible(False)\n\n # Construct plot title w. 
Latex and provide axis labels and figure grid.\n new_label = r'Leakage ($\\times$10$^{%d}$ tonnes)' % cite\n plt.title(LEGEND_SERIES, fontdict=LEG_FONT)\n plt.xlabel('Time (years)', fontdict=REG_FONT)\n plt.ylabel(new_label, fontdict=REG_FONT)\n plt.grid(which='major', linewidth='0.5')\n\n # Plot key of some sort.\n if len(data_list) <= 25:\n # Plot key at upper left - if enough space - say 25 lines.\n plt.legend(loc=2, shadow=True, fancybox=True)\n else:\n # If large number of lines, plot number of lines in top left corner.\n new_label = f'Number of Simulations = {len(data_list)}'\n plt.text(150, 600, new_label, ha='left', va='center',\n transform=None, fontdict=REG_FONT)\n\n # Option to save plot to file or show on console.\n if SAVE_FIGURE:\n # Save to output file.\n save_figure_to_file()\n plt.show()\n else:\n # Else show figure on console.\n # Note: WINDOW BUG: interactive window and plot window\n # are not be active together.\n print(CAUTION)\n plt.show()\n # plt.waitforbuttonpress(0) # this will wait for indefinite time\n\n # Clear plot to prevent over-writes.\n plt.clf()\n\n # return None\n\n\ndef plot_manager(alive, fault_controls, existence):\n \"\"\"Control plots produced by program.\n\n Parameters\n ----------\n alive = (bool) status of program; alive = True if stand-alone\n fault_controls = (dict) dictionary of fault controls\n existence = (dict) of fault existence\n\n Returns\n -------\n N/A\n \"\"\"\n # Check if in stand-alone operation mode.\n if alive:\n # Print header, if plotting is desired.\n if fault_controls['plot_time_history']:\n print(MAJ_IN + \"PLOT OPTIONS.\", end='')\n\n # Inquire on plotting; limit selection of plot numbers in query.\n max_realizations = (fault_controls['realizations'])\n initial_query = 0\n response = create_history_query(initial_query)\n\n if response:\n control_time_series(max_realizations, existence)\n\n # return None\n\n\n#\n# -----------------------------------------------------------------------------\n# End of module\n", "repo_name": "equinor/NRAP-Open-IAM_GH", "sub_path": "source/components/fault/fault_flow/flt_plot.py", "file_name": "flt_plot.py", "file_ext": "py", "file_size_in_byte": 18031, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.min", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 71, "usage_type": "call"}, {"api_name": "math.trunc", "line_number": 132, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 132, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 133, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 134, "usage_type": "call"}, {"api_name": "flt_config.RESULTS_NAME", "line_number": 159, "usage_type": "attribute"}, {"api_name": "flt_file.get_path_name", "line_number": 160, "usage_type": "call"}, {"api_name": "flt_config.OUTPUT_DIR", "line_number": 160, "usage_type": "attribute"}, {"api_name": "flt_config.EXTENSION_CSV", "line_number": 162, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 165, "usage_type": "call"}, {"api_name": "flt_file.data_error", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 184, "usage_type": "attribute"}, {"api_name": "flt_file.io_snag", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 
193, "usage_type": "call"}, {"api_name": "flt_file.opx_problem", "line_number": 421, "usage_type": "call"}, {"api_name": "flt_config.PLOT_NO", "line_number": 443, "usage_type": "attribute"}, {"api_name": "flt_file.get_path_name", "line_number": 444, "usage_type": "call"}, {"api_name": "flt_config.OUTPUT_DIR", "line_number": 444, "usage_type": "attribute"}, {"api_name": "flt_config.EXTENSION_PNG", "line_number": 446, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 447, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 447, "usage_type": "name"}, {"api_name": "flt_config.PLOT_NO", "line_number": 450, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 475, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 475, "usage_type": "name"}, {"api_name": "math.trunc", "line_number": 480, "usage_type": "call"}, {"api_name": "math.log10", "line_number": 480, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 481, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "math.floor", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 492, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 493, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 493, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 494, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 495, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 498, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 503, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 503, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 504, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 504, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 505, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 505, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 506, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 506, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 511, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 511, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 515, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 515, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}]} +{"seq_id": "10307034968", "text": "import numpy as np\nfrom matplotlib 
import pyplot as plt\n\n\nclass NN:\n def __init__(self, x, y, layer_dimension, learning_rate):\n \"\"\"\n :param x: training data x\n :param y: training data y\n :param layer_dimension: dimension of network ie: [num, num...]\n :param learning_rate: learning rate alpha\n \"\"\"\n self.n, self.m = x.shape\n self.layer_dim = [self.n] + layer_dimension\n self.y = y\n self.alpha = learning_rate\n self.L = len(self.layer_dim)\n self.parameters = {}\n self.grad = {}\n\n def ini_parameters(self):\n \"\"\"\n initialize parameters (two methods)\n :return: parameters dic(w,b)\n \"\"\"\n for i in range(1, self.L):\n # self.parameters['w' + str(i)] = np.random.randn(self.layer_dim[i],self.layer_dim[i-1]) * 0.01 # method 1\n self.parameters['w' + str(i)] = np.random.randn(self.layer_dim[i], self.layer_dim[i-1])\\\n * np.sqrt(2.0 / self.layer_dim[i-1]) # method 2\n self.parameters['b' + str(i)] = np.zeros((self.layer_dim[i], 1))\n assert self.parameters['w' + str(i)].shape == (self.layer_dim[i], self.layer_dim[i-1])\n assert self.parameters['b' + str(i)].shape == (self.layer_dim[i], 1)\n\n return self.parameters\n\n @staticmethod\n def sigmoid(z):\n # Sigmoid function\n A = 1 / (1 + np.exp(-z))\n cache = z # activation cache\n assert A.shape == z.shape\n\n return A, cache\n\n @staticmethod\n def relu(z):\n # ReLU function\n A = np.maximum(0, z)\n cache = z # activation cache\n assert A.shape == z.shape\n return A, cache\n\n @staticmethod\n def linear_forward(A, W, b):\n \"\"\"\n :param A: activation\n :param W: weights\n :param b: bias\n :return: z, linear cache\n \"\"\"\n # print(f'W:{W.shape}')\n # print(f'A:{A.shape}')\n z = W.dot(A) + b\n cache = (A, W, b) # linear cache\n assert z.shape == (W.shape[0], A.shape[1])\n return z, cache\n\n def activation_forward(self, pre_A, W, b, method):\n \"\"\"\n :param pre_A: activation from previous layer\n :param W: weights\n :param b: bias\n :param method: Sigmoid/ReLU\n :return: cache set of linear cache and activation cache\n \"\"\"\n if method == 'sigmoid':\n # use a[l-1] to compute a[l]\n z, linear_cache = self.linear_forward(pre_A, W, b)\n A, activation_cache = self.sigmoid(z)\n if method == 'relu':\n # use a[l-1] to compute a[l]\n z, linear_cache = self.linear_forward(pre_A, W, b)\n A, activation_cache = self.relu(z)\n\n assert A.shape == (W.shape[0], pre_A.shape[1])\n\n cache = (linear_cache, activation_cache)\n\n return A, cache\n\n def forward_prop(self, x, parameters):\n \"\"\"\n forward propagation\n :param x: data x\n :param parameters: parameters dic\n :return: activation of the last layer, cache list of cache sets\n \"\"\"\n A = x # A0 = x\n cache_list = []\n L = len(parameters) // 2\n\n # forward propagation through hidden layers\n for l in range(1, L):\n pre_A = A\n A, cache = self.activation_forward(pre_A, parameters['w' + str(l)], parameters['b' + str(l)], method='relu')\n cache_list.append(cache) # (linear_cache, activation_cache)\n\n # forward propagation through last layer\n final_A, cache = self.activation_forward(A, parameters['w' + str(L)], parameters['b' + str(L)], method='sigmoid')\n cache_list.append(cache)\n assert final_A.shape == (1, x.shape[1]) # (1, m)\n\n return final_A, cache_list\n\n def cost_function(self, final_A, y):\n \"\"\"\n compute cost\n :param final_A: activation of the last layer\n :param y: y data\n :return: cost\n \"\"\"\n # binary classification loss\n cost = (-1/self.m) * np.sum(y*np.log(final_A) + (1-y)*np.log(1-final_A), axis=1, keepdims=True)\n cost = np.squeeze(cost)\n assert cost.shape == ()\n\n return 
cost\n\n @staticmethod\n def sigmoid_back(dA, activation_cache):\n \"\"\"\n :param dA: d(loss)/d(a)\n :param activation_cache: z\n :return: d(loss)/d(z)\n \"\"\"\n z = activation_cache\n s = 1/(1+np.exp(-z))\n dz = dA * s * (1-s) # d(loss)/d(z) = d(loss)/d(a) * d(a)/d(z)\n assert dz.shape == z.shape\n\n return dz\n\n @staticmethod\n def relu_back(dA, activation_cache):\n \"\"\"\n :param dA: d(loss)/d(a)\n :param activation_cache: z\n :return: d(loss)/d(z)\n \"\"\"\n z = activation_cache\n dz = np.array(dA)\n dz[z <= 0] = 0\n return dz\n\n @staticmethod\n def linear_back(dz, linear_cache):\n \"\"\"\n :param dz: d(loss)/d(z)\n :param linear_cache: previous layer activation, weights, bias\n :return: d(loss)/d(a_previous_layer), d(loss)/d(weights), d(loss)/d(bias)\n \"\"\"\n pre_A, W, b = linear_cache\n m = pre_A.shape[1]\n\n dw = (1/m) * dz.dot(pre_A.T)\n db = (1/m) * np.sum(dz, axis=1, keepdims=True)\n dpre_A = np.dot(W.T, dz)\n\n assert dw.shape == W.shape\n assert db.shape == b.shape\n assert dpre_A.shape == pre_A.shape\n\n return dpre_A, dw, db\n\n def activation_back(self, dA, cache_list, method):\n \"\"\"\n :param dA: d(loss).d(activation)\n :param cache_list: cache set of linear cache and activation cache\n :param method: Sigmoid/ReLU\n :return: d(loss)/d(activation_previous_layer), d(loss)/d(weights), d(loss)/d(bias)\n \"\"\"\n linear_cache, activation_cache = cache_list\n\n if method == 'sigmoid':\n dz = self.sigmoid_back(dA, activation_cache)\n dpre_A, dw, db = self.linear_back(dz, linear_cache)\n if method == 'relu':\n dz = self.relu_back(dA, activation_cache)\n dpre_A, dw, db = self.linear_back(dz, linear_cache)\n return dpre_A, dw, db\n\n def back_prop(self, final_A, cache_list):\n \"\"\"\n back propagation\n :param final_A: activation of the last layer\n :param cache_list: cache list of cache sets\n :return: gradient\n \"\"\"\n # print(f'final_A: {final_A.shape}')\n assert final_A.shape == self.y.shape\n L = len(cache_list)\n\n # back propagation though last layer\n dfinal_A = - (np.divide(self.y, final_A) - np.divide(1-self.y, 1-final_A))\n\n grad_cache = self.activation_back(dfinal_A, cache_list[L-1], 'sigmoid')\n self.grad['dA' + str(L-1)] = grad_cache[0]\n self.grad['dw' + str(L)] = grad_cache[1]\n self.grad['db' + str(L)] = grad_cache[2]\n\n # back propagation though the last layer(L-1) to layer 1(l+1)\n for l in reversed(range(L-1)):\n grad_cache = self.activation_back(self.grad['dA' + str(l+1)], cache_list[l], 'relu')\n self.grad['dA' + str(l)] = grad_cache[0]\n self.grad['dw' + str(l+1)] = grad_cache[1]\n self.grad['db' + str(l+1)] = grad_cache[2]\n return self.grad\n\n def parameter_update(self):\n \"\"\"\n :return: updated parameters(weights, bias)\n \"\"\"\n L = len(self.parameters) // 2\n for l in range(L):\n self.parameters['w' + str(l+1)] = self.parameters['w' + str(l+1)] - self.alpha * self.grad['dw' + str(l+1)]\n self.parameters['b' + str(l+1)] = self.parameters['b' + str(l+1)] - self.alpha * self.grad['db' + str(l+1)]\n return self.parameters\n\n def train(self, x, y, iter_num=100):\n \"\"\"\n :param x: data x\n :param y: data y\n :param iter_num: number of iterations\n :return: parameters(weights, bias)\n \"\"\"\n self.parameters = self.ini_parameters()\n costs = []\n\n for i in range(iter_num):\n final_A, cache_list = self.forward_prop(x, self.parameters)\n cost = self.cost_function(final_A, y)\n costs.append(cost)\n self.grad = self.back_prop(final_A, cache_list)\n self.parameters = self.parameter_update()\n if i % 100 == 0 and i != 0:\n # print cost every 
100 iterations\n print(f'iteration: {i} cost: {cost}')\n\n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('epochs')\n plt.title(f'Learning rate = {self.alpha}')\n plt.show()\n return self.parameters\n\n def predict(self, x, y):\n \"\"\"\n predict labels\n :param x: test x\n :param y: test y\n :return:\n \"\"\"\n n, m = x.shape\n y_pred, _ = self.forward_prop(x, self.parameters)\n for i in range(0, y_pred.shape[1]):\n if y_pred[0, i] > 0.5:\n y_pred[0, i] = 1\n else:\n y_pred[0, i] = 0\n print(f'Accuracy: {np.sum(y_pred==y)/m}')\n", "repo_name": "svakeczw/MachineLearning-Algorithm-Python", "sub_path": "NeuralNetwork.py", "file_name": "NeuralNetwork.py", "file_ext": "py", "file_size_in_byte": 9078, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.random.randn", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 250, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 250, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "40556480307", "text": "from omni.kit.scripting import BehaviorScript\nfrom pxr import Sdf, UsdLux\nimport omni.kit.commands\nimport carb\nimport numpy\nimport math\n\nclass Light(BehaviorScript):\n def on_init(self):\n # Timeline Subscription\n timeline_stream = self.timeline.get_timeline_event_stream()\n self._timeline_sub = timeline_stream.create_subscription_to_pop(self._on_timeline_event)\n \n # Parameters\n self.recieved_data = False\n self.pitch = []\n self.total_duration = 0.0\n self.start_times = []\n self.curr_index = 1\n self.new_radius = 0\n self.prim = self._stage.GetPrimAtPath(self._prim_path)\n self._change_light_size(0)\n self.pitch_pos = int(self.prim.GetAttribute('Pitch').Get()) - 1\n\n def on_destroy(self):\n self._timeline_sub = None\n\n def on_update(self, current_time: float, delta_time: float):\n 
carb.log_info(f\"Current Time: {current_time}\")\n if not self.recieved_data:\n world = self._stage.GetPrimAtPath('/World')\n attr_name = 'pitch' + str(self.pitch_pos)\n self.pitch = world.GetAttribute(attr_name).Get()\n self.total_duration = world.GetAttribute('duration').Get()\n self.start_times = world.GetAttribute('beat_start_time').Get()\n self.recieved_data = True\n\n if self._is_time_close(current_time, self.start_times[self.curr_index]):\n self.new_radius = self._get_light_value()\n self.curr_index += 1\n\n light = self._lerp(self.new_radius, 0, self._get_time_diff(current_time))\n self._change_light_size(light)\n\n # Are we close between the current time and the next start time?\n def _is_time_close(self, a, b) -> bool:\n if a > b:\n return True\n return math.isclose(a, b, abs_tol=1e-2)\n\n def _lerp(self, a, b, c) -> float:\n c = numpy.clip(c, 0, 1)\n return (c * a) + ((1 - c) * b)\n\n def _get_time_diff(self, curr_time):\n top = self.start_times[self.curr_index] - curr_time\n bottom = (self.start_times[self.curr_index] - self.start_times[self.curr_index - 1])\n x = top / bottom\n return x\n\n def _get_light_value(self):\n pitch_value = self.pitch[self.curr_index] * 35\n return pitch_value \n\n def _change_light_size(self, light_size):\n if not self.prim.IsValid():\n return\n if self.prim.IsA(UsdLux.DiskLight):\n omni.kit.commands.execute('ChangeProperty',\n prop_path=Sdf.Path(str(self._prim_path) + '.radius'),\n value=light_size,\n prev=0)\n elif self.prim.IsA(UsdLux.RectLight):\n omni.kit.commands.execute('ChangeProperty',\n prop_path=Sdf.Path(str(self._prim_path) + '.intensity'),\n value=light_size*10,\n prev=0)\n \n def _on_timeline_event(self, e: carb.events.IEvent):\n if e.type == int(omni.timeline.TimelineEventType.STOP):\n self._change_light_size(0)\n self.pitch = []\n self.total_duration = 0.0\n self.start_times = []\n self.curr_index = 1\n self.recieved_data = False\n \n", "repo_name": "JenNVIDIA/musical-lights", "sub_path": "exts/jen.music.lights/behaviorscripts/light.py", "file_name": "light.py", "file_ext": "py", "file_size_in_byte": 3219, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "omni.kit.scripting.BehaviorScript", "line_number": 8, "usage_type": "name"}, {"api_name": "carb.log_info", "line_number": 29, "usage_type": "call"}, {"api_name": "math.isclose", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 52, "usage_type": "call"}, {"api_name": "pxr.UsdLux.DiskLight", "line_number": 68, "usage_type": "attribute"}, {"api_name": "pxr.UsdLux", "line_number": 68, "usage_type": "name"}, {"api_name": "omni.kit.scripting.kit.commands.execute", "line_number": 69, "usage_type": "call"}, {"api_name": "omni.kit.scripting.kit", "line_number": 69, "usage_type": "attribute"}, {"api_name": "omni.kit.scripting", "line_number": 69, "usage_type": "name"}, {"api_name": "pxr.Sdf.Path", "line_number": 70, "usage_type": "call"}, {"api_name": "pxr.Sdf", "line_number": 70, "usage_type": "name"}, {"api_name": "pxr.UsdLux.RectLight", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pxr.UsdLux", "line_number": 73, "usage_type": "name"}, {"api_name": "omni.kit.scripting.kit.commands.execute", "line_number": 74, "usage_type": "call"}, {"api_name": "omni.kit.scripting.kit", "line_number": 74, "usage_type": "attribute"}, {"api_name": "omni.kit.scripting", "line_number": 74, "usage_type": "name"}, {"api_name": "pxr.Sdf.Path", "line_number": 75, "usage_type": 
"call"}, {"api_name": "pxr.Sdf", "line_number": 75, "usage_type": "name"}, {"api_name": "carb.events", "line_number": 79, "usage_type": "attribute"}, {"api_name": "omni.kit.scripting.timeline", "line_number": 80, "usage_type": "attribute"}, {"api_name": "omni.kit.scripting", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "13661647712", "text": "import sys\nfrom math import *\nfrom subprocess import *\nfrom pathlib import Path\nfrom shutil import rmtree\n\nn_frames = 1024\nresolution = \"720p\"\n\nresolutions = {\n \"2160p\": (3840, 2160),\n \"1440p\": (2560, 1440),\n \"1080p\": (1920, 1080),\n \"720p\": (1280, 720),\n \"480p\": (854, 480),\n \"360p\": (640, 360),\n \"240p\": (426, 240),\n}\n\nwidth, height = resolutions[resolution]\n\nbasepath = Path(\"frames\")\nrmtree(basepath, ignore_errors=True)\nbasepath.mkdir(exist_ok=True)\n\ndef render_frames(indices):\n ps = []\n for i in indices:\n print(\"Launching\", i + 1, \"/\", n_frames)\n filepath = basepath / \"frame_{:05d}.png\".format(i)\n t = i * 2*pi / n_frames\n p1 = Popen([\n \"./bin/render\",\n \"--time\", str(t),\n \"--width\", str(width),\n \"--height\", str(height)],\n stdout=PIPE, stderr=DEVNULL)\n p2 = Popen([\"convert\", \"pgm:-\", str(filepath)], stdin=p1.stdout)\n ps.append(p2)\n\n for i, p in zip(indices, ps):\n print(\"Waiting for\", i + 1, \"/\", len(ps))\n p.communicate()\n\nn_processes = 14\nn_rendered = 0\nwhile n_rendered < n_frames:\n end = min(n_rendered + n_processes, n_frames)\n render_frames(list(range(n_rendered, end)))\n n_rendered += n_processes\n\np = Popen([\n \"ffmpeg\", \"-framerate\", \"30\",\n \"-i\", str(basepath / \"frame_%05d.png\"),\n \"-s:v\", \"{}x{}\".format(width, height),\n \"-c:v\", \"libx264\",\n \"-profile:v\", \"high\",\n \"-crf\", \"20\",\n \"-pix_fmt\", \"yuv420p\",\n \"-y\",\n \"output.mp4\"])\np.communicate()\n", "repo_name": "frostburn/raytrace", "sub_path": "scripts/animate.py", "file_name": "animate.py", "file_ext": "py", "file_size_in_byte": 1538, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 22, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "39659378851", "text": "#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport sys\nfrom collections import Counter\nfrom ruamel.yaml import YAML\nimport typing as tp\nyaml = YAML(typ='safe')\n\nPROG = os.path.basename(sys.argv[0])\n\ndef main():\n parser = argparse.ArgumentParser(\n description='compute total weapon upgrade requirements',\n )\n parser.add_argument('--file', default='data/weapon-upgrades.yaml', help='ingredients yaml file')\n parser.add_argument('--current-levels', metavar='YAMLFILE', help='read current levels from a YAML file, so that the displayed counts do not count upgrades you already have')\n parser.add_argument('--markdown', action='store_true')\n parser.add_argument('--item-list', dest='order_file', metavar='TXTFILE', help='order the ingredients by this file')\n args = parser.parse_args()\n\n weapons = read_weapons(args.file)\n current_levels = None\n if args.current_levels is not None:\n current_levels = CurrentLevelsFile.from_path(args.current_levels)\n current_levels.validate_against_weapons(weapons)\n\n order = None\n if args.order_file is not None:\n order = OrderFile.from_path(args.order_file)\n\n counts = count_ingredients(weapons, current_levels)\n counts = apply_ordering(counts, order)\n if args.markdown:\n 
display_counts_md(counts)\n else:\n display_counts(counts)\n\ndef read_weapons(path: str):\n with open(path) as f:\n return yaml.load(f)\n\nclass CurrentLevelsFile:\n def __init__(self, levels: dict[str, int], path: str):\n self.levels = levels\n self.path = path\n\n @classmethod\n def from_path(cls, path: str):\n with open(path) as f:\n d = yaml.load(f)\n if not isinstance(d, dict):\n die(f'{path}: file must be a YAML mapping from weapon names to level numbers, got a {type(d)}')\n for key, level in d.items():\n if not isinstance(level, int):\n die(f'{path}: at {repr(key)}: level must be integer')\n if level < 1:\n die(f'{path}: at {repr(key)}: minimum level is 1')\n return cls(d, path)\n\n def validate_against_weapons(self, weapons: list):\n all_names = set(weapon['name'] for weapon in weapons)\n for name in self.levels:\n if name not in all_names:\n die(f'{self.path}: {repr(name)} is not a known weapon')\n\nclass OrderFile:\n def __init__(self, order: list[str], path: str):\n self.order = order\n self.path = path\n\n @classmethod\n def from_path(cls, path: str):\n with open(path) as f:\n order = list(f)\n order = [line.strip() for line in order]\n order = [line for line in order if line]\n return cls(order, path)\n\ndef count_ingredients(weapons: list, current_levels: CurrentLevelsFile | None):\n counter = Counter()\n for weapon in weapons:\n name = weapon['name']\n skipped_levels = 0 if current_levels is None else current_levels.levels.get(name, 1) - 1\n for upgrade in weapon['ingredients'][skipped_levels:]:\n counter.update(upgrade)\n return counter\n\ndef apply_ordering(counts: Counter[str], order: OrderFile | None) -> list[tuple[str, int]]:\n if order is None:\n return counts.most_common()\n else:\n return [(key, counts[key]) for key in order.order if counts[key] > 0]\n\ndef display_counts(counts: list[tuple[str, int]]):\n maxlen = max(len(key) for (key, _) in counts)\n for ingredient, count in counts:\n print(f'{ingredient:>{maxlen}} : {count}')\n\ndef display_counts_md(counts: list[tuple[str, int]]):\n maxlen = max(len(key) for (key, _) in counts)\n print('| Material | Count |')\n print('| ---:| ---:|')\n for ingredient, count in counts:\n print(f'| {ingredient:>{maxlen}} | {count:3} |')\n\n# ------------------------------------------------------\n\ndef warn(*args, **kw):\n print(f'{PROG}:', *args, file=sys.stderr, **kw)\n\ndef die(*args, code=1):\n warn('Fatal:', *args)\n sys.exit(code)\n\n# ------------------------------------------------------\n\nif __name__ == '__main__':\n main()\n", "repo_name": "exphp-share/automata-materials", "sub_path": "compute-totals.py", "file_name": "compute-totals.py", "file_ext": "py", "file_size_in_byte": 4105, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ruamel.yaml.YAML", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 82, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 90, "usage_type": "name"}, {"api_name": "sys.stderr", "line_number": 111, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 115, "usage_type": "call"}]} +{"seq_id": "22403620560", "text": "\"\"\"\nA lambda to 
encode a manifest URL so that it points to the \n\"ManifestProxy\" lambda. The \"ManifestProxy\" lambda is able\nto extract the original URL and fetch it from the origin.\n\"\"\"\nimport binascii\nimport json\nimport logging\nimport urllib\n\nimport azure.functions as func\n\n# pylint: disable=relative-beyond-top-level\nfrom ..shared_code.request import encode_url\n\ndef extract_url_from_request(req: func.HttpRequest) -> str:\n \"\"\"\n Check the request for a \"url\" field.\n It searches for a CGI parameter, an application/x-www-form-urlencoded\n form or an application/json payload.\n \"\"\"\n url = req.params.get('url')\n if url:\n return url\n content_type = req.headers.get('Content-Type', '')\n logging.debug('content type: %s', content_type)\n if 'json' in content_type:\n req_body = req.get_json()\n return req_body.get('url')\n if 'x-www-form-urlencoded' in content_type:\n form = urllib.parse.parse_qs(req.get_body())\n if b'url' in form:\n return str(form[b'url'][0], 'utf-8')\n return form[\"url\"]\n raise ValueError(\"Unknown payload type\")\n\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('URLCreate HTTP trigger function processed a request. %s',\n req.method)\n\n if req.method == 'POST' or req.params.get('url'):\n try:\n mpd_url = extract_url_from_request(req)\n except (ValueError, KeyError) as err:\n logging.error(\"Failed to extract URL field: %s %s\", type(err), err)\n mpd_url = None\n if not mpd_url:\n return func.HttpResponse(\n 'Field \"url\" is required either in the query string or in the request body',\n status_code=400\n )\n mpd_parts = urllib.parse.urlparse(mpd_url)\n mpd_url = urllib.parse.urlunsplit((mpd_parts.scheme, mpd_parts.netloc,\n mpd_parts.path, '', ''))\n parts = urllib.parse.urlparse(req.url)\n manifest_url = ['http://', parts.hostname]\n if parts.port and parts.port != 80:\n manifest_url.append(f':{parts.port}')\n manifest_url.append('/api/dash/')\n manifest_url.append(encode_url(mpd_url))\n if mpd_parts.query:\n manifest_url.append('?')\n manifest_url.append(mpd_parts.query)\n result = dict(url=''.join(manifest_url))\n return func.HttpResponse(body=json.dumps(result), mimetype=\"application/json\",\n status_code=200)\n body = \"\"\"\n Manifest URL generator\n \n
\n \n \n \n
\n \n \"\"\"\n return func.HttpResponse(body=body, mimetype='text/html', status_code=200)\n", "repo_name": "asrashley/dashpiff", "sub_path": "URLCreate/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2893, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "azure.functions.HttpRequest", "line_number": 16, "usage_type": "attribute"}, {"api_name": "azure.functions", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.parse.parse_qs", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 31, "usage_type": "attribute"}, {"api_name": "azure.functions.HttpRequest", "line_number": 38, "usage_type": "attribute"}, {"api_name": "azure.functions", "line_number": 38, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 46, "usage_type": "call"}, {"api_name": "azure.functions.HttpResponse", "line_number": 49, "usage_type": "call"}, {"api_name": "azure.functions", "line_number": 49, "usage_type": "name"}, {"api_name": "urllib.parse.urlparse", "line_number": 53, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 53, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlunsplit", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 54, "usage_type": "attribute"}, {"api_name": "urllib.parse.urlparse", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 56, "usage_type": "attribute"}, {"api_name": "shared_code.request.encode_url", "line_number": 61, "usage_type": "call"}, {"api_name": "azure.functions.HttpResponse", "line_number": 66, "usage_type": "call"}, {"api_name": "azure.functions", "line_number": 66, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 66, "usage_type": "call"}, {"api_name": "azure.functions.HttpResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "azure.functions", "line_number": 78, "usage_type": "name"}, {"api_name": "azure.functions.HttpResponse", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "15348100169", "text": "import discord\nfrom discord.ext import commands\nimport utils\n\nimport math\n\nfrom fuzzywuzzy import fuzz, process\n\nhelp = utils.get_json(\"help\")\nfor i in range(len(help)): help[i][\"embed\"] = discord.Embed.from_dict(help[i][\"embed\"])\n\n# Show command descriptions.\nclass Help(commands.Cog):\n\tdef __init__(self, client):\n\t\tself.client = client\n\t\n\t@commands.command(aliases = [\"commands\", \"cmd\"])\n\tasync def help(self, ctx, *cmd):\n\t\tif len(cmd) == 0:\n\t\t\t\n\t\t\tasync def help_menu(_):\n\t\t\t\tyield math.ceil(len(help) / 5)\n\t\t\t\tfor chunk in utils.chunks(help, 5):\n\t\t\t\t\thelp_embed = (discord.Embed(\n\t\t\t\t\t\ttitle = \"Commands\",\n\t\t\t\t\t\tdescription = \"This bot's prefix is `l.`\",\n\t\t\t\t\t\tcolor = utils.embed_color)\n\t\t\t\t\t\t.set_footer(\n\t\t\t\t\t\t\ttext = f\"https://github.com/Camto/Lad - In {len(self.client.guilds)} servers!\",\n\t\t\t\t\t\t\ticon_url = utils.icons[\"github\"]))\n\t\t\t\t\tfor cmd_help in chunk:\n\t\t\t\t\t\thelp_embed.add_field(\n\t\t\t\t\t\t\tname = cmd_help[\"cmd\"],\n\t\t\t\t\t\t\tvalue = cmd_help[\"msg\"],\n\t\t\t\t\t\t\tinline = False)\n\t\t\t\t\tyield help_embed\n\t\t\t\n\t\t\tawait utils.menus.list(self.client, ctx, 
help_menu)\n\t\telse:\n\t\t\thelp_category = process.extractOne(cmd[0], list(map(lambda cmd: cmd[\"cmd\"], help)))[0]\n\t\t\tawait ctx.send(embed = list(filter(lambda cmd_help: cmd_help[\"cmd\"] == help_category, help))[0][\"embed\"])\n\ndef setup(client):\n\tclient.add_cog(Help(client))", "repo_name": "Camto/Lad", "sub_path": "Cogs/help.py", "file_name": "help.py", "file_ext": "py", "file_size_in_byte": 1320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "utils.get_json", "line_number": 9, "usage_type": "call"}, {"api_name": "discord.Embed.from_dict", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Cog", "line_number": 13, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 13, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 22, "usage_type": "call"}, {"api_name": "utils.chunks", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.embed_color", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.icons", "line_number": 30, "usage_type": "attribute"}, {"api_name": "utils.menus.list", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.menus", "line_number": 38, "usage_type": "attribute"}, {"api_name": "fuzzywuzzy.process.extractOne", "line_number": 40, "usage_type": "call"}, {"api_name": "fuzzywuzzy.process", "line_number": 40, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 17, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "6894891018", "text": "\"\"\" This program captures screenshot and the key pressed by the user at the time of the screenshot.\"\"\"\n\n\nimport win32api as wpi\nimport win32con as wpc\nimport time\nimport cv2\nimport numpy as np\n\n\nkeyList = [wpc.VK_SPACE, 0x51]\n\n\ndef keys():\n \"\"\"Retrieves the associated key with snapshot.\"\"\"\n keys_array = []\n for key in keyList:\n if isinstance(key, int):\n if wpi.GetAsynckeyState(key):\n keys_array.append(key)\n if wpc.VK_SPACE in keys_array:\n return wpc.VK_SPACE\n elif 0x51 in keys_array: # 'Q' for quit.\n return 0x51\n\n\ndef screen_capture(x1, y1, x2, y2):\n \"\"\"Captures screenshot.\"\"\"\n hwin = wpi.GetDesktopWindow()\n width = wpi.GetSystemMetrics(wpc.SM_CXVIRTUALSCREEN)\n height = wpi.GetSystemMetrics(wpc.SM_CYVIRTUALSCREEN)\n left = wpi.GetSystemMetrics(wpc.SM_XVIRTUALSCREEN)\n top = wpi.GetSystemMetrics(wpc.SM_YVIRTUALSCREEN)\n\n hwindc = wpi.GetWindowDC(hwin)\n srcdc = wpi.CreateCompatibleDC(hwindc)\n memdc = wpi.CreateCompatibleDC(hwindc)\n\n bmp = wpi.CreateCompatibleBitmap(hwindc, width, height)\n wpi.SelectObject(memdc, bmp)\n wpi.BitBlt(memdc, 0, 0, width, height, srcdc, left, top, wpc.SRCCOPY)\n\n signedIntsArray = wpi.GetBitmapBits(bmp, height * width * 4)\n img = np.frombuffer(signedIntsArray, dtype='uint8')\n img.shape = (height, width, 4)\n\n wpi.DeleteObject(bmp)\n wpi.DeleteDC(srcdc)\n wpi.DeleteDC(memdc)\n wpi.ReleaseDC(hwin, hwindc)\n\n return img\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n print(\"Press 'Q' to quit.\")\n while True:\n if keys() == 0x51: # 'Q' for quit.\n break\n else:\n img = screen_capture(640, 360, 1280, 720)\n cv2.imshow('Screen', img)\n cv2.waitKey(1)\n\n\nif __name__ == '__main__':\n main()", "repo_name": 
"avelev99/Alto_Adventure_Reinforcement_RL", "sub_path": "Window_capture/ALTERNATIVE_Screencapture.py", "file_name": "ALTERNATIVE_Screencapture.py", "file_ext": "py", "file_size_in_byte": 1799, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "win32con.VK_SPACE", "line_number": 11, "usage_type": "attribute"}, {"api_name": "win32api.GetAsynckeyState", "line_number": 19, "usage_type": "call"}, {"api_name": "win32con.VK_SPACE", "line_number": 21, "usage_type": "attribute"}, {"api_name": "win32con.VK_SPACE", "line_number": 22, "usage_type": "attribute"}, {"api_name": "win32api.GetDesktopWindow", "line_number": 29, "usage_type": "call"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 30, "usage_type": "call"}, {"api_name": "win32con.SM_CXVIRTUALSCREEN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 31, "usage_type": "call"}, {"api_name": "win32con.SM_CYVIRTUALSCREEN", "line_number": 31, "usage_type": "attribute"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 32, "usage_type": "call"}, {"api_name": "win32con.SM_XVIRTUALSCREEN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "win32api.GetSystemMetrics", "line_number": 33, "usage_type": "call"}, {"api_name": "win32con.SM_YVIRTUALSCREEN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "win32api.GetWindowDC", "line_number": 35, "usage_type": "call"}, {"api_name": "win32api.CreateCompatibleDC", "line_number": 36, "usage_type": "call"}, {"api_name": "win32api.CreateCompatibleDC", "line_number": 37, "usage_type": "call"}, {"api_name": "win32api.CreateCompatibleBitmap", "line_number": 39, "usage_type": "call"}, {"api_name": "win32api.SelectObject", "line_number": 40, "usage_type": "call"}, {"api_name": "win32api.BitBlt", "line_number": 41, "usage_type": "call"}, {"api_name": "win32con.SRCCOPY", "line_number": 41, "usage_type": "attribute"}, {"api_name": "win32api.GetBitmapBits", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 44, "usage_type": "call"}, {"api_name": "win32api.DeleteObject", "line_number": 47, "usage_type": "call"}, {"api_name": "win32api.DeleteDC", "line_number": 48, "usage_type": "call"}, {"api_name": "win32api.DeleteDC", "line_number": 49, "usage_type": "call"}, {"api_name": "win32api.ReleaseDC", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 63, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "29687513791", "text": "import os\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton,QFileDialog, QGridLayout, QStackedWidget, QMessageBox\r\nfrom PyQt5.QtGui import QFont, QIcon\r\nimport sys\r\nimport codecs\r\n\r\n\r\nclass Main_Page(QWidget):\r\n judge_file_open = 0\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.initUI()\r\n\r\n def initUI(self):\r\n\r\n global Path_Raw_Text\r\n Path_Raw_Text = \"C:\\\\Program Files (x86)\\\\ETS-Lindgren\\\\EMQuest\\\\Final Data\" # \"C:\\\\Users\\\\jiyoon_kim\\\\Desktop\\\\USA_Naming\\\\Final Data\"\r\n\r\n btn1 = QPushButton('대상 폴더', self)\r\n\r\n btn1.setMaximumWidth(180)\r\n btn1.setMaximumHeight(60)\r\n\r\n btn1.setFont(QFont('Arial', 13, QFont.Bold))\r\n btn1.setStyleSheet(\"color : white;\"\r\n \"background-color : rgb(255, 190, 11);\"\r\n \"border-radius : 5px;\"\r\n )\r\n\r\n\r\n btn1.clicked.connect(self.getData_raw)\r\n\r\n btn2 = QPushButton(\"Run!!\", 
self)\r\n\r\n btn2.setMaximumWidth(180)\r\n btn2.setMaximumHeight(60)\r\n\r\n btn2.setFont(QFont('Arial', 13, QFont.Bold))\r\n btn2.setStyleSheet(\"color : white;\"\r\n \"background-color : rgb(255, 190, 11);\"\r\n \"border-radius : 5px;\"\r\n )\r\n\r\n btn2.clicked.connect(self.clickMethod)\r\n btn2.clicked.connect(self.run)\r\n grid = QGridLayout()\r\n grid.addWidget(btn1, 1, 1)\r\n grid.addWidget(btn2, 1, 3)\r\n grid.setColumnStretch(0, 1)\r\n grid.setColumnStretch(1, 1)\r\n grid.setColumnStretch(2, 1)\r\n grid.setColumnStretch(3, 1)\r\n grid.setColumnStretch(4, 1)\r\n\r\n self.setLayout(grid)\r\n\r\n self.show()\r\n\r\n def getData_raw(self):\r\n global Path_Raw\r\n self.judge_file_open = 1\r\n Path_Raw = str(QFileDialog.getExistingDirectory(self, \"Select Directory\"))\r\n print(Path_Raw)\r\n\r\n\r\n def run(self):\r\n if self.judge_file_open == 0:\r\n return 0\r\n\r\n num_of_check = 1000\r\n\r\n # 경로 지정\r\n\r\n raw_names = os.listdir(Path_Raw)\r\n\r\n text_names = os.listdir(Path_Raw_Text)[-num_of_check:]\r\n\r\n print(text_names)\r\n print(raw_names)\r\n\r\n # 텍스트에 있는 이름 저장\r\n for text in text_names:\r\n for config in raw_names:\r\n if text.replace(\" .txt\",\"\") in config:\r\n with codecs.open(Path_Raw_Text + \"\\\\\" + text, 'r', encoding='utf-8', errors='ignore') as f:\r\n lines = f.readlines()\r\n find_date = config[config.find(\".\")-4:config.find(\".\")+15]\r\n if 'BHHR' in Path_Raw:\r\n new_name = lines[10].strip() + ' BHHR ' + find_date + '.raw'\r\n elif 'BHHL' in Path_Raw:\r\n new_name = lines[10].strip() + ' BHHL ' + find_date + '.raw'\r\n elif 'HR' in Path_Raw:\r\n new_name = lines[10].strip() + ' HR ' + find_date + '.raw'\r\n elif 'HL' in Path_Raw:\r\n new_name = lines[10].strip() + ' HL ' + find_date + '.raw'\r\n else:\r\n new_name = lines[10].strip() + ' FS ' + find_date + '.raw'\r\n\r\n os.rename(os.path.join(Path_Raw, config), os.path.join(Path_Raw, new_name))\r\n\r\n #-------------------------------------------------------------------------------------------------------------\r\n\r\n\r\n def clickMethod(self): # 폴더 지정 안했을 때 에러 처리\r\n if os.path.isdir(Path_Raw_Text) == False:\r\n msg = QMessageBox()\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setText(\"에러\")\r\n msg.setText(\"텍스트 파일의 경로가 잘못 되었습니다.\")\r\n msg.setWindowTitle(\"Error\")\r\n msg.exec_()\r\n exit()\r\n\r\n elif self.judge_file_open == 0:\r\n msg = QMessageBox()\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setText(\"에러\")\r\n msg.setText(\"폴더를 선택하세요.\")\r\n msg.setWindowTitle(\"Error\")\r\n msg.exec_()\r\n\r\n else:\r\n QMessageBox.information(self,\"Complete\",\"완료되었습니다.\")\r\n\r\n\r\n\r\nclass Vari_QStackedWidget(QStackedWidget):\r\n\r\n def closeEvent(self, event):\r\n reply = QMessageBox.question(self, 'Message', '종료하시겠습니까?',\r\n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\r\n\r\n if reply == QMessageBox.Yes:\r\n event.accept()\r\n else:\r\n event.ignore()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\r\n app = QApplication(sys.argv)\r\n ex = Main_Page()\r\n widget = Vari_QStackedWidget()\r\n widget.addWidget(ex)\r\n\r\n widget.setWindowTitle(\"SGS Naming Auto_jiyoonkim\")\r\n widget.setWindowIcon(QIcon(\"wraith.ico\"))\r\n widget.resize(1000, 500)\r\n\r\n widget.show()\r\n sys.exit(app.exec_())", "repo_name": "kyie0913/USA_Project-Auto_Naming", "sub_path": "USA_Auto_Naming_1.0.3.py", "file_name": "USA_Auto_Naming_1.0.3.py", "file_ext": "py", "file_size_in_byte": 5130, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": 
[{"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont.Bold", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QPushButton", "line_number": 34, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont", "line_number": 39, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QFont.Bold", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QGridLayout", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 63, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 63, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 77, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 86, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 107, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Critical", "line_number": 108, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 108, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 116, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Critical", "line_number": 117, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 117, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.information", "line_number": 124, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 124, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QStackedWidget", "line_number": 128, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.question", "line_number": 131, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 131, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 132, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 132, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.No", "line_number": 132, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox.Yes", "line_number": 134, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMessageBox", "line_number": 134, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 143, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 143, "usage_type": "attribute"}, {"api_name": "PyQt5.QtGui.QIcon", "line_number": 149, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "28023240381", "text": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import CreateView, DetailView, 
UpdateView\nfrom feed.models import Post\nfrom users.models import User\nfrom .models import Group\nfrom .views_helpers import *\n\n\nclass GroupCreate(LoginRequiredMixin, CreateView):\n model = Group\n template_name = 'groups/create.html'\n fields = ['title', 'info', 'img']\n \n def get_success_url(self):\n return reverse('groups:group', kwargs={'id':self.object.id})\n \n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.owner = self.request.user\n self.object.save()\n self.object.members.add(self.request.user)\n self.request.user.profile.groups.add(self.object)\n return super().form_valid(form)\n \nclass GroupDetail(LoginRequiredMixin, DetailView):\n model = Group\n template_name = 'groups/detail.html'\n\n def get_object(self):\n return get_object_or_404(Group, id=self.kwargs['id'])\n\nclass GroupPostCreate(LoginRequiredMixin, CreateView):\n model = Post\n template_name = 'feed/post_create.html'\n fields = ['text', 'img']\n\n def get_success_url(self):\n return reverse('groups:group', kwargs={'id':self.object.of_group.id})\n \n def form_valid(self, form):\n self.object = form.save(commit=False)\n self.object.owner = self.request.user\n self.object.of_group = get_object_or_404(Group, id=self.kwargs['id'])\n self.object.save()\n return super().form_valid(form)\n\nclass GroupUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'groups/update.html'\n fields = ['info', 'img']\n\n def form_valid(self, form):\n profile = self.request.user.profile\n profile.info_message = 'Group updated'\n profile.save()\n return super().form_valid(form)\n\n def get_object(self):\n return get_object_or_404(Group, id=self.kwargs['id'])\n\n def get_success_url(self):\n return reverse('groups:group', kwargs={'id': self.kwargs['id']})\n\n def dispatch(self, request, *args, **kwargs):\n self.object = self.get_object()\n if request.user != self.object.owner:\n return redirect('groups:group', id=self.object.id)\n return super(GroupUpdate, self).dispatch(\n request, *args, **kwargs)\n\n@login_required\ndef group_delete(request, id):\n if request.method == 'POST':\n group = get_object_or_404(Group, id=id)\n user = request.user\n if request.user == group.owner:\n group.delete()\n return redirect('feed:home')\n return redirect('groups:group', id=id)\n\n@login_required\ndef handle_membership(request, id):\n if request.method == 'POST':\n group = get_object_or_404(Group, id=id)\n user = request.user\n command = request.POST['command']\n commands = {\n 'JOIN GROUP': handle_join,\n 'CANCEL REQUEST': handle_cancel,\n 'LEAVE GROUP': handle_leave\n }\n response = {'state' : commands[command](group, user)}\n return JsonResponse(response)\n\n@login_required\ndef handle_member(request, id):\n if request.method == 'POST':\n group = get_object_or_404(Group, id=id)\n if request.user != group.owner:\n return\n user_id = request.POST['id']\n user = get_object_or_404(User, id=user_id)\n command = request.POST['command']\n commands = {\n 'APPROVE': handle_approve,\n 'DISAPPROVE': handle_disapprove,\n 'KICK': handle_kick,\n }\n response = {'state' : commands[command](group, user)}\n return JsonResponse(response)\n\n@login_required\ndef manage_members(request, id):\n if request.method == 'GET':\n group = get_object_or_404(Group, id=id)\n if request.user == group.owner:\n return render(request, 'groups/manage.html', {'group': group})\n return redirect('groups:group', id=id)", "repo_name": "georgeballasdev/social_web_app", "sub_path": "WebAppClone/groups/views.py", "file_name": "views.py", "file_ext": "py", 
"file_size_in_byte": 4142, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 13, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Group", "line_number": 14, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 29, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Group", "line_number": 30, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 36, "usage_type": "name"}, {"api_name": "django.views.generic.CreateView", "line_number": 36, "usage_type": "name"}, {"api_name": "feed.models.Post", "line_number": 37, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 42, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 47, "usage_type": "argument"}, {"api_name": "django.contrib.auth.mixins.LoginRequiredMixin", "line_number": 51, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 51, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 62, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 70, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 77, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 82, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 87, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 87, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 96, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 84, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 101, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 101, "usage_type": "argument"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 105, "usage_type": "call"}, {"api_name": "users.models.User", "line_number": 105, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 113, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 98, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Group", "line_number": 118, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 120, "usage_type": "call"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "10318873654", "text": "import pytest\n\nfrom pdtable import TableBundle, Table, TableDataFrame\nfrom pdtable.io.parsers.blocks import parse_blocks\n\ncell_rows = [\n # fmt off\n [\"**foo\"],\n [\"all\"],\n [\"column\"],\n [\"text\"],\n [\"bar\"],\n [\"zoo\"],\n [],\n [\"::\", \"Table foo describes\"],\n [None, \"the fooness of things\"],\n [\":.column\", \"Column is a column in foo\"],\n [],\n [\"**infs\"],\n [\"all\"],\n [\"file_bytes\", \"file_date\", \"has_table\"],\n [\"-\", \"text\", \"onoff\"],\n [15373, \"a\", 0],\n [15326, \"b\", 1],\n []\n # fmt on\n]\n\n\ndef test_bundle_from_csv():\n\n bundle = TableBundle(parse_blocks(cell_rows), as_dataframe=True)\n\n assert bundle.foo.column.values[0] == \"bar\"\n\n\ndef test_TableBundle_as_dataframe():\n \"\"\" Verify that as_dataframe is functioning as expected (switch TableType)\n \"\"\"\n\n # pdtable generator\n bundle = TableBundle(parse_blocks(cell_rows, to=\"pdtable\"), as_dataframe=True)\n assert bundle.infs.file_bytes.values[1] == 15326.0\n assert bundle is not None\n assert len(bundle) == 2\n assert isinstance(bundle[0], TableDataFrame)\n\n # pdtable generator\n bundle = TableBundle(parse_blocks(cell_rows, to=\"pdtable\"), as_dataframe=False)\n assert bundle.infs[\"file_bytes\"].values[1] == 15326.0\n assert bundle is not None\n assert len(bundle) == 2\n assert isinstance(bundle[1], Table)\n\n # do not error on other table types\n bundle = TableBundle(parse_blocks(cell_rows, to=\"cellgrid\"), as_dataframe=True)\n assert bundle is not None\n assert isinstance(bundle[0], list) # cellgrid\n\n\ndef test_TableBundle_iterator():\n \"\"\" Verify that iterator is functioning as expected\n \"\"\"\n bundle = TableBundle(parse_blocks(cell_rows, to=\"pdtable\"))\n count = 0\n seen = {}\n for tab in bundle:\n assert type(tab) is Table\n seen[tab.name] = tab\n count += 1\n assert count == 2\n assert len(seen) == 2\n assert seen[\"foo\"] is not None\n assert seen[\"infs\"] is not None\n\n \"\"\" Verify that we can iterate other types than pdtable\n \"\"\"\n bundle = TableBundle(parse_blocks(cell_rows, to=\"cellgrid\"))\n count = 0\n for tab in bundle:\n assert type(tab) is list\n assert tab[0][0] in {\"**foo\", \"**infs\"}\n count += 1\n assert count == 2\n assert bundle[\"foo\"] is not None\n assert bundle[\"infs\"] is not None\n\n bundle = TableBundle(parse_blocks(cell_rows, to=\"jsondata\"))\n count = 0\n for tab in bundle:\n assert type(tab) is dict\n assert tab[\"name\"] in {\"foo\", \"infs\"}\n count += 1\n assert count == 2\n assert bundle[\"foo\"] is not None\n assert bundle[\"infs\"] is not None\n\n\ndef test_TableBundle_unique():\n \"\"\" Verify that unique() is functioning as expected\n \"\"\"\n bundle1 = TableBundle(parse_blocks(cell_rows))\n # bundle1 now contains one 'foo' and one 'infs'\n assert len(bundle1) == 2\n\n with pytest.raises(LookupError):\n tab = bundle1.unique(\"-not there-\")\n\n tab = bundle1.unique(\"foo\")\n assert tab.name == \"foo\"\n\n tab = bundle1.unique(\"infs\")\n assert tab.name == \"infs\"\n\n cells2 = []\n cells2.extend(cell_rows)\n cells2.extend([])\n cells2.extend(cell_rows)\n\n bundle2 = TableBundle(parse_blocks(cells2))\n # bundle2 now contains two 'foo' and two 'infs'\n assert len(bundle2) == 4\n\n with pytest.raises(LookupError):\n tab = bundle2.unique(\"-not there-\")\n\n with pytest.raises(LookupError):\n tab = 
bundle2.unique(\"foo\")\n\n with pytest.raises(LookupError):\n tab = bundle2.unique(\"infs\")\n\n\ndef test_TableBundle_getitem():\n \"\"\" Verify that unique() is functioning as expected\n \"\"\"\n bundle1 = TableBundle(parse_blocks(cell_rows))\n # bundle1 now contains one 'foo' and one 'infs'\n assert len(bundle1) == 2\n\n with pytest.raises(LookupError):\n tab = bundle1[\"-not there-\"]\n\n # verify getitem\n with pytest.raises(TypeError):\n tab = bundle1[bundle1]\n\n # hashed\n tab = bundle1[\"foo\"]\n assert tab.name == \"foo\"\n\n tab = bundle1[\"infs\"]\n assert tab.name == \"infs\"\n\n # indexed\n tab = bundle1[0]\n assert tab.name == \"foo\"\n\n tab = bundle1[1]\n assert tab.name == \"infs\"\n\n with pytest.raises(IndexError):\n tab = bundle1[2]\n\n cells2 = []\n cells2.extend(cell_rows)\n cells2.extend([])\n cells2.extend(cell_rows)\n\n bundle2 = TableBundle(parse_blocks(cells2))\n # bundle2 now contains two 'foo' and two 'infs'\n assert len(bundle2) == 4\n\n with pytest.raises(LookupError):\n tab = bundle2[\"-not there-\"]\n\n with pytest.raises(LookupError):\n tab = bundle2[\"foo\"]\n\n with pytest.raises(LookupError):\n tab = bundle2[\"infs\"]\n\n # indexed\n tab = bundle2[0]\n assert tab.name == \"foo\"\n\n tab = bundle2[1]\n assert tab.name == \"infs\"\n\n tab = bundle2[2]\n assert tab.name == \"foo\"\n\n tab = bundle2[3]\n assert tab.name == \"infs\"\n\n with pytest.raises(IndexError):\n tab = bundle2[4]\n\n\ndef test_TableBundle_all():\n \"\"\" Verify that all() is functioning as expected\n \"\"\"\n bundle1 = TableBundle(parse_blocks(cell_rows))\n # bundle1 now contains one 'foo' and one 'infs'\n assert len(bundle1) == 2\n\n lst = bundle1.all(\"-not there-\")\n assert len(lst) == 0\n\n lst = bundle1.all(\"foo\")\n assert len(lst) == 1\n for tab in lst:\n assert tab.name == \"foo\"\n\n lst = bundle1.all(\"infs\")\n assert len(lst) == 1\n for tab in lst:\n assert tab.name == \"infs\"\n\n cells2 = []\n cells2.extend(cell_rows)\n cells2.extend([])\n cells2.extend(cell_rows)\n\n bundle2 = TableBundle(parse_blocks(cells2))\n # bundle2 now contains two 'foo' and two 'infs'\n assert len(bundle2) == 4\n\n lst = bundle2.all(\"-not there-\")\n assert len(lst) == 0\n\n lst = bundle2.all(\"foo\")\n assert len(lst) == 2\n for tab in lst:\n assert tab.name == \"foo\"\n\n lst = bundle2.all(\"infs\")\n assert len(lst) == 2\n for tab in lst:\n assert tab.name == \"infs\"\n\n\ndef test_TableBundle_attribute_error():\n bundle = TableBundle([])\n with pytest.raises(AttributeError):\n bundle.invalid_attribute_name\n\n\ndef test_TableBundle_in_operator():\n bundle = TableBundle(parse_blocks(cell_rows))\n assert \"foo\" in bundle\n assert \"qux\" not in bundle\n", "repo_name": "startable/pdtable", "sub_path": "pdtable/test/test_store.py", "file_name": "test_store.py", "file_ext": "py", "file_size_in_byte": 6251, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pdtable.TableBundle", "line_number": 32, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 32, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 42, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 42, "usage_type": "call"}, {"api_name": "pdtable.TableDataFrame", "line_number": 46, "usage_type": "argument"}, {"api_name": "pdtable.TableBundle", "line_number": 49, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", 
"line_number": 49, "usage_type": "call"}, {"api_name": "pdtable.Table", "line_number": 53, "usage_type": "argument"}, {"api_name": "pdtable.TableBundle", "line_number": 56, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 56, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 64, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 64, "usage_type": "call"}, {"api_name": "pdtable.Table", "line_number": 68, "usage_type": "name"}, {"api_name": "pdtable.TableBundle", "line_number": 78, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 78, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 88, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 88, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 102, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 102, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 106, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 120, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 120, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 124, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 127, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 130, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 137, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 137, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 141, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 145, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 162, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 170, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 170, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 174, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 177, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 180, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 196, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 203, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 203, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 225, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 225, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 244, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 245, "usage_type": "call"}, {"api_name": "pdtable.TableBundle", "line_number": 250, "usage_type": "call"}, {"api_name": "pdtable.io.parsers.blocks.parse_blocks", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "31594914689", "text": "from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass VaultReplicaSummary(object):\n \"\"\"\n Summary of vault replicas\n \"\"\"\n\n #: A constant which can be used with the status property of a VaultReplicaSummary.\n #: This constant has a value of \"CREATING\"\n STATUS_CREATING = 
\"CREATING\"\n\n #: A constant which can be used with the status property of a VaultReplicaSummary.\n #: This constant has a value of \"CREATED\"\n STATUS_CREATED = \"CREATED\"\n\n #: A constant which can be used with the status property of a VaultReplicaSummary.\n #: This constant has a value of \"DELETING\"\n STATUS_DELETING = \"DELETING\"\n\n #: A constant which can be used with the status property of a VaultReplicaSummary.\n #: This constant has a value of \"DELETED\"\n STATUS_DELETED = \"DELETED\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new VaultReplicaSummary object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param crypto_endpoint:\n The value to assign to the crypto_endpoint property of this VaultReplicaSummary.\n :type crypto_endpoint: str\n\n :param management_endpoint:\n The value to assign to the management_endpoint property of this VaultReplicaSummary.\n :type management_endpoint: str\n\n :param region:\n The value to assign to the region property of this VaultReplicaSummary.\n :type region: str\n\n :param status:\n The value to assign to the status property of this VaultReplicaSummary.\n Allowed values for this property are: \"CREATING\", \"CREATED\", \"DELETING\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type status: str\n\n \"\"\"\n self.swagger_types = {\n 'crypto_endpoint': 'str',\n 'management_endpoint': 'str',\n 'region': 'str',\n 'status': 'str'\n }\n\n self.attribute_map = {\n 'crypto_endpoint': 'cryptoEndpoint',\n 'management_endpoint': 'managementEndpoint',\n 'region': 'region',\n 'status': 'status'\n }\n\n self._crypto_endpoint = None\n self._management_endpoint = None\n self._region = None\n self._status = None\n\n @property\n def crypto_endpoint(self):\n \"\"\"\n Gets the crypto_endpoint of this VaultReplicaSummary.\n The vault replica's crypto endpoint\n\n\n :return: The crypto_endpoint of this VaultReplicaSummary.\n :rtype: str\n \"\"\"\n return self._crypto_endpoint\n\n @crypto_endpoint.setter\n def crypto_endpoint(self, crypto_endpoint):\n \"\"\"\n Sets the crypto_endpoint of this VaultReplicaSummary.\n The vault replica's crypto endpoint\n\n\n :param crypto_endpoint: The crypto_endpoint of this VaultReplicaSummary.\n :type: str\n \"\"\"\n self._crypto_endpoint = crypto_endpoint\n\n @property\n def management_endpoint(self):\n \"\"\"\n Gets the management_endpoint of this VaultReplicaSummary.\n The vault replica's management endpoint\n\n\n :return: The management_endpoint of this VaultReplicaSummary.\n :rtype: str\n \"\"\"\n return self._management_endpoint\n\n @management_endpoint.setter\n def management_endpoint(self, management_endpoint):\n \"\"\"\n Sets the management_endpoint of this VaultReplicaSummary.\n The vault replica's management endpoint\n\n\n :param management_endpoint: The management_endpoint of this VaultReplicaSummary.\n :type: str\n \"\"\"\n self._management_endpoint = management_endpoint\n\n @property\n def region(self):\n \"\"\"\n Gets the region of this VaultReplicaSummary.\n Region to which vault is replicated to\n\n\n :return: The region of this VaultReplicaSummary.\n :rtype: str\n \"\"\"\n return self._region\n\n @region.setter\n def region(self, region):\n \"\"\"\n Sets the region of this VaultReplicaSummary.\n Region to which vault is replicated to\n\n\n :param region: The region of this VaultReplicaSummary.\n :type: str\n \"\"\"\n 
self._region = region\n\n @property\n def status(self):\n \"\"\"\n Gets the status of this VaultReplicaSummary.\n Status of the Vault\n\n Allowed values for this property are: \"CREATING\", \"CREATED\", \"DELETING\", \"DELETED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n\n\n :return: The status of this VaultReplicaSummary.\n :rtype: str\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"\n Sets the status of this VaultReplicaSummary.\n Status of the Vault\n\n\n :param status: The status of this VaultReplicaSummary.\n :type: str\n \"\"\"\n allowed_values = [\"CREATING\", \"CREATED\", \"DELETING\", \"DELETED\"]\n if not value_allowed_none_or_none_sentinel(status, allowed_values):\n status = 'UNKNOWN_ENUM_VALUE'\n self._status = status\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/key_management/models/vault_replica_summary.py", "file_name": "vault_replica_summary.py", "file_ext": "py", "file_size_in_byte": 5644, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "oci.util.value_allowed_none_or_none_sentinel", "line_number": 168, "usage_type": "call"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 173, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 5, "usage_type": "name"}]} +{"seq_id": "651215321", "text": "from flask import Flask, render_template, request\r\nfrom ubidots import ApiClient\r\n\r\napp = Flask(__name__)\r\n\r\n# Ubah dengan token dan label yang sesuai dari Ubidots\r\nAPI_TOKEN = \"BBFF-thUhhRPJojoHiUB78bozuZuPy2dKTv\"\r\nLABEL = \"64cb734bdfc2f3000b9aec5b\"\r\n\r\napi = ApiClient(token=API_TOKEN)\r\nvariable = api.get_variable(LABEL)\r\n\r\ndef toggle_value(current_value):\r\n if current_value == 0:\r\n return 1\r\n else:\r\n return 0\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if request.method == 'POST':\r\n current_value = variable.get_values(1)[0]['value']\r\n new_value = toggle_value(current_value)\r\n variable.save_value({'value': new_value})\r\n\r\n current_value = variable.get_values(1)[0]['value']\r\n return render_template('index.html', current_value=current_value)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n", "repo_name": "Farmops-Project/Final-Project-Altissimo", "sub_path": "Tombol/tombol.py", "file_name": "tombol.py", "file_ext": "py", "file_size_in_byte": 871, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "ubidots.ApiClient", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "70840990244", "text": "# Identify location\nimport socket\nlocation = socket.gethostname()\nif location == 'Orthanc':\n dropbox = 'E:\\\\Users\\\\Chris\\\\Dropbox\\\\'\nif location == 'sputnik':\n dropbox = '/home/dhris/Dropbox/'\nif location == 'saruman':\n dropbox = 
'/home/herdata/spx7cjc/Dropbox/'\n\n# Import smorgasbord\nimport os\nimport warnings\nwarnings.simplefilter('ignore', category=Warning)\nimport matplotlib\nmatplotlib.use(\"Pdf\")\nimport matplotlib.pyplot as plt\nplt.ioff()\nimport astropy.logger\nastropy.log.setLevel('ERROR')\nimport AstroCell.Main\n\n\n\n# Main process\nif __name__ == '__main__':\n\n # State input directory for test data (various options)\n test_dir = 'Test_Data/'\n #img_dir = 'Histochemial/3100_zeb1/'\n #img_dir = 'Flourescant/Liver/APCFLOX1668/'\n #img_dir = 'Flourescant/Mammary/Ref_LO/'\n #img_dir = 'Histochemial/Mammary/Ref_LO/'\n img_dir = 'Histochemial/Mammary/Cytoplasm/'\n in_dir = os.path.join(test_dir, img_dir)\n\n # Set output directory for Dills (like pickle jars, these are snapshots to resume AstroCell from a 'saved' point, for testing)\n if location == 'sputnik':\n dill_dir = os.path.join( os.path.expanduser('~'), '/Data/AstroCell/Dills/' )\n else:\n dill_dir = False\n\n\n # Launch AstroCell\n AstroCell.Main.Run(in_dir=in_dir, cell_colours=2, substructure_flag=True, parallel=7, mc_factor=1.0, dill_dir=dill_dir, verbose=True)", "repo_name": "Stargrazer82301/AstroCell", "sub_path": "Test/AstroCell_Test.py", "file_name": "AstroCell_Test.py", "file_ext": "py", "file_size_in_byte": 1378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "socket.gethostname", "line_number": 3, "usage_type": "call"}, {"api_name": "warnings.simplefilter", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.use", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ioff", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "astropy.logger.log.setLevel", "line_number": 20, "usage_type": "call"}, {"api_name": "astropy.logger.log", "line_number": 20, "usage_type": "attribute"}, {"api_name": "astropy.logger", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 39, "usage_type": "call"}, {"api_name": "AstroCell.Main.Main.Run", "line_number": 45, "usage_type": "call"}, {"api_name": "AstroCell.Main.Main", "line_number": 45, "usage_type": "attribute"}, {"api_name": "AstroCell.Main", "line_number": 45, "usage_type": "name"}]} +{"seq_id": "17404034979", "text": "import scipy\nimport scipy.ndimage\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nimport glob\nimport os\n\n#PCA with image reconstruction\ndef pca(img):\n try:\n #Mean centered convariance matrix\n covariance_matrix = img - np.mean(img , axis = 0)\n #Finding eigenValues and Vectors\n eigenValues, eigenVectors = np.linalg.eigh(np.cov(covariance_matrix))\n eigenVecSize = np.size(eigenVectors, axis =0)\n #Sort eigenvalues in descending order\n index = np.argsort(eigenValues)\n index = index[::-1]\n eigenVectors = eigenVectors[:,index]\n eigenValues = eigenValues[index]\n #Number of principal components to be used for reconstruction of image\n numOfPC = 75\n if numOfPC 0:\n eigenVectors = eigenVectors[:, range(numOfPC)]\n #Reconstruction of image using the covariance matrix and the eigen vectors\n reconstructed = np.dot(eigenVectors.T, 
covariance_matrix)\n reconstructedMeanAdjusted = np.dot(eigenVectors, reconstructed) + np.mean(img, axis = 0).T\n reconstructedImageMatrix = np.uint8(np.absolute(reconstructedMeanAdjusted))\n return reconstructedImageMatrix\n except:\n print(\"\")\n\n\ndef main():\n #Reading all images one by one\n datapath = 'C:\\\\Users\\\\prana\\\\Downloads\\\\data'\n outputpath = \"C:\\\\Users\\\\prana\\\\Downloads\\\\recon\\\\\"\n for filename in glob.glob(datapath + '\\\\*\\\\*.jpg'):\n try:\n img = scipy.ndimage.imread(filename)\n head, tail = os.path.split(filename)\n #print(head)\n #print(tail)\n headtail, tailtail = os.path.split(head)\n #print(headtail)\n #print(tailtail)\n imgMatrix = np.array(img)\n if(len(imgMatrix.shape) > 2):\n #for 3-d images - RGB color images\n imgR = imgMatrix[:,:,0]\n imgG = imgMatrix[:,:,1]\n imgB = imgMatrix[:,:,2]\n imgRReconstructed, imgGReconstructed, imgBReconstructed = pca(imgR), pca(imgG), pca(imgB)\n reconstructedImg = np.dstack((imgRReconstructed, imgGReconstructed, imgBReconstructed))\n reconstructedImg = Image.fromarray(reconstructedImg)\n try:\n os.stat(outputpath + tailtail)\n except:\n os.mkdir(outputpath + tailtail)\n scipy.misc.imsave(outputpath + tailtail + \"\\\\\" + tail, reconstructedImg)\n else:\n #for 2-d images\n imgW = imgMatrix[:, 0]\n imgB = imgMatrix[:, 1]\n imgWReconstructed, imgBReconstructed = pca(imgW), pca(imgB)\n reconstructedImg = np.dstack((imgWReconstructed, imgBReconstructed))\n reconstructedImg = Image.fromarray(reconstructedImg)\n try:\n os.stat(outputpath + tailtail)\n except:\n os.mkdir(outputpath + tailtail)\n scipy.misc.imsave(outputpath + tailtail + \"\\\\\" + tail, reconstructedImg)\n except:\n print('')\n print(\"Done with PCA!\")\n\nif __name__== \"__main__\":\n main()\n", "repo_name": "harshalkgurjar/Object_recognition", "sub_path": "pca.py", "file_name": "pca.py", "file_ext": "py", "file_size_in_byte": 3215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.mean", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.linalg.eigh", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.cov", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 29, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.ndimage.imread", "line_number": 41, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 56, "usage_type": "call"}, {"api_name": 
"PIL.Image", "line_number": 56, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 58, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 60, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.dstack", "line_number": 67, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 68, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 68, "usage_type": "name"}, {"api_name": "os.stat", "line_number": 70, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 73, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "28038022020", "text": "import time\nimport os\nimport logging\nimport requests\n\nfrom django.utils.deprecation import MiddlewareMixin\nfrom datadog import DogStatsd\nfrom django.http import QueryDict\nfrom .authentication import KeycloakAuthenticationMixin\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(\"INFO\")\n\nstatsd = DogStatsd(host=\"tator-prometheus-statsd-exporter\", port=9125)\n\n\nclass StatsdMiddleware(MiddlewareMixin):\n def process_request(self, request):\n request.start_time = time.time()\n\n def process_response(self, request, response):\n tokens = request.path.split(\"/\")\n if request.path.startswith(\"/rest\") and len(tokens) > 2:\n endpoint = tokens[2]\n else:\n endpoint = request.path\n statsd.increment(\n \"django_request_count\",\n tags=[\n \"service:tator\",\n f\"method:{request.method}\",\n f\"endpoint:{endpoint}\",\n f\"status:{response.status_code}\",\n ],\n )\n\n response_time = time.time() - request.start_time\n statsd.histogram(\n \"django_request_latency_seconds\",\n response_time,\n tags=[\"service:tator\", f\"endpoint:{endpoint}\"],\n )\n return response\n\n\nclass KeycloakMiddleware(KeycloakAuthenticationMixin):\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n request.user, _ = self.authenticate(request)\n return self.get_response(request)\n\n\nMAIN_HOST = os.getenv(\"MAIN_HOST\")\n", "repo_name": "cvisionai/tator", "sub_path": "api/tator_online/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 1552, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 88, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "datadog.DogStatsd", "line_number": 14, "usage_type": "call"}, {"api_name": "django.utils.deprecation.MiddlewareMixin", "line_number": 17, "usage_type": "name"}, {"api_name": "time.time", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "authentication.KeycloakAuthenticationMixin", "line_number": 46, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "73014940644", "text": "import json\n\nimport weaviate\n\nimport re\ndef read_md(path):\n with open(path, 'r', encoding='utf-8') as f:\n text = f.read()\n\n # 定义正则表达式模式\n title_pattern = r\"^(# .+)\"\n author_pattern = r\">\\s{2}(.+)\"\n\n # 提取标题和作者\n title_match = re.search(title_pattern, text, re.MULTILINE)\n author_match = re.search(author_pattern, text, re.MULTILINE)\n\n if title_match and author_match:\n title = title_match.group(1)\n author = 
author_match.group(1)\n # 去除标题中的井号和空格\n title = title.replace(\"# \", \"\")\n # 提取内容部分\n content = text[author_match.end():].strip()\n content = content.replace(\"*\", \"\").replace(\"#\", \"\")\n result = {\n \"title\": title,\n \"author\": author,\n \"content\": content\n }\n return result\nproperties=[]\nproperties.append(read_md(\"data/test-dataset-1.md\"))\nproperties.append(read_md(\"data/test-dataset-2.md\"))\nclient = weaviate.Client(\n url=\"https://lcy-3koo0kfs.weaviate.network\", # Replace with your endpoint\n additional_headers={\n \"X-HuggingFace-Api-Key\": \"\"\n }\n)\n\nclass_obj = {\n \"class\": \"Article\",\n \"vectorizer\": \"text2vec-huggingface\",\n \"moduleConfig\": {\n \"text2vec-huggingface\": {\n \"model\": \"bert-base-chinese\",\n \"options\": {\n \"waitForModel\": True,\n }\n }\n }\n}\nclient.schema.delete_class(class_name='Article')\nclient.schema.create_class(class_obj)\n\nwith client.batch(\n batch_size=100\n) as batch:\n # Batch import all Questions\n for i, d in enumerate(properties):\n print(f\"importing data: {i+1}\")\n\n properties = {\n \"title\": d[\"title\"],\n \"author\": d[\"author\"],\n \"content\": d[\"content\"][0:490],\n }\n client.batch.add_data_object(\n properties,\n \"Article\",\n )\n\nresponse = (\n client.query\n .aggregate(\"Article\")\n .with_meta_count()\n .do()\n)\n\nprint(json.dumps(response, indent=2))\n\n", "repo_name": "lcy5058/pythonProject", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2002, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.search", "line_number": 15, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 16, "usage_type": "call"}, {"api_name": "re.MULTILINE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "weaviate.Client", "line_number": 35, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "41434014793", "text": "import speech_recognition as sr\nimport time\n\nr = sr.Recognizer()\nm = sr.Microphone()\n\nwith m as source:\n print(\"Say something\")\n audio = r.listen(source)\n\ntime.sleep(3)\n\ntry:\n print(\"Sphinx thinks you said \" + r.recognize_sphinx(audio))\nexcept sr.UnknownValueError:\n print(\"Sphinx could not understand audio\")\nexcept sr.RequestError as e:\n print(\"Could not request results from Sphinx service; {0}\".format(e))\n", "repo_name": "MasKash3/face-attendance", "sub_path": "speech/speech_reg_sphink.py", "file_name": "speech_reg_sphink.py", "file_ext": "py", "file_size_in_byte": 425, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 4, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 11, "usage_type": "call"}, {"api_name": "speech_recognition.UnknownValueError", "line_number": 15, "usage_type": "attribute"}, {"api_name": "speech_recognition.RequestError", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "6784245850", "text": "from django.utils.translation import ugettext_lazy as _\n\nfrom ..apps import MetricConfig\nfrom ..registry import register\n\n\nclass SalesConfig(MetricConfig):\n label = 'sales'\n name = 'metrics.sales'\n\n def get_metric_models(self):\n return 
[self.get_model('Sale'),]\n\n def ready(self):\n super(SalesConfig, self).ready()\n\n from .export import SaleDataset, PublicSaleDataset\n\n register('Market Sales', {\n 'add_record_label': 'Add market sale',\n 'model': self.get_model('Sale'),\n 'number': 1,\n 'garden_detail_url_name': 'sales_garden_details',\n 'group': 'Economic Data',\n 'group_number': 4,\n 'dataset': SaleDataset,\n 'public_dataset': PublicSaleDataset,\n 'description': _('Making fresh vegetables accessible and affordable to '\n 'city-dwellers is one of the joys of urban gardening. '\n 'This method helps you track what you\\'re selling, and '\n 'how much you\\'re making, at local farmer\\'s markets. '\n 'These results are powerful source of information for '\n 'adjusting what you choose to sell each week, or for '\n 'quantifying how your garden contributes to the local '\n 'economy.'),\n })\n", "repo_name": "ebrelsford/Farming-Concrete", "sub_path": "barn/metrics/sales/apps.py", "file_name": "apps.py", "file_ext": "py", "file_size_in_byte": 1422, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "apps.MetricConfig", "line_number": 7, "usage_type": "name"}, {"api_name": "registry.register", "line_number": 19, "usage_type": "call"}, {"api_name": "export.SaleDataset", "line_number": 26, "usage_type": "name"}, {"api_name": "export.PublicSaleDataset", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "21723636886", "text": "# import libraries\nimport numpy as np\nimport streamlit as st\nimport pickle\nimport pandas as pd\n\n# Open the pickle file\nfile_path = r\"C:\\Users\\Chitwan Manchanda\\Desktop\\Delhivery_Assignment\\Best Model\\LightGBM_Classifier.pkl\"\npickle_in = open(file_path, 'rb')\nclassifier = pickle.load(pickle_in)\n\n# Predict the chances of getting the booking canceled\ndef predict_booking_cancellation(features):\n \n # Predict the probabiliities based on the features\n prob = classifier.predict_proba(np.array(features).reshape(1,-1))\n \n # Return the probability(chances) of the bookings being canceled\n return np.round(prob[0][1]*100, 2)\n\n# Function for the front end of the webpage\ndef web_page():\n st.title(\"Delhivery Assignment\")\n \n # use the html temp\n html_temp = r\"\"\"\n
<div>\n    <h2>Hotel Booking Cancelation Predictor App</h2>\n    </div>
\n \n \"\"\"\n \n st.markdown(html_temp, unsafe_allow_html = True)\n \n features = ['hotel', 'lead_time', 'arrival_date_year', 'arrival_date_week_number',\n 'arrival_date_day_of_month', 'stays_in_weekend_nights',\n 'stays_in_week_nights', 'adults', 'country', 'previous_cancellations',\n 'booking_changes', 'agent', 'adr', 'required_car_parking_spaces',\n 'total_of_special_requests', 'reservation_status_date',\n 'arrival_date_month_number', 'arrival_date', 'total_night_stays',\n 'market_segment_Direct', 'market_segment_Groups',\n 'market_segment_Offline TA/TO', 'market_segment_Online TA',\n 'distribution_channel_Direct', 'distribution_channel_TA/TO',\n 'deposit_type_Non Refund', 'customer_type_Transient',\n 'customer_type_Transient-Party', 'reservation_status_No-Show']\n \n feature_list = []\n \n for feature in features:\n \n inp = st.text_input(feature)\n feature_list.append(inp)\n \n result = \"\"\n \n if st.button(\"Predict\"):\n result = predict_booking_cancellation(feature_list)\n st.success(\"The chances of the hotel bookings being canceled is {}%\".format(result))\n \n\nif __name__ =='__main__':\n web_page()\n \n \n \n \n \n \n ", "repo_name": "Chitwan54/Hotel-Booking-Cancelation-Prediction", "sub_path": "DeployingHotelBookingClassifier.py", "file_name": "DeployingHotelBookingClassifier.py", "file_ext": "py", "file_size_in_byte": 2308, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pickle.load", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 23, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 51, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 56, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "36071516030", "text": "from PIL import Image, ImageChops\nimport os # ファイルやフォルダ操作\nimport glob\nimport shutil\nimport datetime # 現在時刻を取得\nimport time\n\ndir_name = \"mikakou\" # 画像が入っているフォルダ\nnew_dir_name = \"new\" # 画像を保存する先のフォルダ\nused_dir_name =\"used\"\n\ndef crop_center(pil_img, crop_width, crop_height): # 画像の中心を切り出し\n img_width, img_height = pil_img.size\n return pil_img.crop(((img_width - crop_width) // 2,\n (img_height - crop_height) // 2,\n (img_width + crop_width) // 2,\n (img_height + crop_height) // 2))\n\ndef func():\n # ディレクトリが存在しない場合は作成する\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n\n # ディレクトリが存在しない場合は作成する\n if not os.path.exists(new_dir_name):\n os.mkdir(new_dir_name)\n\n # ディレクトリが存在しない場合は作成する\n if not os.path.exists(used_dir_name):\n os.mkdir(used_dir_name)\n\n def move_glob(dst_path, pathname, recursive=True): # glob.glob()で抽出された複数のファイルを一括で移動\n for p in glob.glob(pathname, recursive=recursive):\n shutil.move(p, dst_path)\n\n move_glob(dir_name, '*.png')\n move_glob(dir_name, '*.jpg')\n move_glob(dir_name, '*.jpeg')\n\n files = os.listdir(dir_name)\n\n i = 1\n\n for file in files: # ホーム画面用の処理\n im_original = Image.open(os.path.join(dir_name, file))\n name, ext = os.path.splitext(os.path.basename(file))\n width, height = im_original.size\n\n if height > width: # 縦長\n im = im_original\n im = crop_center(im, width, height - 208)\n else:\n im = im_original\n # im = crop_center(im, width - 50, height)\n\n # 背景色画像を生成\n 
im2 = im.convert(\"RGB\")\n bg = Image.new(\"RGB\", im2.size, im2.getpixel((0, 0)))\n\n # 背景色画像と元画像の差分画像を生成\n diff = ImageChops.difference(im2, bg)\n\n # 背景との境界を求めて画像を切り抜く\n croprange = diff.convert(\"RGB\").getbbox()\n nim = im.crop(croprange)\n\n dt_now = datetime.datetime.now()\n # print(dt_now.strftime('%Y%m%d_%H%M%S'))\n name_png = str(dt_now.strftime('%Y%m%d_%H%M%S'))\n name_jpg = name_png + \".jpg\"\n name_png += \".png\"\n\n # 切り抜いた画像を保存\n # nim.save(os.path.join(new_dir_name, name_png))\n\n nim = nim.convert('RGB') # RGBA(png)→RGB(jpg)へ変換\n nim.save(os.path.join(new_dir_name, name_jpg), \"JPEG\", quality=95)\n\n print(str(i) + \" done!\")\n i += 1\n time.sleep(1)\n\n move_glob(used_dir_name, \"./mikakou/*.PNG\")\n move_glob(used_dir_name, \"./mikakou/*.JPG\")\n move_glob(used_dir_name, \"./mikakou/*.JPEG\")\n\n # 終了時に元の画像を削除\n # shutil.rmtree(dir_name)\n\n print(\"Exit a program\")\n\nif __name__ == \"__main__\":\n print(\"Execute a program\")\n func()", "repo_name": "khoshi-higashi/edit-screenshots", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2911, "program_lang": "python", "lang": "ja", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 30, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 33, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 34, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 45, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 45, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 46, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 58, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 58, "usage_type": "name"}, {"api_name": "PIL.ImageChops.difference", "line_number": 61, "usage_type": "call"}, {"api_name": "PIL.ImageChops", "line_number": 61, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "11506704936", "text": "import logging\n\nclass log:\n def __init__(self):\n pass\n\n def printlog(self,level,message):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n #用于写入日志文件\n logfile = r'C:\\Users\\admin\\PycharmProjects\\python\\xybao\\log\\logger.txt'\n fh = 
logging.FileHandler(logfile, mode='a')\n fh.setLevel(logging.INFO)\n\n # 用于输出到控制台\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n # 将logger添加到handler里面\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n # 日志\n if level==\"debug\":\n logger.debug(message)\n elif level ==\"info\":\n logger.info(message)\n elif level==\"warning\":\n logger.warning(message)\n elif level==\"error\":\n logger.error(message)\n else:\n logger.critical(message)\n\n def debug(self,message):\n self.printlog(\"debug\",message)\n\n def info(self, message):\n self.printlog(\"info\", message)\n\n def warning(self, message):\n self.printlog(\"warning\",message)\n\n def error(self, message):\n self.printlog(\"error\", message)\n\n def critical(self, message):\n self.printlog(\"critical\", message)\n\n\n# # 打印日志\n# def printlog(level,message):\n# # 第一步,创建一个logger\n# logger = logging.getLogger()\n# logger.setLevel(logging.INFO) # Log等级总开关\n#\n# # 第二步,创建一个handler,用于写入日志文件\n# logfile = 'D:/pycharm/python/Test_project/Log/logger.txt'\n# fh = logging.FileHandler(logfile, mode='a')\n# fh.setLevel(logging.INFO) # 输出到file的log等级的开关\n#\n# # 第三步,再创建一个handler,用于输出到控制台\n# ch = logging.StreamHandler()\n# ch.setLevel(logging.INFO) # 输出到console的log等级的开关\n#\n# # 第四步,定义handler的输出格式\n# formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n# fh.setFormatter(formatter)\n# ch.setFormatter(formatter)\n#\n# # 第五步,将logger添加到handler里面\n# logger.addHandler(fh)\n# logger.addHandler(ch)\n#\n# # 日志\n# if level==\"debug\":\n# logger.debug(message)\n# elif level ==\"info\":\n# logger.info(message)\n# elif level==\"warning\":\n# logger.warning(message)\n# elif level==\"error\":\n# logger.error(message)\n# else:\n# logger.critical(message)\n\n\nif __name__==\"__main__\":\n t=log()\n t.info(\"zwz\")", "repo_name": "IALFML/zwz", "sub_path": "Public/logshow.py", "file_name": "logshow.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 9, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "75111637603", "text": "import sqlite3\nimport os.path\nimport subprocess\nimport pickle\n\n#todo, return tags, avoid duplicates by checking primary key. \n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n print(idx)\n print(col)\n d[col[0]] = row[idx]\n return d\n\n#helper functions\n\nclass DbInterface():\n\n #I need to check if a file exists. 
If not create a DB from the templace\n \n def __init__(self, dbFilename):\n self.db = self.open_DB(dbFilename)\n self.db.row_factory = dict_factory\n def make_empty_book(self):\n book = {}\n book['title']='Not Found',\n book['subtitle'] = ''\n book['author'] = ''\n book['description'] = 'No Results'\n book['video'] = '0'\n book['count'] = '0'\n book['isbn'] = ''\n book['bid'] = ''\n return book\n def search(self, book):\n for i in book.keys():\n if book[i]=='':\n book[i]='%'\n else:\n b = book[i].split(' ')\n b = '%'.join(b)\n book[i] = b\n curr = self.db.cursor() \n query = \"select * from book where title like '%s' and subtitle like '%s' and author like '%s' and description like '%s' and video like '%s' and count like '%s' and isbn like '%s';\" % (book['title'], book['subtitle'], book['author'], book['description'], book['video'], book['count'], book['isbn'])\n print(query) \n curr.execute('select * from book where title like ? and subtitle like ? and author like ? and description like ? and video like ? and count like ? and isbn like ?;', (book['title'], book['subtitle'], book['author'], book['description'], book['video'], book['count'], book['isbn']))\n list = self.make_list(curr)\n if list ==[]:\n return [self.make_empty_book()]\n return list\n def return_all(self):\n #return all books. Needs to get books and tags. Should be stored in a dict\n curr = self.db.cursor()\n curr.execute('select * from book;')\n return self.make_list(curr)\n def make_list(self, curr):\n \n res = []\n for i in curr.fetchall():\n print(i)\n res.append(i)\n return res\n \n def save(self, book):\n curr = self.db.cursor()\n curr.execute('update book set title=?,subtitle=?,author=?,description=?,video=?,count=?, isbn=? where bid=?', (book['title'], book['subtitle'], book['author'], book['description'], book['video'], book['count'], book['isbn'], book['bid']))\n self.db.commit()\n def add(self, isbn):\n #need some way to call the python2 file and get data\n #try:\n \n r = subprocess.Popen(\"/usr/bin/python booksapi.py \"+ isbn, shell=True, stdout=subprocess.PIPE)\n \n out, err = r.communicate()\n #print(out)\n \n # except Exception as e:\n # out, err \n # pass #book = subprocess.output([\"/usr/bin/python booksapi.py\", isbn], shell=True)\n fl = open('tmp.blah', 'rb')\n book = pickle.load(fl)\n print(book)\n if 'error' in book.keys():\n return False\n #book = { 'title': 'Test book', 'subtitle':'Test subtitle', 'description':'Test Description', 'author':'Test Author' }\n cursor = self.db.cursor()\n if 'description' not in book.keys():\n book['description'] = ''\n if 'subtitle' not in book.keys():\n book['subtitle'] = ''\n if 'averageRating' not in book.keys():\n book['averageRating'] = ''\n if 'authors' not in book.keys():\n book['authors'] = ''\n cursor.execute(\"insert into book (title, subtitle, description, author, count, leader, isbn, pagecount, averagerating, video ) values (?, ?, ?, ?, 0,0, ?, ?, ?, 0);\",\n (book['title'], book['subtitle'], book['description'], ','.join(book['authors']), isbn, book['pageCount'], book['averageRating']))\n self.db.commit()\n return True\n \n def open_DB(self, filename):\n if not os.path.isfile(filename):\n return self.create_DB(filename)\n return sqlite3.connect(filename)\n \n def create_DB(self, filename):\n #check if db exists\n conn = sqlite3.connect(filename)\n cursor = conn.cursor()\n cursor.execute('create table book (bid integer primary key, title varchar, subtitle varchar, description varchar, author varchar, count integer, leader integer, isbn varchar, publisher 
varchar, pagecount varchar, averagerating varchar, video varchar);')\n conn.commit()\n cursor.execute('create table tag (bid integer not null, note varchar);')\n conn.commit()\n return conn\n \n\nif __name__ == '__main__':\n db = DbInterface('test.db')\n f = open(\"bulk.txt\", \"r\")\n for i in f:\n b = i.rstrip()\n print(b)\n db.add(b)\n\n db.add('9780393063790')\n \n db.return_all()\n db.db.close()\n\n", "repo_name": "keyvin/docnotes", "sub_path": "python/bsdb/datamodel.py", "file_name": "datamodel.py", "file_ext": "py", "file_size_in_byte": 5140, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "52", "api": [{"api_name": "subprocess.Popen", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.path.isfile", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 102, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 104, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "6270124895", "text": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom pathlib import Path\n\nimport h5py\n\nfrom PyQt5.QtWidgets import (QTreeView, QFileDialog, QWidget,\n QHBoxLayout, QLabel, QMenu)\nfrom PyQt5.QtCore import Qt, QItemSelectionModel\nfrom PyQt5.QtGui import QStandardItem, QStandardItemModel\n\nfrom .utils import read_h5_dict\nfrom .items import (file_item_factory, FolderGroupItem, RoiItem,\n H5GiwaxsItem, H5FileItem, AbstractGroupItem,\n AbstractFileItem, H5DatasetItem)\n\nfrom ..basic_widgets import RoundedPushButton\nfrom ..roi.roi_widgets import EmptyROI, FileWidgetRoi\nfrom ..roi.roi_containers import BasicROIContainer\nfrom ..signal_connection import SignalConnector, SignalContainer, StatusChangedContainer\n\nfrom ...utils import Icon, RoiParameters, save_execute\n\nlogger = logging.getLogger(__name__)\n\n\nclass FileModel(QStandardItemModel):\n def __init__(self):\n super(FileModel, self).__init__()\n self.setHorizontalHeaderLabels([''])\n self.setRowCount(0)\n\n def add_folder(self, folder_path: Path):\n folder_item = FolderGroupItem(folder_path)\n self.appendRow(folder_item)\n return folder_item\n\n def add_file(self, filepath: Path, row: int = None):\n item = file_item_factory(filepath)\n if item:\n if row is None:\n self.appendRow(item)\n else:\n self.insertRow(row, item)\n\n return item\n\n\nclass FileWidget(BasicROIContainer, QTreeView):\n def __init__(self, signal_connector: SignalConnector, parent=None):\n BasicROIContainer.__init__(self, signal_connector)\n QTreeView.__init__(self, parent=parent)\n self._model = FileModel()\n self.setEditTriggers(QTreeView.NoEditTriggers)\n self.setModel(self._model)\n self.selectionModel().currentChanged.connect(self._on_clicked)\n # self.clicked.connect(self._on_clicked)\n self.current_dataset = None\n self._future_dataset = None\n self.customContextMenuRequested.connect(\n self.context_menu\n )\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.__init_ui__()\n self.show()\n\n def __init_ui__(self):\n\n add_file_button = RoundedPushButton(icon=Icon('data'), radius=30,\n background_color='transparent')\n add_file_button.clicked.connect(self._open_add_file_menu)\n add_folder_button = RoundedPushButton(icon=Icon('folder'), radius=30,\n 
background_color='transparent')\n add_folder_button.clicked.connect(self._open_add_folder_menu)\n layout = self._get_header_layout(QStandardItem(), 'Files')\n layout.addWidget(add_file_button)\n layout.addWidget(add_folder_button)\n\n @save_execute('File widget process signal failed.')\n def process_signal(self, s: SignalContainer):\n super().process_signal(s)\n if self.current_dataset:\n for _ in s.geometry_changed():\n self.current_dataset.properties_item.update(\n beam_center=self.image.beam_center\n )\n for _ in s.intensity_limits_changed():\n self.current_dataset.properties_item.update(\n intensity_limits=self.image.intensity_limits\n )\n for _ in s.transformation_added():\n self.current_dataset.properties_item.update(\n transformations=self.image.transformation.transformation_list\n )\n for _ in s.scale_changed():\n self.current_dataset.properties_item.update(\n scale=self.image.scale\n )\n\n def add_roi(self, params: RoiParameters):\n roi = self._get_roi(params)\n if roi:\n roi.value_changed.connect(\n lambda value: self.signal_connector.emit_upward(\n SignalContainer().segment_moved(value)))\n roi.status_changed.connect(self.emit_status_changed)\n roi.arbitrary_signal.connect(self.signal_connector.emit_upward)\n if not self._future_dataset:\n self.roi_dict[params.key] = roi\n self._add_item(roi)\n return roi\n\n def _get_roi(self, params: RoiParameters) -> 'AbstractROI':\n if self.current_dataset or self._future_dataset:\n return FileWidgetRoi(params)\n else:\n return EmptyROI(params)\n\n def _add_item(self, roi: FileWidgetRoi or EmptyROI):\n if ((self.current_dataset or self._future_dataset) and\n isinstance(roi, FileWidgetRoi)):\n item = RoiItem(roi.value.name)\n roi.set_item(item)\n parent = self._future_dataset or self.current_dataset\n parent.appendRow(item)\n # self.setExpanded(parent.index(), True)\n return roi\n\n def _remove_item(self, roi: FileWidgetRoi or EmptyROI):\n if self.current_dataset and isinstance(roi, FileWidgetRoi):\n self.current_dataset.removeRow(roi.item.row())\n\n def _on_status_changed(self, sig: StatusChangedContainer):\n if self.current_dataset:\n if not sig.status:\n for k in sig.keys:\n self.roi_dict[k].set_inactive()\n self.selectionModel().select(\n self.roi_dict[k].item.index(),\n QItemSelectionModel.Deselect)\n else:\n for k in sig.keys:\n self.roi_dict[k].set_active()\n self.selectionModel().select(\n self.roi_dict[k].item.index(),\n QItemSelectionModel.Select)\n\n def _open_add_file_menu(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n filepath, _ = QFileDialog.getOpenFileName(\n self, 'Open image', '',\n 'edf, tiff, h5, hdf5 files (*.tiff *.edf *.h5 *.hdf5)', options=options)\n if filepath:\n self._model.add_file(Path(filepath))\n\n def _open_add_folder_menu(self):\n options = QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks\n folder_path = QFileDialog.getExistingDirectory(\n self, 'Choose directory containing edf, tiff or h5 files', '',\n options=options)\n if folder_path:\n self._model.add_folder(Path(folder_path))\n\n def _get_header_layout(self, item: QStandardItem, label: str):\n header_widget = QWidget(self)\n layout = QHBoxLayout()\n header_widget.setLayout(layout)\n label_widget = QLabel(label)\n layout.addWidget(label_widget, alignment=Qt.AlignLeft)\n layout.addStretch(1)\n self._model.appendRow(item)\n self.setIndexWidget(item.index(), header_widget)\n return layout\n\n def _on_clicked(self, index):\n item = self._model.itemFromIndex(index)\n if isinstance(item, AbstractGroupItem) and 
not item.content_uploaded:\n item.update_content()\n self.setExpanded(item.index(), True)\n elif isinstance(item, AbstractFileItem):\n if item.should_parse_file:\n self._parse_h5_item(item)\n else:\n data = item.get_data()\n if self.current_dataset != item and data.ndim == 2:\n self._change_image_item(item, data)\n elif (isinstance(item, RoiItem) and\n item.roi and item.parent() is self.current_dataset):\n item.roi.send_active()\n\n @save_execute('Could not read saved h5 image.', silent=False)\n def _parse_h5_item(self, item: H5GiwaxsItem):\n try:\n self._future_dataset = item\n key = item.h5_key\n with h5py.File(item.filepath, 'r') as f:\n group = f[key]\n if 'image' not in group.keys():\n return\n data = group['image'][()]\n if data.ndim != 2:\n return\n item.properties_item.update(**read_h5_dict(group.attrs))\n roi_key = 0\n for k in group.keys():\n dset = group[k]\n if 'radius' in dset.attrs.keys() and 'width' in dset.attrs.keys():\n params_dict = read_h5_dict(dset.attrs)\n params = RoiParameters(**params_dict, key=roi_key)\n self.add_roi(params)\n roi_key += 1\n finally:\n self._future_dataset = None\n item.should_parse_file = False\n self._change_image_item(item, data)\n\n def _change_image_item(self, item, data):\n self.image.set_image(data)\n sc_delete = SignalContainer(app_node=self)\n sc_create = SignalContainer(app_node=self)\n sc_create.geometry_changed(0)\n sc_create.image_changed(0)\n for roi in self.roi_dict.values():\n roi.set_inactive()\n sc_delete.segment_deleted(\n roi.value, signal_type=SignalContainer.SignalTypes.except_for_names)\n self.roi_dict = dict()\n self.current_dataset = item\n for child_item in self.current_dataset.get_child_rois():\n value = child_item.roi.value\n self.roi_dict[value.key] = child_item.roi\n sc_create.segment_created(\n value, signal_type=SignalContainer.SignalTypes.except_for_names)\n if self.current_dataset.has_properties:\n self._set_file_properties_to_image()\n else:\n self._set_init_properties_to_file()\n\n sc_delete.send()\n sc_create.send()\n\n def _set_file_properties_to_image(self):\n properties = self.current_dataset.properties_item.get_dict()\n if 'intensity_limits' in properties.keys():\n self.image.set_image_limits(properties['intensity_limits'])\n if 'beam_center' in properties.keys():\n self.image.set_beam_center(properties['beam_center'])\n if 'transformations' in properties.keys():\n for name in properties['transformations']:\n self.image.add_transformation(name)\n if 'scale' in properties.keys():\n self.image.set_scale(properties['scale'])\n\n def _set_init_properties_to_file(self):\n self.current_dataset.properties_item.update(\n beam_center=self.image.beam_center,\n transformations=self.image.transformation.transformation_list,\n intensity_limits=self.image.intensity_limits,\n scale=self.image.scale\n )\n\n def context_menu(self, position):\n item = self._model.itemFromIndex(self.indexAt(position))\n menu = QMenu()\n if isinstance(item, FolderGroupItem):\n update_folder = menu.addAction('Update folder')\n update_folder.triggered.connect(lambda: self.update_group(item))\n close_folder = menu.addAction('Close folder')\n close_folder.triggered.connect(lambda: self._on_closing_group(item))\n elif isinstance(item, H5FileItem):\n update_folder = menu.addAction('Update h5 file')\n update_folder.triggered.connect(lambda: self.update_group(item))\n close_folder = menu.addAction('Close h5 file')\n close_folder.triggered.connect(lambda: self._on_closing_group(item))\n elif isinstance(item, AbstractFileItem):\n save_menu = 
menu.addMenu('Save')\n save_as_h5 = save_menu.addAction('Save as h5 file')\n save_to_h5 = save_menu.addAction('Save to existing h5 file')\n save_as_h5.triggered.connect(item.save_as_h5)\n save_to_h5.triggered.connect(item.save_to_h5)\n if isinstance(item, H5DatasetItem):\n save_here = save_menu.addAction('Save to current h5 file')\n save_here.triggered.connect(item.save_here)\n else:\n return\n menu.exec_(self.viewport().mapToGlobal(position))\n\n def _on_closing_group(self, item: H5FileItem or FolderGroupItem):\n if self._group_contains_current_dataset(item):\n self.current_dataset = None\n for k, v in self.roi_dict.items():\n self.roi_dict[k] = EmptyROI(v.value)\n item.close()\n\n def _group_contains_current_dataset(self, item: H5FileItem or FolderGroupItem):\n return (\n self.current_dataset and\n (isinstance(item, H5FileItem) and\n item.filepath == self.current_dataset.filepath\n or\n isinstance(item, FolderGroupItem) and\n item.filepath in self.current_dataset.filepath.parents)\n )\n\n @save_execute('Error occured while trying to update folder.', silent=False)\n def update_group(self, item: H5FileItem or FolderGroupItem):\n if self._group_contains_current_dataset(item):\n self.current_dataset = None\n for k, v in self.roi_dict.items():\n self.roi_dict[k] = EmptyROI(v.value)\n item.removeRows(0, item.rowCount())\n item.update_content()\n self.setExpanded(item.index(), True)\n", "repo_name": "StarostinV/GIWAXS_GUI", "sub_path": "giwaxs_gui/gui/file_manager/file_view.py", "file_name": "file_view.py", "file_ext": "py", "file_size_in_byte": 13169, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QStandardItemModel", "line_number": 29, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 35, "usage_type": "name"}, {"api_name": "items.FolderGroupItem", "line_number": 36, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 40, "usage_type": "name"}, {"api_name": "items.file_item_factory", "line_number": 41, "usage_type": "call"}, {"api_name": "roi.roi_containers.BasicROIContainer", "line_number": 51, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTreeView", "line_number": 51, "usage_type": "name"}, {"api_name": "signal_connection.SignalConnector", "line_number": 52, "usage_type": "name"}, {"api_name": "roi.roi_containers.BasicROIContainer.__init__", "line_number": 53, "usage_type": "call"}, {"api_name": "roi.roi_containers.BasicROIContainer", "line_number": 53, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTreeView.__init__", "line_number": 54, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QTreeView", "line_number": 54, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QTreeView.NoEditTriggers", "line_number": 56, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QTreeView", "line_number": 56, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.CustomContextMenu", "line_number": 65, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 65, "usage_type": "name"}, {"api_name": "basic_widgets.RoundedPushButton", "line_number": 71, "usage_type": "call"}, {"api_name": "utils.Icon", "line_number": 71, "usage_type": "call"}, {"api_name": "basic_widgets.RoundedPushButton", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.Icon", "line_number": 74, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QStandardItem", "line_number": 77, 
"usage_type": "call"}, {"api_name": "signal_connection.SignalContainer", "line_number": 82, "usage_type": "name"}, {"api_name": "utils.save_execute", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.RoiParameters", "line_number": 102, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 103, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 104, "usage_type": "name"}, {"api_name": "roi.roi_widgets.value_changed.connect", "line_number": 105, "usage_type": "call"}, {"api_name": "roi.roi_widgets.value_changed", "line_number": 105, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 105, "usage_type": "name"}, {"api_name": "signal_connection.SignalContainer", "line_number": 107, "usage_type": "call"}, {"api_name": "roi.roi_widgets.status_changed.connect", "line_number": 108, "usage_type": "call"}, {"api_name": "roi.roi_widgets.status_changed", "line_number": 108, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 108, "usage_type": "name"}, {"api_name": "roi.roi_widgets.arbitrary_signal.connect", "line_number": 109, "usage_type": "call"}, {"api_name": "roi.roi_widgets.arbitrary_signal", "line_number": 109, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 109, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 111, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 112, "usage_type": "argument"}, {"api_name": "roi.roi_widgets", "line_number": 113, "usage_type": "name"}, {"api_name": "utils.RoiParameters", "line_number": 115, "usage_type": "name"}, {"api_name": "roi.roi_widgets.FileWidgetRoi", "line_number": 117, "usage_type": "call"}, {"api_name": "roi.roi_widgets.EmptyROI", "line_number": 119, "usage_type": "call"}, {"api_name": "roi.roi_widgets.FileWidgetRoi", "line_number": 121, "usage_type": "name"}, {"api_name": "roi.roi_widgets.EmptyROI", "line_number": 121, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 123, "usage_type": "argument"}, {"api_name": "roi.roi_widgets.FileWidgetRoi", "line_number": 123, "usage_type": "argument"}, {"api_name": "items.RoiItem", "line_number": 124, "usage_type": "call"}, {"api_name": "roi.roi_widgets.value", "line_number": 124, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 124, "usage_type": "name"}, {"api_name": "roi.roi_widgets.set_item", "line_number": 125, "usage_type": "call"}, {"api_name": "roi.roi_widgets", "line_number": 125, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 129, "usage_type": "name"}, {"api_name": "roi.roi_widgets.FileWidgetRoi", "line_number": 131, "usage_type": "name"}, {"api_name": "roi.roi_widgets.EmptyROI", "line_number": 131, "usage_type": "name"}, {"api_name": "roi.roi_widgets", "line_number": 132, "usage_type": "argument"}, {"api_name": "roi.roi_widgets.FileWidgetRoi", "line_number": 132, "usage_type": "argument"}, {"api_name": "roi.roi_widgets.item.row", "line_number": 133, "usage_type": "call"}, {"api_name": "roi.roi_widgets.item", "line_number": 133, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 133, "usage_type": "name"}, {"api_name": "signal_connection.StatusChangedContainer", "line_number": 135, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QItemSelectionModel.Deselect", "line_number": 142, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QItemSelectionModel", "line_number": 142, "usage_type": "name"}, {"api_name": 
"PyQt5.QtCore.QItemSelectionModel.Select", "line_number": 148, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.QItemSelectionModel", "line_number": 148, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.Options", "line_number": 151, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 151, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.DontUseNativeDialog", "line_number": 152, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 152, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "line_number": 153, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 153, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 157, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.ShowDirsOnly", "line_number": 160, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 160, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.DontResolveSymlinks", "line_number": 160, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QFileDialog.getExistingDirectory", "line_number": 161, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFileDialog", "line_number": 161, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 165, "usage_type": "call"}, {"api_name": "PyQt5.QtGui.QStandardItem", "line_number": 167, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QWidget", "line_number": 168, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 169, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QLabel", "line_number": 171, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt.AlignLeft", "line_number": 172, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 172, "usage_type": "name"}, {"api_name": "items.AbstractGroupItem", "line_number": 180, "usage_type": "argument"}, {"api_name": "items.AbstractFileItem", "line_number": 183, "usage_type": "argument"}, {"api_name": "items.RoiItem", "line_number": 190, "usage_type": "argument"}, {"api_name": "items.H5GiwaxsItem", "line_number": 195, "usage_type": "name"}, {"api_name": "h5py.File", "line_number": 199, "usage_type": "call"}, {"api_name": "utils.read_h5_dict", "line_number": 206, "usage_type": "call"}, {"api_name": "utils.read_h5_dict", "line_number": 211, "usage_type": "call"}, {"api_name": "utils.RoiParameters", "line_number": 212, "usage_type": "call"}, {"api_name": "utils.save_execute", "line_number": 194, "usage_type": "call"}, {"api_name": "signal_connection.SignalContainer", "line_number": 222, "usage_type": "call"}, {"api_name": "signal_connection.SignalContainer", "line_number": 223, "usage_type": "call"}, {"api_name": "roi.roi_widgets", "line_number": 226, "usage_type": "name"}, {"api_name": "roi.roi_widgets.set_inactive", "line_number": 227, "usage_type": "call"}, {"api_name": "roi.roi_widgets", "line_number": 227, "usage_type": "name"}, {"api_name": "roi.roi_widgets.value", "line_number": 229, "usage_type": "attribute"}, {"api_name": "roi.roi_widgets", "line_number": 229, "usage_type": "name"}, {"api_name": "signal_connection.SignalContainer.SignalTypes", "line_number": 229, "usage_type": "attribute"}, {"api_name": "signal_connection.SignalContainer", "line_number": 229, "usage_type": "name"}, {"api_name": "signal_connection.SignalContainer.SignalTypes", "line_number": 236, "usage_type": "attribute"}, {"api_name": 
"signal_connection.SignalContainer", "line_number": 236, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMenu", "line_number": 267, "usage_type": "call"}, {"api_name": "items.FolderGroupItem", "line_number": 268, "usage_type": "argument"}, {"api_name": "items.H5FileItem", "line_number": 273, "usage_type": "argument"}, {"api_name": "items.AbstractFileItem", "line_number": 278, "usage_type": "argument"}, {"api_name": "items.H5DatasetItem", "line_number": 284, "usage_type": "argument"}, {"api_name": "items.H5FileItem", "line_number": 291, "usage_type": "name"}, {"api_name": "items.FolderGroupItem", "line_number": 291, "usage_type": "name"}, {"api_name": "roi.roi_widgets.EmptyROI", "line_number": 295, "usage_type": "call"}, {"api_name": "items.H5FileItem", "line_number": 298, "usage_type": "name"}, {"api_name": "items.FolderGroupItem", "line_number": 298, "usage_type": "name"}, {"api_name": "items.H5FileItem", "line_number": 301, "usage_type": "argument"}, {"api_name": "items.FolderGroupItem", "line_number": 304, "usage_type": "argument"}, {"api_name": "items.H5FileItem", "line_number": 309, "usage_type": "name"}, {"api_name": "items.FolderGroupItem", "line_number": 309, "usage_type": "name"}, {"api_name": "roi.roi_widgets.EmptyROI", "line_number": 313, "usage_type": "call"}, {"api_name": "utils.save_execute", "line_number": 308, "usage_type": "call"}]} +{"seq_id": "432572204", "text": "\nfrom typing import List\nimport numpy as np\nimport os\nimport multiprocessing as mp\nimport time\nfrom datetime import datetime\nimport pickle\nfrom dataclasses import dataclass\n\nimport argparse\n\nfrom rankers import BayesianRanker\n\nfrom initializers.bow_initializer import BoWInitializer\n\nfrom displays.ransam_display import RanSamDisplay\nfrom displays import TopNDisplay\nfrom displays import SOMDisplay \n\nfrom users import RanSamPriorUser\nfrom users import LogitUser\nfrom users import IdealUser\nfrom users import NullUser\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--seed\", default=42, type=int, help=\"Random seed.\")\nparser.add_argument(\"--processes\", default=-1, type=int, help=\"Number of precesses spawned.\")\n\nparser.add_argument(\"--params_batch\", default=0, type=int, help=\"Which experiments to be conducted [0,1,2].\")\n\nparser.add_argument(\"--annotations\", default=\"data/annotations.csv\", type=str,\n help=\"Annotations to be simulated.\")\n\nparser.add_argument(\"--dataset_path\", default=\"v3c1\", type=str,\n help=\"Root to dataset path.\")\nparser.add_argument(\"--features_name\", default=\"V3C1_20191228.w2vv.images.normed.128pca.viretfromat\", type=str,\n help=\"Name of file with image features.\")\n\nparser.add_argument(\"--keywords_list_name\", default=\"word2idx.txt\", type=str,\n help=\"Name of file with keyword features.\")\nparser.add_argument(\"--kw_features_name\", default=\"txt_weight-11147x2048floats.bin\", type=str,\n help=\"Name of file with keyword features.\")\nparser.add_argument(\"--kw_bias_name\", default=\"txt_bias-2048floats.bin\", type=str,\n help=\"Name of file with keyword bias.\")\nparser.add_argument(\"--pca_matrix_name\", default=\"V3C1_20191228.w2vv.pca.matrix.bin\", type=str,\n help=\"Name of file with pca matrix.\")\nparser.add_argument(\"--pca_mean_name\", default=\"V3C1_20191228.w2vv.pca.mean.bin\", type=str,\n help=\"Name of file with pca mean.\")\n\nparser.add_argument(\"--pickle_root\", default=\"pickle\", type=str,\n help=\"Root of pickle models.\")\nparser.add_argument(\"--pickle_model\", default=\"pcu.prior.pickle\", 
type=str,\n help=\"Name of pickled user model.\")\n\nparser.add_argument(\"--verbose\", default=False, action=\"store_true\", help=\"Verbose\")\n\nparser.add_argument(\"--output_prefix\", default=\"\", type=str,\n help=\"Prefix of the output file.\")\n\n@dataclass\nclass SimParameters:\n likes: int\n display_types: list\n database_part: float\n text_query: str\n target_id: int\n\nclass Simulator(mp.Process):\n\n def __init__(self, sim_args, par_q: mp.Queue, res_q: mp.Queue, **wargs):\n super().__init__(**wargs)\n np.random.seed(args.seed)\n self._par_q = par_q\n self._res_q = res_q\n\n features = np.fromfile(os.path.join(sim_args.dataset_path, sim_args.features_name), dtype='float32')\n features = features[3:]\n features = features.reshape(int(features.shape[0] / 128), 128)\n self._features = features\n self._kw_init = BoWInitializer(features, \n os.path.join(sim_args.dataset_path, sim_args.keywords_list_name), \n os.path.join(sim_args.dataset_path, sim_args.kw_features_name),\n os.path.join(sim_args.dataset_path, sim_args.kw_bias_name),\n os.path.join(sim_args.dataset_path, sim_args.pca_matrix_name),\n os.path.join(sim_args.dataset_path, sim_args.pca_mean_name)\n )\n with open(os.path.join(sim_args.pickle_root, sim_args.pickle_model), 'rb') as handle:\n self._user = pickle.load(handle)\n self._user._features = features\n self._ranker = BayesianRanker(features, features.shape[0])\n\n self._displays = {\"som\": SOMDisplay(self._features, seed=sim_args.seed), \"top\": TopNDisplay()}\n\n def run(self):\n while True:\n par = self._par_q.get()\n if par is None:\n break\n \n # Parse simulation parameters\n likes = par.likes\n display_types = par.display_types\n database_part = par.database_part\n text_query = par.text_query\n target_id = par.target_id\n\n # Make some assumtions on parameters\n assert likes > 0\n assert likes < 64\n assert database_part is None or (database_part <= 1.0 and database_part > 0.0) \n assert isinstance(target_id, int)\n\n # Initialize search structures\n self._user._count = likes\n self._user._target = target_id\n\n self._ranker.reset()\n self._ranker._scores = self._kw_init.score(text_query)\n self._ranker.normalize()\n\n # Set zero score to filtered elements\n zero_indeces = np.array([], dtype=np.int64)\n if database_part is not None:\n nonzero_count = int(database_part * self._ranker._scores.shape[0])\n zero_indeces = np.flip(np.argsort(self._ranker._scores))[nonzero_count:]\n self._ranker._scores[zero_indeces] = 0 \n\n # Run simulations\n found = -1\n for iteration, disp_type in enumerate(display_types):\n display = self._displays[disp_type].generate(self._ranker.scores)\n\n if target_id in display:\n found = iteration\n break\n\n likes = self._user.decision(display)\n self._ranker.apply_feedback(likes, display)\n self._ranker._scores[zero_indeces] = 0\n \n # Return result\n par.found = found\n self._res_q.put(par)\n \n\ndef parameters_generation0(args, targets: list, text_queries: list, par_q: mp.Queue):\n like_counts = range(1, 5)\n display_types = [[\"som\" for _ in range(10)], \n [\"top\" for _ in range(10)],\n [\"som\" for _ in range(5)] + [\"top\" for _ in range(5)],\n [(\"som\" if i % 2 == 0 else \"top\") for i in range(10)],\n [(\"som\" if i % 2 == 1 else \"top\") for i in range(10)]]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, None, text_query, tar))\n reps += 1\n\n return reps\n\n\ndef parameters_generation1(args, targets: list, 
text_queries: list, par_q: mp.Queue):\n like_counts = [3]\n display_types = [[(\"som\" if i % 2 == 0 else \"top\") for i in range(10)]]\n db_parts = [0.05, 0.1]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for db_part in db_parts:\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, db_part, text_query, tar))\n reps += 1\n\n return reps\n\n\ndef parameters_generation2(args, targets: list, text_queries: list, par_q: mp.Queue):\n like_counts = [3]\n display_types = [[(\"som\" if i % 2 == 0 else \"top\") for i in range(10)]]\n reps = 0\n for lik in like_counts:\n for tar, text_query in zip(targets, text_queries):\n for disp_type in display_types:\n par_q.put(SimParameters(lik, disp_type, None, text_query, tar))\n reps += 1\n\n return reps\n\ndef main(args):\n np.random.seed(args.seed)\n processes = args.processes\n if processes <= 0:\n processes = mp.cpu_count()\n \n par_q = mp.Queue()\n res_q = mp.Queue()\n jobs = []\n for i in range(processes):\n sim = Simulator(args, par_q, res_q, name=f\"Simulator {i}\")\n jobs.append(sim)\n sim.start()\n \n # Add parameters\n targets = []\n text_queries = []\n with open(args.annotations, \"r\") as f:\n for line in f.readlines():\n target_id, text_query = line.strip().split(\",\")\n targets.append(int(target_id))\n text_queries.append(text_query)\n\n reps = 0\n if args.params_batch == 0:\n reps = parameters_generation0(args, targets, text_queries, par_q)\n elif args.params_batch == 1:\n reps = parameters_generation1(args, targets, text_queries, par_q)\n elif args.params_batch == 2:\n reps = parameters_generation2(args, targets, text_queries, par_q)\n else:\n raise Exception(\"Unknown type of params_batch\")\n\n # Add poison pill\n for i in range(processes):\n par_q.put(None)\n\n # Collect results\n start = datetime.now()\n print(\"Simulations started\\n\")\n res = []\n with open(f\"data/{args.output_prefix}strategy_search_output.{int(time.time())}.csv\", \"w\") as of:\n for i in range(reps):\n last_res = res_q.get()\n res.append(last_res)\n delta = datetime.now() - start\n per_instance = delta / len(res)\n left = (reps - len(res)) * per_instance\n print(f\"Done: {len(res)}/{reps}\\tTime elapsed: {delta}\\tTime left: {left}\\t\\t\\t\", end=\"\\n\", flush=True)\n of.write(f\"{last_res.likes},{last_res.display_types},{last_res.database_part},{last_res.text_query},{last_res.target_id},{last_res.found}\\n\")\n of.flush()\n\n print(\"\\n********************\")\n print(res, flush=True)\n\nif __name__ == \"__main__\":\n args = parser.parse_args([] if \"__file__\" not in globals() else None)\n main(args)\n", "repo_name": "siret-junior/somhunter-simulator", "sub_path": "strategy_search.py", "file_name": "strategy_search.py", "file_ext": "py", "file_size_in_byte": 9451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 62, "usage_type": "name"}, {"api_name": "multiprocessing.Process", "line_number": 70, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.fromfile", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "initializers.bow_initializer.BoWInitializer", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 90, "usage_type": "call"}, {"api_name": "rankers.BayesianRanker", "line_number": 92, "usage_type": "call"}, {"api_name": "displays.SOMDisplay", "line_number": 94, "usage_type": "call"}, {"api_name": "displays.TopNDisplay", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 124, "usage_type": "attribute"}, {"api_name": "numpy.flip", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 127, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 148, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 165, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 193, "usage_type": "attribute"}, {"api_name": "multiprocessing.cpu_count", "line_number": 196, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 198, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 230, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 230, "usage_type": "name"}, {"api_name": "time.time", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 237, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 237, "usage_type": "name"}]} +{"seq_id": "43265509282", "text": "import json\nfrom fastapi import Request, FastAPI\nimport generator\nfrom fastapi.middleware.cors import CORSMiddleware\napp = FastAPI()\n\norigins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.post(\"/generate\")\nasync def read_root(request: Request):\n payload = await request.json()\n print(payload['message'])\n\n res = generator.generate(payload['message'])\n print('This is result')\n print(res)\n return {\n 'result': res\n }\n", "repo_name": "AkshitTyagi7/Echosense-gpt", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.FastAPI", 
"line_number": 5, "usage_type": "call"}, {"api_name": "fastapi.middleware.cors.CORSMiddleware", "line_number": 10, "usage_type": "argument"}, {"api_name": "fastapi.Request", "line_number": 19, "usage_type": "name"}, {"api_name": "generator.generate", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "11661057136", "text": "import pygame\nimport sys\nfrom game import *\nfrom solve import *\nfrom minimum_moves import *\nfrom gamestate import *\n\n\ndef menu(): \n \n pygame.init()\n screen = pygame.display.set_mode((400,400)) \n pygame.display.set_caption(\" Choose Your Destiny !!!\")\n image = pygame.image.load(\"images/bg.png\")\n play = pygame.image.load('images/PLAY.png').convert_alpha()\n img1 = pygame.image.load('images/image1.png').convert_alpha()\n img2 = pygame.image.load('images/image2.png').convert_alpha()\n img3 = pygame.image.load('images/image3.png').convert_alpha()\n solve = pygame.image.load('images/SOLVE.png').convert_alpha()\n ms = pygame.image.load('images/ms.png').convert_alpha()\n button_play = Button(10, 350, play,screen)\n button_solve = Button(150, 350, solve,screen) \n button_solve2 = Button(280, 350, ms,screen)\n color1 = Button(10, 260, img1,screen)\n color2 = Button(150, 260, img2,screen)\n color3 = Button(280, 260, img3,screen) \n pygame.mixer.init()\n crash_sound = pygame.mixer.Sound(\"sounds/background.wav\")\n crash_sound.set_volume(0.2)\n crash_sound.play()\n while True:\n\n screen.blit(image, (0,0))\n\n if button_play.draw():\n pygame.quit()\n pygame.mixer.init()\n button = pygame.mixer.Sound(\"sounds/click.wav\")\n button.play()\n main()\n\n\n if button_solve.draw():\n pygame.quit()\n Solve()\n \n\n if button_solve2.draw():\n pygame.quit()\n main2()\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT :\n pygame.quit() \n\n pygame.display.update()\n\n", "repo_name": "sanatankafle12/DSA_Project", "sub_path": "Menu.py", "file_name": "Menu.py", "file_ext": "py", "file_size_in_byte": 1716, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 20, "usage_type": "attribute"}, 
{"api_name": "pygame.mixer.init", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.mixer.init", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 49, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 53, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 57, "usage_type": "attribute"}]} +{"seq_id": "6858366016", "text": "# import\nimport numpy as np\nimport scipy.stats as stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport altair as alt\nimport warnings\nimport streamlit as st\nfrom keras.layers import Dense, Activation\nfrom keras.models import Sequential\nfrom tensorflow import keras\nfrom sklearn.cluster import KMeans\n# folium\nimport folium\nimport folium.plugins as plugins\nfrom streamlit_folium import folium_static\n\nfrom sklearn.linear_model import LinearRegression, Lasso\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\n# NLP\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nfrom PIL import Image\nfrom nltk.tokenize import word_tokenize\nfrom nltk.util import ngrams\nimport string\n\nplt.style.use('ggplot')\nalt.data_transformers.disable_max_rows()\n# SETTING PAGE CONFIG TO WIDE MODE\n# st.set_page_config(layout=\"wide\")\n# st.balloons()\n\n# LAYING OUT THE TOP SECTION OF THE APP\nrow1_1, row1_2 = st.columns((2, 3))\n\nwith row1_1:\n st.title(\"Airbnb in LA\")\n st.image('airbnb.jpeg', width=280, caption=\"Where are you staying tonight?\")\n 'What makes the lists of Airbnb more expensive than the others 💭'\n\nwith row1_2:\n \"\"\"\n - Check this out on [ my github] (https://github.com/hitomi1104/math10_final_project) \n * Data Source: \n http://insideairbnb.com/get-the-data.html\n\n This App explores Airbnb data which someone webscraped from the actual website before \n By sliding the slider on the left you can view different slices of price and explore different trends.\n \"\"\"\n\n# LOADING DATA\n\n# data = pd.read_csv('/Users/hitomihoshino/Desktop/DS/Projects/projects/Airbnb_LA/listings.csv')\ndata = pd.read_csv('listings.csv')\n\nst.header('Data Dictionary')\ndata_dict = {'id': 'Airbnbs unique identifier for the listing',\n 'name': 'Name of the listing',\n 'host_id': 'Airbnbs unique identifier for the host/user',\n 'host_name': 'Name of the host. 
Usually just the first name(s).',\n 'neighbourhood_group': 'The neighbourhood group as geocoded using the latitude and longitude against neighborhoods as defined by open or public digital shapefiles.',\n 'neighbourhood': '',\n 'latitude': 'Uses the World Geodetic System (WGS84) projection for latitude and longitude.',\n 'longitude': 'Uses the World Geodetic System (WGS84) projection for latitude and longitude.',\n 'room_type': '[Entire home/apt|Private room|Shared room|Hotel]',\n 'price': 'daily price in local currency',\n 'minimum_nights': 'minimum number of night stay for the listing (calendar rules may be different)',\n 'number_of_reviews': 'The number of reviews the listing has',\n 'last_review': 'The date of the last/newest review',\n 'reviews_per_month': '',\n 'calculated_host_listings_count': 'The number of listings the host has in the current scrape, in the city/region geography.',\n 'availability_365': 'avaliability_x. The availability of the listing x days in the future as determined by the calendar. Note a listing may not be available because it has been booked by a guest or blocked by the host.',\n 'number_of_reviews_ltm': 'The number of reviews the listing has (in the last 12 months)',\n 'license': 'The licence/permit/registration number'\n }\ndata_dict = pd.DataFrame(data_dict, index=['descriptions']).T\nst.table(data_dict)\n\n'''\nIn the data cleaning processes, I droppped some of the variables such as id, which doens't give any insights in predicting the price of the house\nI will explain those deatails in my code\n'''\n\n# DATA CLEANING\nst.caption('( I am not showing Data Cleaning on my App but you can find it in my code )')\ndata.drop_duplicates(inplace=True)\n# dropping meaningless columns\ndata.drop(['id', 'host_id', 'host_name'], axis=1, inplace=True)\n# checking null values\ndata.isnull().sum(axis=0)\n# dropping columns that are overlapping with other cols such as reviews, last_reviews, and reviews_per_month,\n# and ones having too many nan values that make it difficult to fill with averages, forward-fill, or backward-fill\ndf = data.copy()\ndf.drop([\"last_review\", \"reviews_per_month\", \"license\"], axis=1, inplace=True)\n# Since there are only 2 nan values for the column name, fill nan with 'no info'\n# df[df[\"name\"].isnull()]\ndf = df.where(pd.notnull(df), \"no info\")\ndf.isnull().sum(axis=0)\n\n# changing the name of columns\ndf.set_axis(['name', 'neighbourhood_group', 'neighbourhood', 'latitude', 'longitude',\n 'room_type', 'price', 'minimum_nights', 'number_of_reviews',\n 'number_of_host_listings', 'availability',\n 'reviews_year'], axis=1, inplace=True)\n# changing the order of the columns for the col price to be the first so the visualizations can look better and clearer\ncols = df.columns.tolist()\ncols = cols[6:12] + cols[0:6]\ndf = df[cols]\n\ndf.head()\n\n################################################################################ EDA ################################################################################\nst.header('Exploratory Data Analysis')\nst.subheader('Distributions and statistical analysis of the target variable Price')\n'''\nExploring the target variable which is price and see its distributions\n'''\nrow1_1, row1_2 = st.columns((2))\n\nwith row1_1:\n fig, ax = plt.subplots(figsize=(10, 5))\n sns.set_style(style='darkgrid')\n sns.distplot(df['price'], bins=100, kde=False, color='blue')\n st.pyplot(fig)\n '''\n As you can see some of the houses are very expensive which makes it outliers\n Those listings had outstanding features 
to be expensive. \n '''\n st.text(\"\")\n\n st.write('** Decision **')\n '''\n Most of the listings are less than 500 dollers per night\n So, I will conisder the listings only less than 500 to do some further analysis and predicting the price\n '''\n\nwith row1_2:\n # dropping outliers; also considering even many 5 star hotels don't charge more than 500 for a normal stay\n df = df[df['price'] < 500]\n fig, ax = plt.subplots(figsize=(10, 5))\n sns.set_style(style='darkgrid')\n sns.distplot(df['price'], bins=100, kde=False, color='blue')\n st.pyplot(fig)\n '''\n **Statistics of the data (which I use to build ML models)**\n '''\n st.write(df['price'].describe(include=all))\n # Skew and kurtosis for SalePrice\n \"Skewness: %f\" % df['price'].skew()\n \"Kurtosis: %f\" % df['price'].kurt()\n \"Mode: %f\" % df['price'].mode()\n\n##########################################################################################################################\nst.subheader('Distributions and statistical analysis of all variables')\nst.caption('Exclusing the colulmn name which I will show it in wordcloud instead later')\nbar1, bar2 = st.columns((2, 1))\n\nwith bar1:\n fig, ax = plt.subplots(3, 4, figsize=(20, 15))\n df['minimum_nights'].value_counts().head(10).plot(ax=ax[0][0], kind='bar', title='Minimum night')\n df['number_of_reviews'].value_counts().head(10).plot(ax=ax[0][1], kind='bar', title='Number of reviews')\n df['number_of_host_listings'].value_counts().head(10).plot(ax=ax[0][2], kind='bar', title='Number of host listings')\n df['availability'].value_counts().head(10).plot(ax=ax[0][3], kind='bar', title='Availability')\n\n df['reviews_year'].value_counts().head(10).plot(ax=ax[1][0], kind='bar', title='Reviews per year')\n df['neighbourhood_group'].value_counts().head(10).plot(ax=ax[1][1], kind='bar', title='Neighborhood group')\n df['neighbourhood'].value_counts().head(20).plot(ax=ax[1][2], kind='bar', title=\"Neighborhood\")\n df['latitude'].value_counts().head(10).plot(ax=ax[1][3], kind='bar', title=\"Latitude\")\n df['longitude'].value_counts().head(10).plot(ax=ax[2][0], kind='bar', title='Longitude')\n\n # fixing spcaing between bar charts avoiding overlapping words\n '''[reference] stackoverflow https://stackoverflow.com/questions/6541123/improve-subplot-size-spacing-with-many-subplots-in-matplotlib'''\n fig.tight_layout()\n st.pyplot(fig)\n\n '''\n In this part, I made distribution plots for both numerical and categorical data and sorted values in descending order\n So, for instance in the variable neighborhood, I found \"Venice\" had the most listings.\n I was surprised to see that most of the listings had the restriction to stay more than 30 minimun nights! Also, most of the listings are\n listed for the first time but most of them are booked when I observe the number of host listings and availability variables. 
\n '''\nwith bar2:\n # Showing all the unique values\n '''\n ** Unique values of each columns **\n '''\n df = df[(df['minimum_nights'] <= 31) & (df['room_type'] == 'Entire home/apt')]\n df.drop(columns='room_type', inplace=True)\n for col in df.columns:\n st.write(' - {} : {} unique values'.format(col, len(df[col].unique())))\n\n##########################################################################################################################\nst.subheader('Heatmaps')\n'''[reference]: Altair documentation https://altair-viz.github.io/gallery/layered_heatmap_text.html '''\n\ncor_data = (df.drop(columns=['name', 'neighbourhood_group', 'neighbourhood'])\n .corr().stack()\n .reset_index()\n .rename(columns={0: 'correlation', 'level_0': 'variable', 'level_1': 'variable2'}))\ncor_data['correlation_label'] = cor_data['correlation'].map('{:.2f}'.format) # Round to 2 decimal\n\nbase = alt.Chart(cor_data).encode(\n x='variable2:O',\n y='variable:O'\n).properties(\n width=500,\n height=400\n)\n\ntext = base.mark_text().encode(\n text='correlation_label',\n color=alt.condition(\n alt.datum.correlation > 0.5,\n alt.value('white'),\n alt.value('black')\n )\n)\n\ncor_plot = base.mark_rect().encode(\n color='correlation:Q'\n)\n\ncor_plot + text\n\ncor1, cor2 = st.columns((3, 2))\nwith cor1:\n # Top correlations\n corr_matrix = df.corr()\n d_top = corr_matrix['price'].sort_values(ascending=False)\n st.write('Top Correlations are: \\n', d_top.head(10))\nwith cor2:\n # Bottom correlations\n d_down = corr_matrix['price'].sort_values(ascending=True)\n st.write('Top Negative Correlations are: \\n', d_down.head(10))\n\n################################################################# MAP #########################################################################\nst.subheader('Exploring reginons with Maps')\n\nmap_1, map_2 = st.columns((2))\n\nwith map_1:\n st.caption('Kmeans clustering of latitude and longitude')\n s = st.slider('select the number of iterations', 1, 20)\n kmeans = KMeans(n_clusters=5, max_iter=s, n_init=1)\n\n val = df[['longitude', 'latitude']]\n\n kmeans.fit(val)\n val[\"cluster\"] = kmeans.predict(val)\n\n c = alt.Chart(val).mark_circle().encode(\n # x ='longitude',\n # y = 'latitude',\n x=alt.X('longitude', scale=alt.Scale(domain=[-119, -117])),\n y=alt.Y('latitude', scale=alt.Scale(domain=[33, 35])),\n color=\"cluster:N\"\n )\n st.altair_chart(c, use_container_width=True)\n\nwith map_2:\n st.caption('folium map visualization')\n price_selected = st.slider(\"Select the range of the price\", value=[0, 500])\n df_price = df[df['price'] <= price_selected[1]]\n df_price = df_price[df_price['price'] >= price_selected[0]]\n count = df_price[['latitude', 'longitude']]\n m = folium.Map([34, -118], zoom_start=7)\n\n plugins.Fullscreen(position='topright', # Full screen\n title='Click to Expand',\n title_cancel='Click to Exit',\n force_separate_button=True).add_to(m)\n\n plugins.MousePosition().add_to(m) ## get coordinates.\n plugins.MarkerCluster(count).add_to(m)\n st.markdown(\"[reference] streamlit documentation of visualizing folium maps \")\n folium_static(m)\n st.markdown(\"[reference] https://python-visualization.github.io/folium/plugins.html\")\n\n################################################################################ NLP ################################################################################\n# s = st.slider(\"Select the range of the price\", value=[0, 500])\n\n# vectorize the word using tfidvectorizer from sklearn\nst.subheader(\"Vectorizing the columns name and show 
the frequent words using WordCloud\")\ntf = TfidfVectorizer(stop_words='english', min_df=3)\ntf.fit(df['name'])\n\nname_tf = tf.transform(df['name'])\nname_df = pd.DataFrame(name_tf.todense(), columns=tf.get_feature_names())\n\ntf1, tf2, tf3 = st.columns((2, 3, 1))\n\nwith tf1:\n fig, ax = plt.subplots()\n top_texts = name_df.sum().sort_values(ascending=False)\n top_texts.head(15).plot(kind='barh')\n st.pyplot(fig)\n\nwith tf2:\n fig, ax = plt.subplots()\n # Create and generate a word cloud image:\n Cloud = WordCloud(width=500, height=400,\n background_color='black',\n stopwords=stopwords,\n min_font_size=3,\n min_word_length=0).generate_from_frequencies(top_texts)\n\n # background_color=\"white\", max_words=50).generate_from_frequencies(top_texts)\n\n # Display the generated image:\n plt.imshow(Cloud, interpolation='bilinear')\n plt.axis(\"off\")\n st.pyplot(fig)\n\nwith tf3:\n '''\n Words that describe the type of the listings such as parking and studio, comfort and convinience such as cozy, near, and modern were top words.\n\n In the future, I am interested to see the difference in words among different prices of the listings. \n '''\n\n################################################################################ ML ################################################################################\nst.header(\"Applying ML to predict the price of the listed Airbnb houses\")\n\" ** Since I am dealing with a Regressor, I will use R^2 Scores to compare which models have the best score ** \"\n\nst.subheader(\"Part I: Prediction using only numeric columns\")\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics import accuracy_score, r2_score, mean_squared_error as MSE\nfrom pandas.api.types import is_numeric_dtype\n\nnumeric_cols = [c for c in df.columns if is_numeric_dtype(df[c])]\n\ndf_num = df[numeric_cols]\n\n# Define target and predictors\nX = df_num.copy()\ny = X.pop('price')\n\n# diving data into trainning and testing\nX_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.2,\n random_state=10)\n\n# standardlize the train/test sets of data\nscaler = StandardScaler()\nX_train = pd.DataFrame(scaler.fit_transform(X_train),\n columns=X_train.columns)\nX_test = pd.DataFrame(scaler.transform(X_test),\n columns=X_train.columns)\n\n# Applying Linear Regression\nst.caption(\"LR\")\nfrom sklearn.linear_model import LinearRegression\n\nlr = LinearRegression()\nlr.fit(X_train, y_train)\nlr_cv = cross_val_score(lr, X_train, y_train, cv=5)\n\nlr1, lr2, lr3 = st.columns((1, 2, 2))\n\nwith lr1:\n st.write('Cross-validated scores:', lr_cv.T)\n st.write('Average score:', lr_cv.mean())\n st.write('Trainning Score:', lr.score(X_train, y_train))\n st.write('Test Score:', lr.score(X_test, y_test))\n\nwith lr2:\n # collect the model coefficients in a dataframe\n df_coef = pd.DataFrame(lr.coef_, index=X_train.columns,\n columns=['coefficients'])\n # calculate the absolute values of the coefficients\n\n df_coef['coef_abs'] = df_coef.coefficients.abs()\n\n coefs = pd.concat([df_coef['coefficients'].sort_values().head(4),\n df_coef['coefficients'].sort_values().tail(3)])\n\n fig, ax = plt.subplots()\n coefs.plot(kind=\"barh\", figsize=(12, 10))\n plt.title(\"Importance of coefficients\")\n st.pyplot(fig)\n\nwith lr3:\n 'error running it in streamlit not jupyter'\n '''\n lr_pred = lr.predict(X_test)\n sns.set_style(\"darkgrid\")\n fig, ax = plt.subplot()\n fig = sns.regplot(lr_pred, y_test)\n st.pyplot(fig)\n 
'''\n\n################################################################################\n\nst.subheader(\"Part II: Prediction after dummifying categorical predictors\")\nst.write('before dummifying')\ndf_dum = df.drop(columns=['name'])\nst.table(df_dum.head())\n\ndf_dum = pd.get_dummies(df_dum,\n prefix=['neighbourhood', 'neighbourhood_group'],\n drop_first=True)\n\n'''\nDummified data sample\n'''\nst.table(df_dum.head())\n\n################################################################################\ncorr_matrix = df_dum.corr()\n\nd_top = corr_matrix['price'].sort_values(ascending=False)\nst.write('Top 15 correlated variables to price are: \\n', d_top.head(15))\n\n# with lin2:\nst.subheader(\"Linear Regression\")\n\nlin1, lin2, lin3 = st.columns((3))\n\nwith lin1:\n # Define\n X = df_dum.copy()\n y = X.pop('price')\n\n # diving data into trainning and testing\n X_train, X_test, y_train, y_test = train_test_split(\n X,\n y,\n test_size=0.2,\n random_state=10)\n\n # standardlize the train/test sets of data\n scaler = StandardScaler()\n X_train = pd.DataFrame(scaler.fit_transform(X_train),\n columns=X_train.columns)\n X_test = pd.DataFrame(scaler.transform(X_test),\n columns=X_train.columns)\n\n # Linear Regression\n from sklearn.linear_model import LinearRegression\n\n lr = LinearRegression()\n lr.fit(X_train, y_train)\n lr_cv = cross_val_score(lr, X_train, y_train, cv=5)\n st.write('Cross-validated scores:', lr_cv)\n st.write('Average score:', lr_cv.mean())\n st.write('Trainning Score:', lr.score(X_train, y_train))\n st.write('Test Score:', lr.score(X_test, y_test))\n\nwith lin2:\n # collect the model coefficients in a dataframe\n df_coef = pd.DataFrame(lr.coef_, index=X_train.columns,\n columns=['coefficients'])\n # calculate the absolute values of the coefficients\n df_coef['coef_abs'] = df_coef.coefficients.abs()\n\n coefs = pd.concat([df_coef['coefficients'].sort_values().head(10),\n df_coef['coefficients'].sort_values().tail(10)])\n\n fig, ax = plt.subplots()\n coefs.plot(kind=\"barh\", figsize=(12, 10))\n plt.title(\"Importance of coefficients\")\n st.pyplot(fig)\n\nwith lin3:\n 'error running it in streamlit not jupyter'\n '''\n lr_pred = lr.predict(X_test)\n sns.set_style(\"darkgrid\")\n plt.figure(figsize=(12,10))\n sns.regplot(lr_pred,y_test)\n '''\n\n'**observation**'\n'''\nAs you can observe from the cross validation scores, Linear Regression is not working using dummified data. And, it makes a lot of sense since\nafter the dummifications, most of the values in data turns into 0 which makes it a **sparse matrix**\nAs you can see from the correlatiion top 15 above, those are the variables that we should consider more when predicting the price!\nSo, I will regularization techniques to make use of the dummified data. 
\n'''\n################################################################################\n# Ridge\nst.subheader('Regularizing the model')\nreg1, reg2, reg3 = st.columns((3))\n\nwith reg1:\n \"Ridge Regularization\"\n from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, RidgeCV, LassoCV, ElasticNetCV\n\n model = RidgeCV(alphas=np.logspace(-4, 4, 10), cv=5)\n model.fit(X_train, y_train)\n\n alpha = model.alpha_\n model = Ridge(alpha=alpha)\n\n # get cross validated scores\n scores = cross_val_score(model, X_train, y_train, cv=5)\n st.write(\"Cross-validated training scores:\", scores)\n st.write(\"Mean cross-validated training score:\", scores.mean())\n\n model.fit(X_train, y_train)\n st.write(\"Training Score:\", model.score(X_train, y_train))\n st.write(\"Test Score:\", model.score(X_test, y_test))\n\nwith reg2:\n # Lasso\n \"Lasso Regularization\"\n model = LassoCV(alphas=np.logspace(-4, 4, 10), cv=5)\n model.fit(X_train, y_train)\n\n alpha = model.alpha_\n model = Lasso(alpha=alpha)\n\n # get cross validated scores\n scores = cross_val_score(model, X_train, y_train, cv=5)\n st.write(\"Cross-validated training scores:\", scores)\n st.write(\"Mean cross-validated training score:\", scores.mean())\n\n model.fit(X_train, y_train)\n st.write(\"Training Score:\", model.score(X_train, y_train))\n st.write(\"Test Score:\", model.score(X_test, y_test))\n\nwith reg3:\n # Elastic net\n \"Elastic net Regularization\"\n model = ElasticNetCV(alphas=np.logspace(-4, 4, 10),\n l1_ratio=np.array([.1, .5, .7, .9, .95, .99, 1]),\n cv=5)\n # fit the model\n model.fit(X_train, y_train)\n\n alpha = model.alpha_\n model = ElasticNet(alpha=alpha)\n\n # get cross validated scores\n scores = cross_val_score(model, X_train, y_train, cv=5)\n st.write(\"Cross-validated training scores:\", scores)\n st.write(\"Mean cross-validated training score:\", scores.mean())\n\n model.fit(X_train, y_train)\n st.write(\"Training Score:\", model.score(X_train, y_train))\n st.write(\"Test Score:\", model.score(X_test, y_test))\n\n'''\n- Lasso gave the best R2 score so far. 
\n- And seeing the both training and testing scores, all the regurilized models seem not overfitting\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n", "repo_name": "hitomi1104/Airbnb-App-Project", "sub_path": "final.py", "file_name": "final.py", "file_ext": "py", "file_size_in_byte": 21458, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.style.use", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "altair.data_transformers.disable_max_rows", "line_number": 34, "usage_type": "call"}, {"api_name": "altair.data_transformers", "line_number": 34, "usage_type": "attribute"}, {"api_name": "streamlit.columns", "line_number": 40, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 43, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 82, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 83, "usage_type": "call"}, {"api_name": "streamlit.caption", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.notnull", "line_number": 103, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 119, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "seaborn.set_style", "line_number": 128, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 130, "usage_type": "call"}, {"api_name": "streamlit.text", "line_number": 135, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "seaborn.set_style", "line_number": 147, "usage_type": "call"}, {"api_name": "seaborn.distplot", "line_number": 148, "usage_type": "call"}, {"api_name": "streamlit.pyplot", "line_number": 149, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 153, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 160, "usage_type": "call"}, {"api_name": "streamlit.caption", "line_number": 161, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 180, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 196, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 199, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 208, "usage_type": "call"}, {"api_name": "altair.condition", "line_number": 218, "usage_type": "call"}, 
{"api_name": "altair.datum", "line_number": 219, "usage_type": "attribute"}, {"api_name": "altair.value", "line_number": 220, "usage_type": "call"}, {"api_name": "altair.value", "line_number": 221, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 231, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 236, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 240, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 243, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 245, "usage_type": "call"}, {"api_name": "streamlit.caption", "line_number": 248, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 249, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 250, "usage_type": "call"}, {"api_name": "altair.Chart", "line_number": 257, "usage_type": "call"}, {"api_name": "altair.X", "line_number": 260, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 260, "usage_type": "call"}, {"api_name": "altair.Y", "line_number": 261, "usage_type": "call"}, {"api_name": "altair.Scale", "line_number": 261, "usage_type": "call"}, {"api_name": "streamlit.altair_chart", "line_number": 264, "usage_type": "call"}, {"api_name": "streamlit.caption", "line_number": 267, "usage_type": "call"}, {"api_name": "streamlit.slider", "line_number": 268, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 272, "usage_type": "call"}, {"api_name": "folium.plugins.Fullscreen", "line_number": 274, "usage_type": "call"}, {"api_name": "folium.plugins", "line_number": 274, "usage_type": "name"}, {"api_name": "folium.plugins.MousePosition", "line_number": 279, "usage_type": "call"}, {"api_name": "folium.plugins", "line_number": 279, "usage_type": "name"}, {"api_name": "folium.plugins.MarkerCluster", "line_number": 280, "usage_type": "call"}, {"api_name": "folium.plugins", "line_number": 280, "usage_type": "name"}, {"api_name": "streamlit.markdown", "line_number": 281, "usage_type": "call"}, {"api_name": "streamlit_folium.folium_static", "line_number": 282, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 283, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 290, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 294, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 302, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}, {"api_name": "wordcloud.WordCloud", "line_number": 307, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 309, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 318, "usage_type": "call"}, {"api_name": "streamlit.header", 
"line_number": 328, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 331, "usage_type": "call"}, {"api_name": "pandas.api.types.is_numeric_dtype", "line_number": 338, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 347, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 354, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 355, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 357, "usage_type": "call"}, {"api_name": "streamlit.caption", "line_number": 361, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 364, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 366, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 368, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 371, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 372, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 373, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 374, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 378, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 384, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 387, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 387, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 389, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 389, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 390, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 404, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 405, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 407, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 409, "usage_type": "call"}, {"api_name": "streamlit.table", "line_number": 416, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 422, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 425, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 427, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 435, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 442, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 443, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 445, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 451, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 453, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 454, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 455, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 456, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 457, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 461, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 466, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 469, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 469, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.title", "line_number": 471, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 471, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 472, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 492, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 493, "usage_type": "call"}, {"api_name": "sklearn.linear_model.RidgeCV", "line_number": 499, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 499, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 503, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 506, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 507, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 508, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 511, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 512, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LassoCV", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 517, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Lasso", "line_number": 521, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 524, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 525, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 526, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 529, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 530, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNetCV", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 535, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 536, "usage_type": "call"}, {"api_name": "sklearn.linear_model.ElasticNet", "line_number": 542, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 545, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 546, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 547, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 550, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 551, "usage_type": "call"}]} +{"seq_id": "37337104537", "text": "from fastapi import Body, FastAPI, Depends\nfrom app.auth.jwt_handler import signJWT\nfrom app.data.users import userDB\nfrom app.exceptions.error import errorMsg, exception_handler\nfrom app.model import PostSchema, UserLoginSchema, UserSchema\nfrom app.service.user_service import user_LogIn, user_exist, user_signUp\nfrom app.auth.jwt_bearer import jwtBearer\n#creating FastAPI's webapp\napp=FastAPI()\n\n#Substituting DB\nposts= [\n {\n \"id\":1, \n \"title\" : \"Penguin\",\n \"content\": \"Antartica Maybe\"\n }\n]\n\nusersDB = userDB()\n\n# 1.landing page\n@app.get(\"/\")\ndef root():\n return \"use /docs to view APIs\"\n\n# 2. Creating a GET request for testing\n@exception_handler\n@app.get(\"/get\", tags=[\"testing\"])\ndef root():\n return {\"API\":\"FastAPI\"}\n\n# 3. GET all posts\n@exception_handler\n@app.get(\"/posts\",tags=[\"search posts\"])\ndef get_posts():\n return {\"data\" : posts}\n\n# 4. 
GET post by id\n@exception_handler\n@app.get(\"/posts/{id}\", tags=[\"search posts\"])\ndef get_post_by_id(id:int):\n if id>len(posts):\n return errorMsg(\"Post with this id doesn't exist\")\n\n for post in posts:\n if post[\"id\"]==id:\n return {\"data\": post} \n \n return errorMsg(\"Post with this id doesn't exist\")\n\n# 5. POST a post\n@exception_handler\n@app.post(\"/addPost\", tags=[\"create posts\"],dependencies=[Depends(jwtBearer())])\ndef createPost(post:PostSchema):\n post.id=len(posts)+1\n posts.append(post.dict())\n return {\"info\":\"Post created successfully\"} \n\n# 6. POST user SignUp\n@exception_handler\n@app.post(\"/user/signup\", tags=[\"user\"])\ndef user_signup(user: UserSchema=Body()):\n if user_exist(usersDB,user.email):\n return errorMsg(\"User already exists.\")\n else:\n user_signUp(usersDB,user)\n return signJWT(user.email)\n \n\n# 7. POST user LogIn\n@exception_handler\n@app.post(\"/user/login\",tags=[\"user\"])\ndef user_login(user: UserLoginSchema):\n if user_exist(userDB,user.email):\n if user_LogIn(userDB,user):\n return signJWT(user.email)\n else:\n return errorMsg(\"Wrong Password.\")\n return errorMsg(\"User doesn't exists.\")\n\n# 8. GET all user data\n@exception_handler\n@app.get(\"/user/showUsers\", tags=[\"user\"])\ndef user_display_all():\n return usersDB.showData()", "repo_name": "PrasadDalwee/FastAPI", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2254, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "app.auth.jwt_handler", "line_number": 9, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 9, "usage_type": "call"}, {"api_name": "app.data.users.userDB", "line_number": 20, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler.get", "line_number": 23, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 23, "usage_type": "name"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 28, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.get", "line_number": 29, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 29, "usage_type": "name"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 34, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.get", "line_number": 35, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 35, "usage_type": "name"}, {"api_name": "app.exceptions.error.errorMsg", "line_number": 44, "usage_type": "call"}, {"api_name": "app.exceptions.error.errorMsg", "line_number": 50, "usage_type": "call"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 40, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.get", "line_number": 41, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 41, "usage_type": "name"}, {"api_name": "app.model.PostSchema", "line_number": 55, "usage_type": "name"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 53, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.post", "line_number": 54, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 54, "usage_type": "name"}, {"api_name": "fastapi.Depends", "line_number": 54, "usage_type": "call"}, {"api_name": "app.auth.jwt_bearer.jwtBearer", "line_number": 54, "usage_type": "call"}, {"api_name": "app.model.UserSchema", "line_number": 63, "usage_type": "name"}, {"api_name": "fastapi.Body", "line_number": 63, 
"usage_type": "call"}, {"api_name": "app.service.user_service.user_exist", "line_number": 64, "usage_type": "call"}, {"api_name": "app.exceptions.error.errorMsg", "line_number": 65, "usage_type": "call"}, {"api_name": "app.service.user_service.user_signUp", "line_number": 67, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler.signJWT", "line_number": 68, "usage_type": "call"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 61, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.post", "line_number": 62, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 62, "usage_type": "name"}, {"api_name": "app.model.UserLoginSchema", "line_number": 74, "usage_type": "name"}, {"api_name": "app.service.user_service.user_exist", "line_number": 75, "usage_type": "call"}, {"api_name": "app.data.users.userDB", "line_number": 75, "usage_type": "argument"}, {"api_name": "app.service.user_service.user_LogIn", "line_number": 76, "usage_type": "call"}, {"api_name": "app.data.users.userDB", "line_number": 76, "usage_type": "argument"}, {"api_name": "app.auth.jwt_handler.signJWT", "line_number": 77, "usage_type": "call"}, {"api_name": "app.exceptions.error.errorMsg", "line_number": 79, "usage_type": "call"}, {"api_name": "app.exceptions.error.errorMsg", "line_number": 80, "usage_type": "call"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 72, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.post", "line_number": 73, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 73, "usage_type": "name"}, {"api_name": "app.exceptions.error.exception_handler", "line_number": 83, "usage_type": "name"}, {"api_name": "app.auth.jwt_handler.get", "line_number": 84, "usage_type": "call"}, {"api_name": "app.auth.jwt_handler", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "3107188007", "text": "\"\"\" Behavior_cloning for selfdriving car project\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n# google specific imports\nimport google3\nfrom google3.pyglib import gfile\nfrom google3.util.textprogressbar import pybar\nimport tensorflow.google as tf\nfrom google3.sstable.python import sstable\n\nfrom google3.learning.deepmind.python import app\nfrom google3.learning.deepmind.python import flags\nfrom google3.learning.deepmind.python import logging\n\n#mprint = tf.app.logging.info\nmprint = logging.info\nmprint('google3 imports done')\n\n\n\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers.core import Flatten,Dense,Lambda,Dropout\nfrom keras.layers.convolutional import Conv2D,Cropping2D\nfrom keras.layers.pooling import MaxPooling2D\nimport keras.callbacks as kcb\n\nmprint('keras imports done')\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\n\nmprint('mlstuff done')\n\nimport matplotlib.pyplot as plt\nmprint('matplotlib imported')\n\nimport csv\nimport tempfile\nfrom contextlib import contextmanager\nimport collections\nimport random\nfrom datetime import datetime\nimport platform\n\nmprint('allimports done')\n\n\nFLAGS = flags.FLAGS\nflags.DEFINE_string('master', 'local',\n \"\"\"BNS name of the TensorFlow runtime to use.\"\"\")\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n\ndef _bytes_feature(value):\n return 
tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef tfrecordwriter(tfrecords_path):\n with gfs(tfrecords_path) as fname:\n writer = tf.python_io.TFRecordWriter(fname)\n yield writer\n writer.close()\n sys.stdout.flush()\n\n\n# read from google file system\n@contextmanager\ndef gfsread(name):\n tmpdir = tempfile.mkdtemp()\n tmpfname = tmpdir+'/tmp'\n gfile.Copy(name,tmpfname)\n yield tmpfname\n\n# write to google file system\n@contextmanager\ndef gfs(name,suffix='.tmpdata'):\n tmp_file = tempfile.NamedTemporaryFile(mode='w',suffix=suffix)\n mprint('writing '+name+' to tmp file')\n yield tmp_file.name\n gfile.Copy(tmp_file.name,name,overwrite=True)\n\n# create the model. The model is almost identical to the LeNet model except for\n# the introduction of the cropping and doubling of the number of nodes in the first fully\n# connected layer to 240 and adding a dropout with probability of 0.5. Since this is \n# regression problem we use mean-squared-error and 'adam' optimizer\ndef Lenet():\n height = 160\n width = 320\n depth = 3\n model=Sequential()\n model.add(Lambda(lambda x:x/255-0.5,input_shape=(height, width, depth)))\n model.add(Cropping2D(cropping=((70,25),(0,0))))\n model.add(Conv2D(20, (5, 5), padding='same',activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Conv2D(50, (5, 5), padding='same',activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Flatten())\n model.add(Dense(240,activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(84,activation='relu'))\n model.add(Dense(1))\n model.compile(loss='mse',optimizer='adam')\n return model\n\nDrivingFrame = collections.namedtuple('DrivingFrame', 'center left right steering throttle brake speed')\ndef fromTFExampleMoreFeatures(rcrd):\n ex = tf.train.Example()\n ex.ParseFromString(rcrd)\n height = 160\n width = 320\n def toimg(s):\n return np.fromstring(ex.features.feature[s].bytes_list.value[0],dtype=np.float32).reshape((height,width,-1))\n def toflt(s):\n return ex.features.feature[s].float_list.value[0]\n return DrivingFrame(center = toimg('center'),\n left = toimg('left'),\n right = toimg('right'),\n steering = toflt('steering'),\n throttle=toflt('throttle'),\n brake = toflt('brake'),\n speed = toflt('speed'))\n\n#create the generators necessary to fit the model. Both generators for validation and training are returned from this\n# function. 
It also returns the number of steps that needs to be taken on each generator.\ndef train_validate_generators(sstable_path,cross_validation_ratio=0.1,batch_size=256):\n mprint('train_validate_generators')\n table = sstable.SSTable(sstable_path)\n n = len(table)\n cv_start = int(n*(1.0-cross_validation_ratio))\n mprint(\"number of entries in table : \"+str(n))\n num_valid = n-cv_start\n mprint(\"num_valid : \"+str(num_valid*3))\n num_train = cv_start\n mprint(\"num_train : \"+str(num_train*3))\n num_valid_steps = int(num_valid/batch_size)+(1 if num_valid%batch_size != 0 else 0)\n num_train_steps = int(num_train/batch_size)\n cv_start_key = table.iteritems(start_offset=cv_start).next()\n tgen = train_generator(sstable_path,batch_size,0.5,None,cv_start_key,None)\n vgen = valid_generator(sstable_path,batch_size,cv_start_key,None,None)\n return tgen,num_train_steps*3,vgen,num_valid_steps*3\n\n#example_generator parses every record and generates training examples for center\n# left and right images\ndef example_generator(sstable_path,start,stop,start_offset,cycle):\n table = sstable.SSTable(sstable_path)\n while True:\n for k,v in table.iteritems(start_offset=start_offset):\n f=fromTFExampleMoreFeatures(v)\n yield (f.center,f.steering)\n yield (f.right,f.steering-0.2)\n yield (f.left,f.steering+0.2)\n if not cycle:\n mprint('finished non-cyclic example generator')\n break\n\n# a function to weight each examples. An attempt was made to use a higher weight for examples with non-zero\n# steering angles. However, that degraded the performance.\ndef weight_fn(batch_labels):\n return np.ones_like(np.squeeze(batch_labels)) #-0.5+2/(1+np.exp((-1.0/3)*np.square(np.squeeze(batch_labels))))\n\n# generate training examples\ndef train_generator(sstable_path,batch_size,reject_prob,start,stop,start_offset):\n mprint('train_generator')\n height = 160\n width = 320\n batch_features = np.zeros((batch_size, height, width, 3))\n batch_labels = np.zeros((batch_size,1))\n yieldid=0\n curid=0\n for img,str_angle in example_generator(sstable_path,start,stop,start_offset,True):\n if random.uniform(0.0,1.0)= self.period:\n self.epochs_since_last_save = 0\n filepath = self.filepath.format(epoch=epoch, **logs)\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n warnings.warn('Can save best model only with %s available, '\n 'skipping.' 
% (self.monitor), RuntimeWarning)\n else:\n if self.monitor_op(current, self.best):\n if self.verbose > 0:\n print('Epoch %05d: %s improved from %0.5f to %0.5f,'\n ' saving model to %s'\n % (epoch, self.monitor, self.best,\n current, filepath))\n self.best = current\n save_model(self.model,None,filepath)\n else:\n if self.verbose > 0:\n print('Epoch %05d: %s did not improve' %\n (epoch, self.monitor))\n else:\n if self.verbose > 0:\n print('Epoch %05d: saving model to %s' % (epoch, filepath))\n save_model(self.model,None,filepath)\n\n\n\ndef main(argv):\n mprint('entered main')\n mprint(platform.python_version())\n\n wdir = '/cns/is-d/home/sunilsn/carnd/t1/p3/'\n #wdir = '/usr/local/google/home/sunilsn/carnd/t1/p3/'\n sstable_path=wdir+'allfeatures.sstable'\n mprint('working director : '+wdir)\n\n batch_size=1024\n train_gen,train_steps_per_epoch,valid_gen,valid_steps = train_validate_generators(sstable_path,batch_size=batch_size)\n mprint('train_steps_per_epoch : '+str(train_steps_per_epoch))\n mprint('valid_steps : '+str(valid_steps))\n epochs = 10\n model = Lenet()\n dt = datetime.now()\n model_path_prefix = wdir+'model_spe_{:03d}_epochs_{:03d}_datetime_'.format(train_steps_per_epoch,epochs)+datetime.now().strftime('%Y_%m_%d_%H_%M_%S')\n checkpoint = ModelCheckpoint(model_path_prefix+'_checkpoint.data')\n csvlogger = CSVLogger(model_path_prefix+'_training_log.csv')\n history_object = model.fit_generator(train_gen,validation_data=valid_gen,\n validation_steps=valid_steps,\n steps_per_epoch=train_steps_per_epoch,\n epochs=epochs,\n callbacks=[checkpoint,csvlogger])\n save_model(model,history_object,model_path_prefix)\n\ndef plot_history(history_object,path_prefix):\n if history_object is not None:\n mprint(history_object.history.keys())\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n with gfs(path_prefix+'_history.png',suffix='.png') as fname:\n plt.savefig(fname)\n\nif __name__ == '__main__':\n mprint('calling tf-app-run')\n app.run(main)\n\n", "repo_name": "sunilnandihalli/carnd-behavior-cloning", "sub_path": "model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 11777, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "google3.learning.deepmind.python.logging.info", "line_number": 19, "usage_type": "attribute"}, {"api_name": "google3.learning.deepmind.python.logging", "line_number": 19, "usage_type": "name"}, {"api_name": "google3.learning.deepmind.python.flags.FLAGS", "line_number": 53, "usage_type": "attribute"}, {"api_name": "google3.learning.deepmind.python.flags", "line_number": 53, "usage_type": "name"}, {"api_name": "google3.learning.deepmind.python.flags.DEFINE_string", "line_number": 54, "usage_type": "call"}, {"api_name": "google3.learning.deepmind.python.flags", "line_number": 54, "usage_type": "name"}, {"api_name": "tensorflow.google.train.Feature", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.google.train", "line_number": 58, "usage_type": "attribute"}, {"api_name": "tensorflow.google", "line_number": 58, "usage_type": "name"}, {"api_name": "tensorflow.google.train.Int64List", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.google.train.Feature", "line_number": 61, "usage_type": "call"}, {"api_name": 
"tensorflow.google.train", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.google", "line_number": 61, "usage_type": "name"}, {"api_name": "tensorflow.google.train.FloatList", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.google.train.Feature", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.google.train", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.google", "line_number": 65, "usage_type": "name"}, {"api_name": "tensorflow.google.train.BytesList", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.google.python_io.TFRecordWriter", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.google.python_io", "line_number": 69, "usage_type": "attribute"}, {"api_name": "tensorflow.google", "line_number": 69, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 78, "usage_type": "call"}, {"api_name": "google3.pyglib.gfile.Copy", "line_number": 80, "usage_type": "call"}, {"api_name": "google3.pyglib.gfile", "line_number": 80, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 76, "usage_type": "name"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 86, "usage_type": "call"}, {"api_name": "google3.pyglib.gfile.Copy", "line_number": 89, "usage_type": "call"}, {"api_name": "google3.pyglib.gfile", "line_number": 89, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 84, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 99, "usage_type": "call"}, {"api_name": "keras.layers.core.Lambda", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Cropping2D", "line_number": 101, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 102, "usage_type": "call"}, {"api_name": "keras.layers.pooling.MaxPooling2D", "line_number": 103, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 104, "usage_type": "call"}, {"api_name": "keras.layers.pooling.MaxPooling2D", "line_number": 105, "usage_type": "call"}, {"api_name": "keras.layers.core.Flatten", "line_number": 106, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 107, "usage_type": "call"}, {"api_name": "keras.layers.core.Dropout", "line_number": 108, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 109, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 110, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 114, "usage_type": "call"}, {"api_name": "tensorflow.google.train.Example", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.google.train", "line_number": 116, "usage_type": "attribute"}, {"api_name": "tensorflow.google", "line_number": 116, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 121, "usage_type": "attribute"}, {"api_name": "google3.sstable.python.sstable.SSTable", "line_number": 136, "usage_type": "call"}, {"api_name": "google3.sstable.python.sstable", "line_number": 136, "usage_type": "name"}, {"api_name": "google3.sstable.python.sstable.SSTable", "line_number": 154, "usage_type": "call"}, {"api_name": "google3.sstable.python.sstable", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.ones_like", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 
168, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 176, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 198, "usage_type": "call"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 218, "usage_type": "attribute"}, {"api_name": "keras.callbacks", "line_number": 218, "usage_type": "name"}, {"api_name": "keras.callbacks.CSVLogger.__init__", "line_number": 220, "usage_type": "call"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 220, "usage_type": "attribute"}, {"api_name": "keras.callbacks", "line_number": 220, "usage_type": "name"}, {"api_name": "google3.pyglib.gfile.FastGFile", "line_number": 223, "usage_type": "call"}, {"api_name": "google3.pyglib.gfile", "line_number": 223, "usage_type": "name"}, {"api_name": "keras.callbacks.Callback", "line_number": 226, "usage_type": "attribute"}, {"api_name": "keras.callbacks", "line_number": 226, "usage_type": "name"}, {"api_name": "numpy.less", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.Inf", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.greater", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.Inf", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.greater", "line_number": 255, "usage_type": "attribute"}, {"api_name": "numpy.Inf", "line_number": 256, "usage_type": "attribute"}, {"api_name": "numpy.less", "line_number": 258, "usage_type": "attribute"}, {"api_name": "numpy.Inf", "line_number": 259, "usage_type": "attribute"}, {"api_name": "platform.python_version", "line_number": 294, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 307, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 307, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 308, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 308, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "google3.learning.deepmind.python.app.run", "line_number": 332, "usage_type": "call"}, {"api_name": "google3.learning.deepmind.python.app", "line_number": 332, "usage_type": "name"}]} +{"seq_id": "44313409629", "text": "from typing import Callable, List, Tuple, 
Iterator\nfrom .__sentence_pair_dataset import SentencePairDataset\nimport random\n\n\nclass UnpackedSentencePairDataset(SentencePairDataset):\n def __init__(self, workdir: str, source_sentence_file: str, target_sentence_file: str,\n source_tokenizer: Callable[[str], List[int]], target_tokenizer: Callable[[str], List[int]]):\n super().__init__(workdir, source_sentence_file, target_sentence_file, source_tokenizer, target_tokenizer)\n self.__len_accu = []\n for i in range(super().__len__()):\n l = 0\n if len(self.__len_accu) > 0:\n l = self.__len_accu[-1]\n self.__len_accu.append(l + len(super().__getitem__(i)[1]) + 1)\n self.__len = self.__len_accu[-1]\n\n def __pair_idx2idx(self, l1: int, l2: int) -> int:\n return self.__len_accu[l1] - len(super().__getitem__(l1)[1]) - 1 + l2\n\n def __idx2pair_idx(self, idx: int) -> Tuple[int, int]:\n min = 0\n max = len(self.__len_accu)\n while max > min:\n avg = (min + max) // 2\n high = self.__len_accu[avg]\n lv = len(super().__getitem__(avg)[1])\n low = high - lv - 1\n if low <= idx < high:\n return avg, idx - low\n if low > idx:\n max = avg\n else:\n min = avg\n\n def testIndexEx(self):\n for i in range(len(self)):\n l1, l2 = self.__idx2pair_idx(i)\n j = self.__pair_idx2idx(l1, l2)\n if i != j:\n print(i, l1, l2, j)\n assert j == i\n\n def __len__(self):\n return self.__len\n\n def __getitem__(self, idx: int) -> Tuple[List[int], List[int], List[int]]:\n if idx < 0:\n idx = len(self) + idx\n if idx >= len(self):\n raise RuntimeError(\"out of range\")\n l1, l2 = self.__idx2pair_idx(idx)\n x, y = super().__getitem__(l1)\n assert 0 <= l2 <= len(y)\n isend = l2 == len(y)\n y = y[0:l2 + 1]\n trg = list(y)\n trg.insert(0, self.bos())\n if isend:\n y.append(self.eos())\n else:\n trg.pop()\n return x, trg, y\n\n def batchSampler(self, batch_size: int, epcho: int, suffle: bool = True) -> Iterator[List[int]]:\n store = {}\n for i in range(super().__len__()):\n x, y = super().__getitem__(i)\n for j in range(len(y) + 1):\n k = f\"{len(x)}-{j}\"\n if k not in store:\n store[k] = []\n store[k].append(self.__pair_idx2idx(i, j))\n same_shape = list(store.values())\n batchs = []\n for ss in same_shape:\n while len(ss) > 0:\n batchs.append(ss[0:batch_size])\n ss = ss[batch_size:]\n while epcho > 0:\n epcho = epcho - 1\n listme = list(range(len(batchs)))\n while len(listme) > 0:\n n = 0 if not suffle else random.randrange(0, len(listme))\n n = listme.pop(n)\n yield batchs[n]\n", "repo_name": "lidotcircle/ml", "sub_path": "ldcml/dataset/nlp/__unpacked_sentence_pairs_dataset.py", "file_name": "__unpacked_sentence_pairs_dataset.py", "file_ext": "py", "file_size_in_byte": 3106, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "__sentence_pair_dataset.SentencePairDataset", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 8, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 47, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 84, "usage_type": "call"}, {"api_name": "typing.Iterator", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 65, "usage_type": "name"}]} +{"seq_id": "37392795515", "text": "from xml.etree.ElementTree import XMLParser, TreeBuilder\n\nclass 
CustomTreeBuilder(TreeBuilder):\n def __init__(self, title=''):\n super().__init__()\n\n self.body_start = False\n self.depth_l = 0\n self.depth_t = 0\n self.lines = 1\n self.lines_t = []\n self.list_item = []\n self.list_header = []\n self.rows = []\n self.cols = []\n self.table_number = 0\n self.leaf_table = False\n self.leaf_lines = False\n \n self.temp_text = \"\"\n self.temp_lines = []\n self.temp_list = []\n self.temp_table = []\n self.temp_row = []\n self.temp_rowspan = []\n self.temp_colspan = []\n self.temp_caption = []\n\n self.parsing = {'p': 0,\n 'span': 0,\n 'note': 0,\n 'custom-shape': 0}\n\n self.custom_shape_concat = False\n \n self.result_dict = {0: {'type': 'text',\n 'value': title}}\n\n def recursive_reader(self, value_dict):\n for idx in value_dict:\n if value_dict[idx]['type'] == 'text':\n self.cell_text.append(value_dict[idx]['value'])\n elif value_dict[idx]['type'] != 'img':\n self.recursive_reader(value_dict[idx]['value'])\n\n def to_html(self, table):\n html_string = \"\"\n\n html_string = ''.join([html_string, \"\"])\n for tr in table:\n html_string = ''.join([html_string, \"\"])\n for td in table[tr]:\n cell_tag = \" 1:\n cell_tag = ''.join([cell_tag, ' rowspan=\\'', table[tr][td]['rowspan'], '\\''])\n if int(table[tr][td]['colspan']) > 1:\n cell_tag = ''.join([cell_tag, ' colspan=\\'', table[tr][td]['colspan'], '\\''])\n cell_tag = ''.join([cell_tag, '>'])\n\n html_string = ''.join([html_string, cell_tag])\n\n self.cell_text = []\n self.recursive_reader(table[tr][td]['value'])\n\n html_string = ''.join([html_string, '\\n'.join(self.cell_text), \"\"])\n html_string = ''.join([html_string, \"\"])\n html_string = ''.join([html_string, \"
\"])\n\n return html_string\n\n def to_line(self):\n self.temp_text = self.temp_text.replace('\\n', ' ').replace('\\r', ' ').replace('\\t', ' ')\n \n if self.temp_text.strip() != \"\":\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'text',\n 'value': self.temp_text.strip()}\n self.lines += 1\n\n self.temp_text = \"\"\n \n def start(self, tag, attrs):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag in self.parsing:\n self.parsing[tag] += 1\n\n if tag=='custom-shape':\n self.custom_shape_concat=True\n\n if tag!='custom-shape' and self.parsing['custom-shape']==0 and self.custom_shape_concat:\n self.custom_shape_concat = False\n self.to_line()\n \n if tag=='image':\n for attr in attrs:\n if attr.endswith('href'):\n if self.depth_t > 0 or self.depth_l > 0:\n self.temp_lines[-1][self.lines_t[-1]] = {'type':'img',\n 'value': attrs[attr]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'img',\n 'value': attrs[attr]}\n self.lines += 1\n \n elif tag=='line-break':\n self.to_line()\n\n elif tag in ['tab', 's']:\n self.temp_text = ''.join([self.temp_text, ' '])\n \n elif tag=='list-item':\n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='list-header':\n self.temp_lines.append({})\n self.lines_t.append(0)\n self.list_header.append(0)\n\n elif tag=='list':\n self.list_item.append(0)\n self.temp_list.append({})\n self.depth_l += 1\n\n elif tag=='table-cell':\n self.temp_rowspan.append('1')\n self.temp_colspan.append('1')\n for attr in attrs:\n if attr.endswith('number-rows-spanned'):\n self.temp_rowspan[-1] = attrs[attr]\n elif attr.endswith('number-columns-spanned'):\n self.temp_colspan[-1] = attrs[attr]\n \n self.temp_lines.append({})\n self.lines_t.append(0)\n\n elif tag=='table-row':\n self.cols.append(0)\n self.temp_row.append({})\n\n elif tag=='table':\n self.temp_caption.append('')\n for attr in attrs:\n if attr.endswith('}name'):\n self.temp_caption[-1] = (attrs[attr])\n\n self.rows.append(0)\n self.temp_table.append({})\n self.depth_t += 1\n self.leaf_table = True\n\n if self.leaf_lines:\n self.leaf_lines = False\n \n return TreeBuilder.start(self, tag, attrs)\n\n def end(self, tag):\n tag = tag.split('}')[1] if '}' in tag else tag\n\n if tag == 'automatic-styles':\n self.body_start = True\n \n elif tag=='g':\n self.custom_shape_concat=False\n self.to_line()\n\n elif (not self.custom_shape_concat) and tag=='p' and self.parsing['note']==0:\n self.to_line()\n\n elif tag=='list-item':\n self.temp_list[-1][self.list_item[-1]] = {'type': 'list-item',\n 'value': self.temp_lines[-1]}\n \n self.list_item[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list-header':\n self.temp_list[-1][self.list_header[-1]] = {'type': 'list-header',\n 'value': self.temp_lines[-1]}\n self.list_header[-1] += 1\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='list':\n if self.depth_t > 0 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'list',\n 'value': self.temp_list[-1]}\n self.lines += 1\n \n self.temp_list = self.temp_list[:-1]\n self.depth_l -= 1\n self.list_item = self.list_item[:-1]\n\n elif tag=='table-cell':\n if self.custom_shape_concat:\n self.to_line()\n \n if 
self.leaf_lines:\n self.leaf_lines = False\n\n if len(self.temp_lines) > 0:\n table_idx = 0\n \n for temp_line in self.temp_lines[-1]:\n if self.temp_lines[-1][temp_line]['type'] == 'table':\n table_idx = temp_line\n\n for temp_line in range(table_idx, len(self.temp_lines[-1])):\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.temp_row[-1][self.cols[-1]] = {'rowspan': self.temp_rowspan[-1],\n 'colspan': self.temp_colspan[-1],\n 'value': self.temp_lines[-1]}\n \n self.cols[-1] += 1\n\n self.temp_rowspan = self.temp_rowspan[:-1]\n self.temp_colspan = self.temp_colspan[:-1]\n self.temp_lines = self.temp_lines[:-1]\n self.lines_t = self.lines_t[:-1]\n\n elif tag=='table-row':\n self.temp_table[-1][self.rows[-1]] = self.temp_row[-1]\n\n self.temp_row = self.temp_row[:-1]\n self.rows[-1] += 1\n self.cols = self.cols[:-1]\n\n elif tag=='table':\n caption = self.temp_caption[-1]\n self.temp_caption = self.temp_caption[:-1]\n\n if self.leaf_table:\n self.leaf_table = False\n self.leaf_lines = True\n\n if len(self.temp_lines) > 0:\n for temp_line in self.temp_lines[-1]:\n self.result_dict[self.lines] = self.temp_lines[-1][temp_line]\n self.lines += 1\n \n self.lines_t[-1] = 0\n\n html_string = self.to_html(self.temp_table[-1])\n \n if self.depth_t > 1 or self.depth_l > 1:\n self.temp_lines[-1][self.lines_t[-1]] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines_t[-1] += 1\n else:\n self.result_dict[self.lines] = {'type': 'table',\n 'caption': caption,\n 'number': self.table_number,\n 'html': html_string,\n 'value': self.temp_table[-1]}\n self.lines += 1\n \n self.table_number += 1\n\n self.temp_table = self.temp_table[:-1]\n self.depth_t -= 1\n self.rows = self.rows[:-1]\n\n if tag in self.parsing:\n self.parsing[tag] -= 1\n \n return TreeBuilder.end(self, tag)\n\n def data(self, data):\n if self.parsing['span'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n elif self.parsing['p'] > 0 and self.parsing['note']==0:\n self.temp_text = ''.join([self.temp_text, data])\n \n return TreeBuilder.data(self, data)\n\n def close(self):\n return self.result_dict", "repo_name": "hkyoon94/AGC_task12", "sub_path": "inference/tree_builder.py", "file_name": "tree_builder.py", "file_ext": "py", "file_size_in_byte": 10931, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 3, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.start", "line_number": 161, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 161, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.end", "line_number": 282, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 282, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.TreeBuilder.data", "line_number": 290, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.TreeBuilder", "line_number": 290, "usage_type": "name"}]} +{"seq_id": "3180528167", "text": "import urllib\r\n\r\nfrom odoo import fields, models, api\r\nfrom odoo.exceptions import UserError\r\n\r\nimport smpplib\r\nimport smpplib.gsm\r\nimport smpplib.client\r\nimport smpplib.consts\r\nimport logging\r\n\r\nimport sys\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass 
Crm_Sms_Manager(models.TransientModel):\r\n _name = \"send.sms.crm\"\r\n _description = \"A Wizard for sending sms messages to CRM\"\r\n\r\n def _default_to(self):\r\n active = self._context.get('active_id')\r\n print(active)\r\n crm = self.env[\"crm.lead\"].browse(active)\r\n number = crm.mobile\r\n if (number == False):\r\n number = crm.phone\r\n return number\r\n\r\n to = fields.Char(string=\"To\", default=_default_to, required=True)\r\n message = fields.Char(string=\"Message\", required=True, size=150)\r\n gateway = fields.Many2one(\"gateway.sms\", string=\"Gateway\", required=True)\r\n\r\n\r\n def send_message_crm(self):\r\n url = self.gateway\r\n msg = self.message\r\n dest = self.to\r\n un = self.gateway.username\r\n pwd = self.gateway.pwd\r\n fr = self.gateway.code\r\n gateway_type = self.gateway.type\r\n send = self.env['send.sms']\r\n if gateway_type == 'http':\r\n send.send_with_http(url, un, pwd, msg, dest, fr)\r\n else:\r\n send.send_with_smpp(url, un, pwd, msg, dest, fr)\r\n return {'type': 'ir.actions.act_window_close'}\r\n", "repo_name": "primeKal/odoo_sms_manager_jasmin", "sub_path": "crm_sms_manager/models/crm_send.py", "file_name": "crm_send.py", "file_ext": "py", "file_size_in_byte": 1408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.models.TransientModel", "line_number": 17, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 17, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 30, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 30, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 32, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "12983829585", "text": "\n\"\"\" @author : Bivek Panthi\n Python file to train the GAN model\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom formatData import loadData\nfrom sklearn.model_selection import train_test_split\nfrom gan import GenAdvNetwork\nimport os\nfrom matplotlib import pyplot as plt\n\nlatent_dim_ = 78\nepochs_ = 1\nbatch_size_ = 32\ntrajectory_size = 78\n\nif __name__==\"__main__\":\n \"\"\"\n Loading data\n Note than we can only specify absolute location of the raw data\n \"\"\"\n molRep2D, energies = loadData(12, \"/home/panthibivek/thesis/GAN_pkg/data/traj.xyz\")\n #split it into training and test set\n X_train, X_test, y_train, y_test = train_test_split(molRep2D,energies, test_size=0.1)\n\n print(\"Training data size:\", X_train.shape)\n print(\"Test data size:\", X_test.shape)\n\n y_train = np.reshape(y_train, (-1, 1))\n X_train = np.array(X_train)\n X_train = X_train.astype(float)\n X_train = np.reshape(X_train, (-1, trajectory_size, 1))\n dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))\n dataset = dataset.shuffle(buffer_size=100).batch(batch_size_)\n\n GAN_model = GenAdvNetwork(latent_dim=latent_dim_, batch_size=batch_size_)\n GAN_model.compile(\n generator_opt=tf.keras.optimizers.Adam(learning_rate=0.001),\n discriminator_opt=tf.keras.optimizers.Adam(learning_rate=0.001),\n disc_loss=tf.keras.losses.BinaryCrossentropy(),\n gen_loss=tf.keras.losses.MAE\n )\n history = GAN_model.fit(dataset, epochs=epochs_)\n\n train_dir = 
os.path.dirname(os.path.abspath(\"__file__\")) + \"/runs/train/\"\n only_dir = sorted([f for f in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, f))])\n if len(only_dir) > 0:\n last_train_seq_number = int(only_dir[-1][-1])\n else:\n last_train_seq_number = 0\n current_train_dir = train_dir + \"exp\" + str(last_train_seq_number+1)\n os.mkdir(current_train_dir)\n GAN_model.save_weights(current_train_dir + \"/weights/\")\n\n plt.plot(history.history['d_loss'])\n plt.title('Discriminator Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n fig1 = plt.gcf()\n plt.show()\n fig1.savefig(current_train_dir + '/disLoss.png', dpi = 300)\n\n plt.plot(history.history['g_loss'])\n plt.title('Generator Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n fig2 = plt.gcf()\n plt.show()\n fig2.savefig(current_train_dir + '/genLoss.png', dpi = 300)", "repo_name": "panthibivek/Generative-Adversarial-Network-for-Improving-Sampling-of-Molecular-Trajectories", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "formatData.loadData", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gan.GenAdvNetwork", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 39, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.BinaryCrossentropy", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 41, "usage_type": "attribute"}, {"api_name": "tensorflow.keras", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 59, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "36405874871", "text": "import os\nfrom flask import Flask, redirect, render_template, request, Response, url_for\n\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index() -> Response:\n \"\"\" Render the index page, which prompts the user for a word \"\"\"\n word = request.values.get('word', None)\n\n if word is None:\n return render_template('index.html')\n else:\n return redirect(url_for('gift', word=str(word)))\n\n\n@app.route('/')\ndef gift(word: str=None) -> Response:\n \"\"\" Render the page that shows the gift, given a seed word \"\"\"\n # Always redirect to a canonical, lower-case representation of the word\n lowerword = word.lower()\n if lowerword == word:\n return render_template('gift.html', word=word)\n else:\n return redirect(url_for('gift', word=lowerword))\n\n\ndef static_reroute(filename: str) -> callable:\n \"\"\" Factory for making view functions that redirect to a static file \"\"\"\n\n # Make a generic redirection function\n def redirected():\n \"\"\" Render the redirected file \"\"\"\n return redirect(url_for('static', filename=filename))\n\n # Get the base of the filename, to use as the function name\n (base, _) = os.path.splitext(filename)\n\n # Rename the function\n redirected.__name__ = base\n\n # Make the route last: if you make it before you rename the function, you'll\n # get conflicts\n return app.route('/' + filename)(redirected)\n\n\nfavicon = static_reroute('favicon.ico')\nbrowserconfig = static_reroute('browserconfig.xml')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "repo_name": "myw/gift", "sub_path": "gift/gift.py", "file_name": "gift.py", "file_ext": "py", "file_size_in_byte": 1481, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.redirect", 
"line_number": 16, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 9, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "34000527980", "text": "import requests\n\n#\n# url = 'https://www.sogou.com'\n#\n# resp = requests.get(url=url)\n#\n# page_text = resp.text\n#\n# with open('./sougou.html', 'w', encoding='utf-8') as fp:\n# fp.write(page_text)\n\nkeyword = input('enter akey word')\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/91.0.4472.101 Safari/537.36 '\n}\nparams = {\n 'query': keyword\n}\nurl = 'https://www.sogou.com/web'\nresp = requests.get(url=url, params=params, headers=headers)\nresp.encoding = 'utf-8'\npage_text = resp.text\nfileName = keyword + '.html'\nwith open(fileName, 'w', encoding='utf-8') as fp:\n fp.write(page_text)\nprint(fileName, '爬取完毕!!!')\n", "repo_name": "284497478/pythonProject", "sub_path": "testPython/request_demo.py", "file_name": "request_demo.py", "file_ext": "py", "file_size_in_byte": 730, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "31576394039", "text": "from .base_announcement import BaseAnnouncement\nfrom oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401\nfrom oci.decorators import init_model_state_from_kwargs\n\n\n@init_model_state_from_kwargs\nclass AnnouncementSummary(BaseAnnouncement):\n \"\"\"\n A summary representation of an announcement.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"\n Initializes a new AnnouncementSummary object with values from keyword arguments. 
The default value of the :py:attr:`~oci.announcements_service.models.AnnouncementSummary.type` attribute\n of this class is ``AnnouncementSummary`` and it should not be changed.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param id:\n The value to assign to the id property of this AnnouncementSummary.\n :type id: str\n\n :param type:\n The value to assign to the type property of this AnnouncementSummary.\n :type type: str\n\n :param reference_ticket_number:\n The value to assign to the reference_ticket_number property of this AnnouncementSummary.\n :type reference_ticket_number: str\n\n :param summary:\n The value to assign to the summary property of this AnnouncementSummary.\n :type summary: str\n\n :param time_one_title:\n The value to assign to the time_one_title property of this AnnouncementSummary.\n :type time_one_title: str\n\n :param time_one_type:\n The value to assign to the time_one_type property of this AnnouncementSummary.\n Allowed values for this property are: \"ACTION_REQUIRED_BY\", \"NEW_START_TIME\", \"ORIGINAL_END_TIME\", \"REPORT_DATE\", \"START_TIME\", \"TIME_DETECTED\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type time_one_type: str\n\n :param time_one_value:\n The value to assign to the time_one_value property of this AnnouncementSummary.\n :type time_one_value: datetime\n\n :param time_two_title:\n The value to assign to the time_two_title property of this AnnouncementSummary.\n :type time_two_title: str\n\n :param time_two_type:\n The value to assign to the time_two_type property of this AnnouncementSummary.\n Allowed values for this property are: \"END_TIME\", \"NEW_END_TIME\", \"ESTIMATED_END_TIME\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type time_two_type: str\n\n :param time_two_value:\n The value to assign to the time_two_value property of this AnnouncementSummary.\n :type time_two_value: datetime\n\n :param services:\n The value to assign to the services property of this AnnouncementSummary.\n :type services: list[str]\n\n :param affected_regions:\n The value to assign to the affected_regions property of this AnnouncementSummary.\n :type affected_regions: list[str]\n\n :param announcement_type:\n The value to assign to the announcement_type property of this AnnouncementSummary.\n Allowed values for this property are: \"ACTION_RECOMMENDED\", \"ACTION_REQUIRED\", \"EMERGENCY_CHANGE\", \"EMERGENCY_MAINTENANCE\", \"EMERGENCY_MAINTENANCE_COMPLETE\", \"EMERGENCY_MAINTENANCE_EXTENDED\", \"EMERGENCY_MAINTENANCE_RESCHEDULED\", \"INFORMATION\", \"PLANNED_CHANGE\", \"PLANNED_CHANGE_COMPLETE\", \"PLANNED_CHANGE_EXTENDED\", \"PLANNED_CHANGE_RESCHEDULED\", \"PRODUCTION_EVENT_NOTIFICATION\", \"SCHEDULED_MAINTENANCE\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type announcement_type: str\n\n :param lifecycle_state:\n The value to assign to the lifecycle_state property of this AnnouncementSummary.\n Allowed values for this property are: \"ACTIVE\", \"INACTIVE\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type lifecycle_state: str\n\n :param is_banner:\n The value to assign to the is_banner property of this AnnouncementSummary.\n :type is_banner: bool\n\n :param time_created:\n The value to assign to the time_created property of 
this AnnouncementSummary.\n :type time_created: datetime\n\n :param time_updated:\n The value to assign to the time_updated property of this AnnouncementSummary.\n :type time_updated: datetime\n\n :param environment_name:\n The value to assign to the environment_name property of this AnnouncementSummary.\n :type environment_name: str\n\n :param platform_type:\n The value to assign to the platform_type property of this AnnouncementSummary.\n Allowed values for this property are: \"IAAS\", \"SAAS\", 'UNKNOWN_ENUM_VALUE'.\n Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.\n :type platform_type: str\n\n :param chain_id:\n The value to assign to the chain_id property of this AnnouncementSummary.\n :type chain_id: str\n\n \"\"\"\n self.swagger_types = {\n 'id': 'str',\n 'type': 'str',\n 'reference_ticket_number': 'str',\n 'summary': 'str',\n 'time_one_title': 'str',\n 'time_one_type': 'str',\n 'time_one_value': 'datetime',\n 'time_two_title': 'str',\n 'time_two_type': 'str',\n 'time_two_value': 'datetime',\n 'services': 'list[str]',\n 'affected_regions': 'list[str]',\n 'announcement_type': 'str',\n 'lifecycle_state': 'str',\n 'is_banner': 'bool',\n 'time_created': 'datetime',\n 'time_updated': 'datetime',\n 'environment_name': 'str',\n 'platform_type': 'str',\n 'chain_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'type': 'type',\n 'reference_ticket_number': 'referenceTicketNumber',\n 'summary': 'summary',\n 'time_one_title': 'timeOneTitle',\n 'time_one_type': 'timeOneType',\n 'time_one_value': 'timeOneValue',\n 'time_two_title': 'timeTwoTitle',\n 'time_two_type': 'timeTwoType',\n 'time_two_value': 'timeTwoValue',\n 'services': 'services',\n 'affected_regions': 'affectedRegions',\n 'announcement_type': 'announcementType',\n 'lifecycle_state': 'lifecycleState',\n 'is_banner': 'isBanner',\n 'time_created': 'timeCreated',\n 'time_updated': 'timeUpdated',\n 'environment_name': 'environmentName',\n 'platform_type': 'platformType',\n 'chain_id': 'chainId'\n }\n\n self._id = None\n self._type = None\n self._reference_ticket_number = None\n self._summary = None\n self._time_one_title = None\n self._time_one_type = None\n self._time_one_value = None\n self._time_two_title = None\n self._time_two_type = None\n self._time_two_value = None\n self._services = None\n self._affected_regions = None\n self._announcement_type = None\n self._lifecycle_state = None\n self._is_banner = None\n self._time_created = None\n self._time_updated = None\n self._environment_name = None\n self._platform_type = None\n self._chain_id = None\n self._type = 'AnnouncementSummary'\n\n def __repr__(self):\n return formatted_flat_dict(self)\n\n def __eq__(self, other):\n if other is None:\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n return not self == other\n", "repo_name": "oracle/oci-python-sdk", "sub_path": "src/oci/announcements_service/models/announcement_summary.py", "file_name": "announcement_summary.py", "file_ext": "py", "file_size_in_byte": 7959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 345, "dataset": "github-code", "pt": "52", "api": [{"api_name": "base_announcement.BaseAnnouncement", "line_number": 7, "usage_type": "name"}, {"api_name": "oci.util.formatted_flat_dict", "line_number": 178, "usage_type": "call"}, {"api_name": "oci.decorators.init_model_state_from_kwargs", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "37468443789", "text": "import json\nimport logging\nfrom pathlib import 
Path\nimport typing as tp\n\nfrom retrying import retry\nfrom . import utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass Link:\n \"\"\"\n Connection with Dora for your trainer.\n This is minimalistic and won't do much.\n\n This can also be used to simulate a fake link by passing `None`\n as the history file.\n \"\"\"\n def __init__(self, history_file: tp.Optional[Path] = None):\n \"\"\"\n Initialize the Link with Dora.\n \"\"\"\n self.history: tp.List[dict] = []\n self.history_file = history_file\n\n # Retry operation as history file might be stale for update by running XP\n @retry(stop_max_attempt_number=10)\n def load(self):\n if self.history_file is None:\n return\n if self.history_file.exists():\n history = utils.try_load(self.history_file, load=json.load, mode='r')\n if history is not None:\n self.history = history\n\n def _commit(self):\n if self.history_file is None:\n return\n\n from . import distrib\n if not distrib.is_master():\n return\n with utils.write_and_rename(self.history_file, \"w\") as tmp:\n json.dump(self.history, tmp, indent=2)\n\n def update_history(self, history: tp.List[dict]):\n history = utils.jsonable(history)\n if not isinstance(history, list):\n raise ValueError(f\"history must be a list, but got {type(history)}\")\n self.history[:] = history\n self._commit()\n\n def push_metrics(self, metrics: dict):\n metrics = utils.jsonable(metrics)\n self.history.append(metrics)\n self._commit()\n", "repo_name": "facebookresearch/dora", "sub_path": "dora/link.py", "file_name": "link.py", "file_ext": "py", "file_size_in_byte": 1679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 199, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 33, "usage_type": "attribute"}, {"api_name": "retrying.retry", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 45, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "14825758659", "text": "import os\nimport discord\nfrom dotenv import load_dotenv\nfrom discord.ext import commands\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\nintents = discord.Intents.default()\nintents.members = True\nbot = commands.Bot(command_prefix=\".\", intents=intents)\n\n# build a string then return it of the voters\ndef buildVoteString(name, members):\n message = \"**Votes for {name} - {voteNum}**\".format(name=name, voteNum=len(members))\n for member in members:\n message += (\"\\n\")\n message += member.display_name\n return message\n\n@bot.command(name='getvotes')\nasync def getVotes(ctx):\n # troy for president\n pres = discord.utils.get(ctx.guild.roles, id=768136870537986098).members\n await ctx.send(buildVoteString(\"Troy Lafond for President\", pres))\n vp = discord.utils.get(ctx.guild.roles, id=768136907116511303).members\n await ctx.send(buildVoteString(\"Zachary Recine for Vice President\", vp))\n treasurer = discord.utils.get(ctx.guild.roles, id=768136954251706389).members\n await ctx.send(buildVoteString(\"Anthony Millsci for Treasurer\", treasurer))\n sec1 = discord.utils.get(ctx.guild.roles, id=768136996664639498).members\n await 
ctx.send(buildVoteString(\"Pierre Demers for Secretary\", sec1))\n sec2 = discord.utils.get(ctx.guild.roles, id=768137052809723906).members\n await ctx.send(buildVoteString(\"Sasha Wilkinson for Secretary\", sec2))\n poli = discord.utils.get(ctx.guild.roles, id=768137090327379968).members\n await ctx.send(buildVoteString(\"Neyder Fernández for Political Outreach Director\", poli))\n\n\n\nbot.run(TOKEN)", "repo_name": "UMass-Lowell-College-Democrats/votebot", "sub_path": "src/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 6, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "discord.Intents.default", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.Intents", "line_number": 10, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 12, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 12, "usage_type": "name"}, {"api_name": "discord.utils.get", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 25, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 27, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 27, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 29, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 29, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 31, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 31, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 33, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 33, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 35, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "72457913764", "text": "import multiprocessing\nimport pandas as pd\nimport os\nimport subprocess\n\n\nclass ClimePredict(object):\n def __init__(self):\n self.input_path = ''\n self.leave_path = ''\n self.prms_path = ''\n self.result_path = ''\n\n def leave_half(self, input_file_path, leave_file_path, pathway):\n\n if not os.path.isdir(input_file_path):\n os.makedirs(input_file_path)\n if not os.path.isdir(leave_file_path):\n os.makedirs(leave_file_path)\n\n input_genes = pd.read_csv(pathway, sep='\\t')\n col_names = input_genes.columns[1]\n pathway = list(input_genes[col_names].unique())\n\n for i in range(len(pathway)):\n df = input_genes[input_genes[col_names].isin([pathway[i]])]\n for j in range(5):\n # get samples\n if df.shape[0] <= 2:\n continue\n sample_num = round(df.shape[0] / 2)\n df_w = df.sample(sample_num,random_state=j+123)\n # leave out genes\n df_leave_out = df.drop(df_w.axes[0])\n\n # leave gene names\n file_name = '{0}_{1}.txt'.format(i, j)\n df_input_file = os.path.join(input_file_path, file_name)\n df_leave_file = os.path.join(leave_file_path, file_name)\n\n df_w.to_csv(df_input_file, sep='\\t', index=False)\n df_leave_out.to_csv(df_leave_file, sep='\\t', index=False)\n self.input_path = input_file_path\n self.leave_path = leave_file_path\n\n def get_prm(self, template_path, prms_out_path, result_out_file):\n\n if not os.path.isdir(result_out_file):\n 
os.makedirs(result_out_file)\n if not os.path.isdir(prms_out_path):\n os.makedirs(prms_out_path)\n\n template = pd.read_csv(template_path, sep='\\t')\n path = self.input_path\n all_file = list(filter(lambda f: not f.startswith('.'), os.listdir(path)))\n\n for j in all_file:\n input_gene_path = os.path.join(path, j)\n out_path = os.path.join(result_out_file, j)\n write_path = os.path.join(prms_out_path, j.replace('.txt', '.prms'))\n template.iloc[2, 0] = input_gene_path\n\n template.iloc[3, 0] = out_path\n template.to_csv(write_path, sep='\\t')\n self.prms_path = prms_out_path\n self.result_path = result_out_file\n\n\n def run_cmd(self,cmd):\n subprocess.call(cmd, shell=True)\n\n\n def run_clime(self,core,clime_path='clime'):\n prims_path = self.prms_path\n all_prims_file = list(filter(lambda f: not f.startswith('.'), os.listdir(prims_path)))\n all_cmd = []\n\n for i in all_prims_file:\n prims_abs = os.path.join(prims_path, i)\n cmd = clime_path + ' ' + prims_abs + ' 0'\n all_cmd.append(cmd)\n # Do multiple processing\n cores = core\n pool = multiprocessing.Pool(processes=cores)\n # method 1: map\n pool.map(self.run_cmd, all_cmd)\n\n\n\nif __name__ == '__main__':\n foo = ClimePredict()\n foo.leave_half('/home/yangfang/PCSF/input/', '/home/yangfang/PCSF/output',\n '/home/yangfang/PCSF/clime_roc/human.KEGG.txt')\n foo.get_prm('/home/yangfang/PCSF/clime_roc/human_CI_half.prms',\n '/home/yangfang/PCSF/test_re112/prms_all/',\n '/home/yangfang/PCSF/test_re112/result_all/')\n # foo.run_clime(6)", "repo_name": "yangfangs/test_GFICLEE", "sub_path": "clime/clime_predict.py", "file_name": "clime_predict.py", "file_ext": "py", "file_size_in_byte": 3431, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.isdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 53, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": 
"attribute"}, {"api_name": "subprocess.call", "line_number": 70, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "16496622571", "text": "from rest_framework.serializers import ModelSerializer\n\nfrom rest.models import (Khoa,\n Lop,\n SinhVien,\n MonHoc,\n KetQua)\n\n\nclass SinhVienSerializer(ModelSerializer):\n\n class Meta:\n model = SinhVien\n fields = ('masv',\n 'hoten',\n 'gioitinh',\n 'ngaysinh',\n 'hocbong',\n )\n\n\n\nclass LopSerializer(ModelSerializer):\n sinhvien = SinhVienSerializer(many=True)\n\n class Meta:\n model = Lop\n fields = ('malop',\n 'tenlop',\n 'khoa',\n 'sinhvien',\n )\n\n\nclass KhoaSerializer(ModelSerializer):\n # lop = SerializerMethodField()\n lop = LopSerializer(many=True, read_only=True)\n\n class Meta:\n model = Khoa\n fields = ('makhoa',\n 'tenkhoa',\n 'lop',\n )\n\n # def get_lop(self, obj):\n # return LopSerializer(Lop.objects.filter(khoa=obj), many=True).data\n\n\nclass MonHocSerializer(ModelSerializer):\n class Meta:\n model = MonHoc\n fields = ('mamh',\n 'tenmh',\n 'sotiet',\n )\n\n\nclass KetQuaSerializer(ModelSerializer):\n sinhvien = SinhVienSerializer\n monhoc = MonHocSerializer\n class Meta:\n model = KetQua\n fields = ('sinhvien',\n 'monhoc',\n 'diemthi',\n )\n", "repo_name": "khai-nguyen-dinh/django-app", "sub_path": "django_rest/restfll/rest/api/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1541, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 10, "usage_type": "name"}, {"api_name": "rest.models.SinhVien", "line_number": 13, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 23, "usage_type": "name"}, {"api_name": "rest.models.Lop", "line_number": 27, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 35, "usage_type": "name"}, {"api_name": "rest.models.Khoa", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 50, "usage_type": "name"}, {"api_name": "rest.models.MonHoc", "line_number": 52, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 59, "usage_type": "name"}, {"api_name": "rest.models.KetQua", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "10319220495", "text": "from flask import jsonify, request\n\n# Solution B - If the script importing the module is not in a package\nimport os,sys,inspect\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir) \nimport database as database\nimport utils as utils\n\nclass GroupsNaoExisteException(Exception):\n pass\n\nexemple = {\n \"Id\": 1,\n \"Name\": \"Devs\",\n \"Describe\": \"Group turned for developers\",\n \"ChatType\": \"Group\",\n \"ChatId\": 1,\n \"Date\": \"Wed, 23 Oct 2019 00:12:37 GMT\"\n}\n\ndatabase.local[\"Groups\"] = [exemple]\n\ndef getGroups():\n return jsonify(database.local[\"Groups\"])\n\ndef newGroup(request_json):\n res_group = request_json\n res_group[\"Date\"] = utils.createdDate()\n if('Name' in 
res_group.keys()):\n for group in database.local[\"Groups\"]:\n if(group['Id'] == res_group['Id']):\n res_group[\"Id\"] = utils.createdId(database.local[\"Groups\"])\n database.local[\"Groups\"].append(res_group)\n return jsonify(database.local[\"Groups\"])\n else:\n return jsonify({'erro':'usuario sem nome'}), 400 ", "repo_name": "MateusArenas/Wasit-PYTHON-API", "sub_path": "Groups/groups.py", "file_name": "groups.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "inspect.getfile", "line_number": 5, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "database.local", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 26, "usage_type": "call"}, {"api_name": "database.local", "line_number": 26, "usage_type": "attribute"}, {"api_name": "utils.createdDate", "line_number": 30, "usage_type": "call"}, {"api_name": "database.local", "line_number": 32, "usage_type": "attribute"}, {"api_name": "utils.createdId", "line_number": 34, "usage_type": "call"}, {"api_name": "database.local", "line_number": 34, "usage_type": "attribute"}, {"api_name": "database.local", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 36, "usage_type": "call"}, {"api_name": "database.local", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "70048772006", "text": "\"\"\"Using FastAPI to Build Python Web APIs.\"\"\"\n\n# The First API, Step by Step.\n\nfrom typing import Optional\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\n\n@app.get(\"/\")\nasync def read_root():\n return {\"message\": \"Welcome to my web API :)\"}\n\n\n@app.get(\"/users/me\")\nasync def read_user_me():\n return {\"user_id\": \"the current user\"}\n\n\n@app.get(\"/users/{user_id}\")\nasync def read_user(user_id: int):\n return {\"user_id\": user_id}\n\n\n# Data handling with pydantic.\n# Receive path parameters and a request body.\n\n# To declare a request body, you use pydantic models,\n# with all their power and benefits.\n\n\nclass Item(BaseModel):\n name: str\n description: Optional[str] = None\n price: float\n tax: Optional[float] = None\n\n\n# @app.put(\"/items/{item_id}\")\n# async def update_item(item_id: int, item: Item):\n# item_dict = item.model_dump()\n# if item.tax:\n# price_with_tax = item.price + item.tax\n# item_dict.update({\"price_with_tax\": price_with_tax})\n# return {\"item_id\": item_id, **item_dict}\n\n\n@app.post(\"/items/{item_id}\")\nasync def create_item(item_id: int, item: Item):\n return {\n \"item_id\": item_id,\n **item.model_dump(),\n }\n", "repo_name": "sava9ecode/getting_started_with_fastapi", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": 
"code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fastapi.FastAPI", "line_number": 10, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "23788919534", "text": "from datetime import timedelta, datetime\nfrom extractors.g1 import extract_g1_data, load_g1_raw_data\nfrom prefect import task, Flow, Parameter\nfrom prefect.schedules import IntervalSchedule\n\n\n@task(max_retries=1, retry_delay=timedelta(seconds=1))\ndef transform(data):\n print(\"transformed\")\n return \"transformed\"\n\n\n@task(max_retries=1, retry_delay=timedelta(seconds=1))\ndef load_final_dataset(data):\n print(\"dataset loaded\")\n\n\ndef main():\n\n # Scheduled to run every 24 hours\n schedule = IntervalSchedule(\n start_date=datetime.utcnow() + timedelta(seconds=1),\n interval=timedelta(hours=24),\n )\n\n # Main project flow\n with Flow(\"Fake News Data Extractor 2000\", schedule=schedule) as flow:\n\n # Extract\n g1_url = \"https://g1.globo.com/fato-ou-fake/\"\n g1_raw_data = extract_g1_data(g1_url)\n\n # Transform\n fake_news_dataset = transform(g1_raw_data)\n\n # --- Load ---\n\n # ----- Load Raws -----\n load_g1_raw_data(g1_raw_data)\n\n # ----- Load Transformations -----\n\n # ----- Load Final Dataset -----\n load_final_dataset(fake_news_dataset)\n\n flow.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "RamonMachado/FakeNewsFactCheckETL", "sub_path": "job.py", "file_name": "job.py", "file_ext": "py", "file_size_in_byte": 1198, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "prefect.task", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 7, "usage_type": "call"}, {"api_name": "prefect.task", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 13, "usage_type": "call"}, {"api_name": "prefect.schedules.IntervalSchedule", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "prefect.Flow", "line_number": 27, "usage_type": "call"}, {"api_name": "extractors.g1.extract_g1_data", "line_number": 31, "usage_type": "call"}, {"api_name": "extractors.g1.load_g1_raw_data", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "6625487427", "text": "from urllib.request import urlopen\nimport simplejson\nimport time\nfrom datetime import datetime\n\nunix_timestamp = 1601510400 #October 1, 2020\n#amt = (60*60*24*7) #one week\namt = (60*60*12) #1 days\nhalf_month_before = unix_timestamp - amt\n\nf = open('all_20_beyondthebump.txt','w')\nfor i in range(1,(280*2) + 1):\n # print('HALF DAY ' + str(i))\n request = 'https://api.pushshift.io/reddit/submission/search/?size=1200&after=' + str(half_month_before) + '&before=' + str(unix_timestamp) + '&sort_type=score&sort=desc&subreddit=beyondthebump'\n # print(request)\n #print request2\n try:\n json = simplejson.loads(urlopen(request).read()) \n json = json['data']\n num_comments = 0\n if len(json) > 0:\n num_comments = len(json)\n # print(str(num_comments))\n 
f.write(datetime.fromtimestamp(int(half_month_before)).strftime('%Y-%m-%d') + '--' + datetime.fromtimestamp(int(unix_timestamp)).strftime('%Y-%m-%d') + ' ' + str(num_comments)+'\\n')\n else:\n f.write('0\\n')\n \n unix_timestamp = unix_timestamp - amt\n half_month_before = half_month_before - amt\n time.sleep(0.5) #sleep a bit to not get timeouts\n except Exception as ex:\n print(ex)\n f.close()\n assert(0)\n\n\nf.close()\n \n \n", "repo_name": "bhargavkuchipudi0/independent_study", "sub_path": "src/basic_stats_all.py", "file_name": "basic_stats_all.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "simplejson.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "40666162363", "text": "import io, os\ninput = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline\n\nn = int(input().decode())\nline = []\nfor i in range(n):\n line.append(int(input().decode()))\n\nanswer = 0\n\nstart, end = 0, n-1\n\nwhile start < end:\n #print(\"start:\", start, \"end:\", end)\n start_h = line[start]\n end_h = line[end]\n \n if start_h < line[start+ 1]:\n start += 1\n continue\n if end_h < line[end-1]:\n end -= 1\n continue\n \n broken = False \n for i in range(start+1, end):\n if line[i] > start_h:\n answer += 1\n # print(i, start)\n start = i\n broken = True\n break\n \n if broken:\n continue\n\n for j in range(end-1, start, -1):\n if line[j] > end_h:\n answer +=1\n # print(j, end)\n end = j\n broke = True\n break\n \n if broken:\n continue\n \n if start != end -1:\n answer += 1\n start += 1\n\nprint(answer + n-1)\n", "repo_name": "sreyaaluri/funsies", "sub_path": "ICPC prep/D .py", "file_name": "D .py", "file_ext": "py", "file_size_in_byte": 995, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "io.BytesIO", "line_number": 2, "usage_type": "call"}, {"api_name": "os.read", "line_number": 2, "usage_type": "call"}, {"api_name": "os.fstat", "line_number": 2, "usage_type": "call"}]} +{"seq_id": "28181821344", "text": "from functools import wraps\nimport json\nimport mysql.connector\nfrom flask import make_response, request\nfrom datetime import datetime, timedelta\nimport jwt\nimport re\nfrom config.config import dbconfig\nclass auth_model():\n def __init__(self): #connection establishment code\n try:\n self.con=mysql.connector.connect(host=dbconfig['hostname'], user=dbconfig['username'], password=dbconfig['password'], database=dbconfig['database'])\n self.con.autocommit=True\n self.cur=self.con.cursor(dictionary=True)\n print(\"connection successful auth_model\")\n except:\n print(\"error oooooo\")\n\n def token_auth(self, endpoint=\"\"):\n def inner1(func):\n @wraps(func)\n def inner2(*args):\n endpoint = request.url_rule\n print(endpoint)\n authorization = request.headers.get(\"Authorization\")\n # print (authorization)\n if re.match(\"Bearer *([^ ]+) *$\", authorization, flags=0):\n token = authorization.split(\" \")[1]\n # print(token)\n try:\n jwtdecoded=(jwt.decode(token, \"arn\", algorithms=\"HS256\"))\n # print(jwtdecoded)\n except jwt.ExpiredSignatureError:\n return 
make_response({\"ERROR\":\"token expired\"}, 401)\n \n role_id = jwtdecoded['payload']['role_id']\n self.cur.execute(f\"SELECT roles FROM accessibility_view WHERE endpoint='{endpoint}'\")\n result = self.cur.fetchall()\n if len(result)>0:\n print(json.loads(result[0]['roles']))\n allowed_roles = json.loads(result[0]['roles'])\n if role_id in allowed_roles:\n return func(*args)\n else:\n return make_response({\"ERROR\":\"invalid roles\"}, 404)\n else:\n return make_response({\"ERROR\":\"unknown endpoint\"}, 404)\n \n else:\n return make_response({\"ERROR\": \"invalid token\"}, 401)\n return inner2\n return inner1", "repo_name": "arnabroy144/sighup-login_fullStack", "sub_path": "signup-login_backend/flask_app/model/auth_model.py", "file_name": "auth_model.py", "file_ext": "py", "file_size_in_byte": 2295, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "mysql.connector.connector.connect", "line_number": 12, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 12, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 12, "usage_type": "name"}, {"api_name": "config.config.dbconfig", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.url_rule", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 25, "usage_type": "name"}, {"api_name": "re.match", "line_number": 27, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 31, "usage_type": "call"}, {"api_name": "jwt.ExpiredSignatureError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.make_response", "line_number": 34, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 50, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "1051659478", "text": "import json\nimport logging\nimport os\n\nfrom .config import (\n DOCKER_PLUGIN_DIR,\n DOCKER_PLUGIN_CONFIG_PATH,\n FILESTORAGE_MAPPING,\n ETC_DIR,\n ETC_CONFIG_PATH,\n LOG_DIR,\n OPT_DIR,\n PHYSICAL_VOLUME,\n PORT,\n SERVICE_DIR,\n SERVICE_EXEC_START,\n SERVICE_PATH,\n SERVICE_NAME,\n VOLUME_GROUP\n)\nfrom .core import LvmPyError, run_cmd\nfrom .cleanup import cleanup_volumes\n# from .health import run_healthcheck\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_folders():\n logger.info('Creating folders')\n for path in (\n DOCKER_PLUGIN_DIR,\n ETC_DIR,\n LOG_DIR,\n OPT_DIR,\n SERVICE_DIR\n ):\n os.makedirs(path, exist_ok=True)\n\n\ndef stop_service(name=SERVICE_NAME):\n logger.info('Stopping service %s', name)\n run_cmd(['systemctl', 'daemon-reload'])\n try:\n run_cmd(['systemctl', 'stop', name])\n run_cmd(['systemctl', 'disable', name])\n except LvmPyError as e:\n logger.warning('Lvmpy service cannot be stopped %s', e)\n\n\ndef start_service(name=SERVICE_NAME):\n logger.info('Starting service %s', name)\n run_cmd(['systemctl', 'daemon-reload'])\n run_cmd(['systemctl', 
'enable', name])\n run_cmd(['systemctl', 'start', name])\n\n\ndef load_btrfs_kernel_module():\n logger.info('Loading btrfs kernel module')\n run_cmd(['modprobe', 'btrfs'])\n\n\ndef generate_systemd_service_config(\n exec_start=SERVICE_EXEC_START,\n etc_config_path=ETC_CONFIG_PATH\n):\n return f\"\"\"\n[Unit]\nDescription=python lvm docker plugin\nConflicts=getty@tty1.service\nAfter=network.target\n\n[Service]\nType=simple\nWorkingDirectory=/opt/docker-lvmpy/\nExecStart={exec_start}\nEnvironmentFile={etc_config_path}\nRestart=on-failure\nKillSignal=SIGINT\nStandardError=syslog\nNotifyAccess=all\n\n[Install]\nWantedBy=multi-user.target\n\"\"\"\n\n\ndef generate_plugin_config(port=PORT):\n return {\n 'Name': 'lvmpy',\n 'Description': 'A simple volume driver for lvm volumes written in python',\n 'Addr': f'http://127.0.0.1:{port}'\n }\n\n\ndef generate_etc_config(block_device, volume_group, filestorage_mapping):\n return '\\n'.join([\n f'PHYSICAL_VOLUME={block_device}',\n f'VOLUME_GROUP={volume_group}',\n f'FILESTORAGE_MAPPING={filestorage_mapping}'\n ])\n\n\ndef generate_config_files(\n block_device=PHYSICAL_VOLUME,\n volume_group=VOLUME_GROUP,\n filestorage_mapping=FILESTORAGE_MAPPING,\n exec_start=SERVICE_EXEC_START,\n etc_config_path=ETC_CONFIG_PATH,\n port=PORT\n):\n logger.info('Generating config files. Exec start [%s]', exec_start)\n\n docker_plugin_config = generate_plugin_config(port=PORT)\n\n with open(DOCKER_PLUGIN_CONFIG_PATH, 'w') as docker_plugin_config_file:\n json.dump(docker_plugin_config, docker_plugin_config_file)\n\n service_config = generate_systemd_service_config(\n exec_start=exec_start,\n etc_config_path=etc_config_path\n )\n\n with open(SERVICE_PATH, 'w') as service_file:\n service_file.write(service_config)\n\n etc_config = generate_etc_config(\n block_device=block_device,\n volume_group=volume_group,\n filestorage_mapping=filestorage_mapping\n )\n with open(ETC_CONFIG_PATH, 'w') as etc_config_file:\n etc_config_file.write(etc_config)\n\n\ndef setup(\n service_name=SERVICE_NAME,\n block_device=PHYSICAL_VOLUME,\n volume_group=VOLUME_GROUP,\n filestorage_mapping=FILESTORAGE_MAPPING,\n exec_start=SERVICE_EXEC_START,\n etc_config_path=ETC_CONFIG_PATH,\n port=PORT\n):\n stop_service(name=service_name)\n load_btrfs_kernel_module()\n cleanup_volumes(\n block_device=block_device,\n volume_group=volume_group\n )\n create_folders()\n generate_config_files(\n block_device=block_device,\n volume_group=volume_group,\n filestorage_mapping=filestorage_mapping,\n exec_start=exec_start,\n etc_config_path=etc_config_path,\n port=port\n )\n start_service(name=service_name)\n # run_healthcheck(vg=volume_group)\n\n\ndef main():\n print('Setting up docker-lvmpy server')\n setup()\n print('Setup of docker-lvmpy completed')\n\n\nif __name__ == '__main__':\n main()\n", "repo_name": "skalenetwork/docker-lvmpy", "sub_path": "src/install.py", "file_name": "install.py", "file_ext": "py", "file_size_in_byte": 4156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "config.DOCKER_PLUGIN_DIR", "line_number": 32, "usage_type": "name"}, {"api_name": "config.ETC_DIR", "line_number": 33, "usage_type": "name"}, {"api_name": "config.LOG_DIR", "line_number": 34, "usage_type": "name"}, {"api_name": "config.OPT_DIR", "line_number": 35, "usage_type": "name"}, {"api_name": "config.SERVICE_DIR", "line_number": 36, "usage_type": "name"}, {"api_name": 
"os.makedirs", "line_number": 38, "usage_type": "call"}, {"api_name": "config.SERVICE_NAME", "line_number": 41, "usage_type": "name"}, {"api_name": "core.run_cmd", "line_number": 43, "usage_type": "call"}, {"api_name": "core.run_cmd", "line_number": 45, "usage_type": "call"}, {"api_name": "core.run_cmd", "line_number": 46, "usage_type": "call"}, {"api_name": "core.LvmPyError", "line_number": 47, "usage_type": "name"}, {"api_name": "config.SERVICE_NAME", "line_number": 51, "usage_type": "name"}, {"api_name": "core.run_cmd", "line_number": 53, "usage_type": "call"}, {"api_name": "core.run_cmd", "line_number": 54, "usage_type": "call"}, {"api_name": "core.run_cmd", "line_number": 55, "usage_type": "call"}, {"api_name": "core.run_cmd", "line_number": 60, "usage_type": "call"}, {"api_name": "config.SERVICE_EXEC_START", "line_number": 64, "usage_type": "name"}, {"api_name": "config.ETC_CONFIG_PATH", "line_number": 65, "usage_type": "name"}, {"api_name": "config.PORT", "line_number": 88, "usage_type": "name"}, {"api_name": "config.PHYSICAL_VOLUME", "line_number": 105, "usage_type": "name"}, {"api_name": "config.VOLUME_GROUP", "line_number": 106, "usage_type": "name"}, {"api_name": "config.FILESTORAGE_MAPPING", "line_number": 107, "usage_type": "name"}, {"api_name": "config.SERVICE_EXEC_START", "line_number": 108, "usage_type": "name"}, {"api_name": "config.ETC_CONFIG_PATH", "line_number": 109, "usage_type": "name"}, {"api_name": "config.PORT", "line_number": 110, "usage_type": "name"}, {"api_name": "config.PORT", "line_number": 114, "usage_type": "name"}, {"api_name": "config.DOCKER_PLUGIN_CONFIG_PATH", "line_number": 116, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 117, "usage_type": "call"}, {"api_name": "config.SERVICE_PATH", "line_number": 124, "usage_type": "argument"}, {"api_name": "config.ETC_CONFIG_PATH", "line_number": 132, "usage_type": "argument"}, {"api_name": "config.SERVICE_NAME", "line_number": 137, "usage_type": "name"}, {"api_name": "config.PHYSICAL_VOLUME", "line_number": 138, "usage_type": "name"}, {"api_name": "config.VOLUME_GROUP", "line_number": 139, "usage_type": "name"}, {"api_name": "config.FILESTORAGE_MAPPING", "line_number": 140, "usage_type": "name"}, {"api_name": "config.SERVICE_EXEC_START", "line_number": 141, "usage_type": "name"}, {"api_name": "config.ETC_CONFIG_PATH", "line_number": 142, "usage_type": "name"}, {"api_name": "config.PORT", "line_number": 143, "usage_type": "name"}, {"api_name": "cleanup.cleanup_volumes", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "34157793446", "text": "import matplotlib\nmatplotlib.use(\"Agg\")\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nwarnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\nimport numpy as np\n\n# import the necessary packages\n#from sklearn.preprocessing import LabelBinarizer\n#from sklearn.metrics import classification_report\nfrom config import cats_vs_dogs_config as config\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport argparse\nimport glob\nimport sys\n\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--pathname\", default=config.PROJ_JPG_DIR, help=\"path to the dataset\")\nargs = vars(ap.parse_args())\n\npath_root = args[\"pathname\"] # root path name of dataset\n\nif (not os.path.exists(path_root)): # create \"path_root\" directory if it does not exist\n #os.mkdir(path_root)\n 
print(\"ERROR: you need the directory with the jpg files\")\n sys.exit(0)\n\n\n\n# ##################################################################################################\n\n#Size of images\nIMAGE_WIDTH = 256 #227\nIMAGE_HEIGHT = 256 #227\n\n# ##################################################################################################\n\n\ndef resize_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT):\n img = cv2.resize(img, (img_width, img_height), interpolation = cv2.INTER_CUBIC)\n return img\n\n\n# ##################################################################################################\n\n\nlabelNames = [\"cat\", \"dog\", \"others\"]\n\nimages_path = [img for img in glob.glob(path_root + \"/*/*.jpg\")]\n\n\n# ##################################################################################################\nprint(\"BUILD THE VALIDATION SET with 4000 images: 2000 per each class\")\n\nwrk_dir = path_root + \"/val\"\n\nif (not os.path.exists(wrk_dir)): # create \"val\" directory if it does not exist\n os.mkdir(wrk_dir)\n\nf_test = open(wrk_dir+\"/validation.txt\", \"w\") #open file valid.txt\"\nf_lab = open(wrk_dir+\"/labels.txt\", \"w\") #open file labels.txt\"\nfor s in [0,1,2]:\n string = \"%s\\n\" % labelNames[s]\n f_lab.write(string)\nf_lab.close()\n\ncounter = [-1,-1, 0]\n\nval_count = 0\n\nfor in_idx, img_path in enumerate(images_path):\n\n #print(\"DBG: now processing image \", img_path)\n\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = resize_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)\n image2 = img.astype(\"int\")\n if \"/jpg/cats/\" in img_path:\n label = 0\n filename = img_path.split(\"/cats/\")[1]\n elif \"/jpg/dogs/\" in img_path:\n label = 1\n filename = img_path.split(\"/dogs/\")[1]\n else: # other\n label = 2\n filename = \"others.jpg\"\n print(\"ERROR: your path name does not contain '/jpg/' \")\n print(img_path)\n sys.exit(0)\n\n\n counter[ label ] = counter[ label ] +1;\n\n if (counter[ label ] <= 10499) : #skip the first 10500 images of each class and take the last 2000\n continue\n\n val_count = val_count + 1\n string = \"%05d\" % counter[ label ]\n\n class_name = labelNames[label]\n\n path_name = wrk_dir + \"/\" + class_name\n\n if (not os.path.exists(path_name)): # create directory if it does not exist\n os.mkdir(path_name) #https://github.com/BVLC/caffe/issues/3698\n\n path_name = wrk_dir + \"/\" + class_name + \"/\" + filename\n\n string = \" %1d\" % label\n f_test.write(path_name + string + \"\\n\")\n\n cv2.imwrite(path_name, image2)\n\n print(path_name)\n\n\n\nf_test.close()\n\n\n# ##################################################################################################\nprint(\"BUILD THE TEST SET with 1000 images of size 227 x 277\")\n\n\nwrk_dir = path_root + \"/test\"\n\nif (not os.path.exists(wrk_dir)): # create \"test\" directory if it does not exist\n os.mkdir(wrk_dir)\n\nf_test = open(wrk_dir+\"/test.txt\", \"w\") #open file test.txt\"\nf_lab = open(wrk_dir+\"/labels.txt\", \"w\") #open file labels.txt\"\nfor s in [0,1,2]:\n string = \"%s\\n\" % labelNames[s]\n f_lab.write(string)\nf_lab.close()\n\ncounter = [-1, -1, 0]\n\n\ntest_count = -1\nfor in_idx, img_path in enumerate(images_path):\n\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = resize_img(img, img_width=227, img_height=227)\n image2 = img.astype(\"int\")\n if \"/jpg/cats/\" in img_path:\n label = 0\n filename = img_path.split(\"/cats/\")[1]\n elif \"/jpg/dogs/\" in img_path:\n label = 1\n filename = 
img_path.split(\"/dogs/\")[1]\n else: # other\n label = 2\n filename = \"others.jpg\"\n print(\"ERROR: your path name does not contain '/jpg/' \")\n sys.exit(0)\n\n\n counter[ label ] = counter[ label ] +1;\n\n if (counter[ label ] <= 9999) or (counter[ label ] > 10499) : #take the images from 10000 to 10500\n continue\n\n test_count = test_count +1\n string = \" %04d\" % test_count\n\n class_name = labelNames[label]\n\n path_name = wrk_dir + \"/\" + filename\n\n f_test.write(path_name + string + \"\\n\")\n\n cv2.imwrite(path_name, image2)\n #cv2.imshow(labelNames[label], image2)\n #cv2.waitKey(0)\n\n print(path_name)\n\nf_test.close()\nprint(\"Test set contains \", test_count+1, \" images\")\n\n\n# ##################################################################################################\nprint(\"BUILD THE TRAIN IMAGES SET with 20000 images\")\n\n\nwrk_dir = path_root + \"/train\"\n\nif (not os.path.exists(wrk_dir)): # create \"train\" directory if it does not exist\n os.mkdir(wrk_dir)\n\nf_test = open(wrk_dir + \"/train.txt\", \"w\") #open file test.txt\"\nf_lab = open(wrk_dir + \"/labels.txt\", \"w\") #open file labels.txt\"\nfor s in [0,1,2]:\n string = \"%s\\n\" % labelNames[s]\n f_lab.write(string)\nf_lab.close()\n\ncounter = [-1,-1,0]\ntrain_count = 0\n\nfor in_idx, img_path in enumerate(images_path):\n\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = resize_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)\n image2 = img.astype(\"int\")\n if \"/jpg/cats/\" in img_path:\n label = 0\n filename = img_path.split(\"/cats/\")[1]\n elif \"/jpg/dogs/\" in img_path:\n label = 1\n filename = img_path.split(\"/dogs/\")[1]\n else: # other\n print(img_path)\n label = 2\n filename = \"others.jpg\"\n print(\"ERROR: your path name does not contain '/jpg/' \")\n sys.exit(0)\n\n\n counter[ label ] = counter[ label ] +1;\n\n if (counter[ label ] > 9999) : #skip images after the first 10000\n continue\n\n train_count = train_count +1\n\n string = \"%05d\" % counter[ label ]\n\n class_name = labelNames[label]\n\n path_name = wrk_dir + \"/\" + class_name\n\n if (not os.path.exists(path_name)): # create directory if it does not exist\n os.mkdir(path_name)\n\n path_name = wrk_dir + \"/\" + class_name + \"/\" + filename\n\n string = \" %1d\" % label\n f_test.write(path_name + string + \"\\n\")\n\n cv2.imwrite(path_name, image2)\n #cv2.imshow(labelNames[label], image2)\n #cv2.waitKey(0)\n\n #print(path_name)\n\nf_test.close()\n\n# ##################################################################################################\nprint(\"BUILD THE CALIBRATION IMAGES SET with 200 images\")\n\n\nwrk_dir = path_root + \"/calib\"\n\nif (not os.path.exists(wrk_dir)): # create \"calibration\" directory if it does not exist\n os.mkdir(wrk_dir)\n\nf_calib = open(wrk_dir + \"/calibration.txt\", \"w\") #open file calibration.txt\"\nfor s in [0,1,2]:\n string = \"%s\\n\" % labelNames[s]\n\ncounter = [-1,-1,0]\n\n\ncalib_count = -1\nfor in_idx, img_path in enumerate(images_path):\n\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = resize_img(img, img_width=IMAGE_WIDTH, img_height=IMAGE_HEIGHT)\n image2 = img.astype(\"int\")\n if \"/jpg/cats/\" in img_path:\n label = 0\n filename = img_path.split(\"/cats/\")[1]\n elif \"/jpg/dogs/\" in img_path:\n label = 1\n filename = img_path.split(\"/dogs/\")[1]\n else: # other\n label = 2\n filename = \"others.jpg\"\n print(\"ERROR: your path name does not contain '/jpg/' \")\n sys.exit(0)\n\n\n counter[ label ] = counter[ label ] +1;\n\n if (counter[ label 
] > 99) : #take only the first 100 images per each class\n continue\n\n calib_count = calib_count + 1\n string = \"%05d\" % counter[ label]\n\n class_name = labelNames[ label ]\n\n path_name = wrk_dir + \"/\" + class_name\n\n if (not os.path.exists(path_name)): # create directory if it does not exist\n os.mkdir(path_name)\n\n path_name = wrk_dir + \"/\" + class_name + \"/\" + filename\n string2 = \" %1d\" % int(calib_count)\n f_calib.write(class_name + \"/\" + filename + string2 + \"\\n\")\n\n cv2.imwrite(path_name, image2)\n #cv2.imshow(labelNames[int(testY[int(i)])], image2)\n #cv2.waitKey(0)\n\n #print(path_name)\n\nf_calib.close()\n\nprint(\"Train set contains \", train_count, \" images\")\nprint(\"Validation set contains \", val_count, \" images\")\nprint(\"Calibrationset contains \", calib_count+1, \" images\")\nprint(\"END\\n\")\n", "repo_name": "Xilinx/Vitis-In-Depth-Tutorial", "sub_path": "Machine_Learning/Design_Tutorials/01-caffe_cats_vs_dogs/files/caffe/code/1_write_cats-vs-dogs_images.py", "file_name": "1_write_cats-vs-dogs_images.py", "file_ext": "py", "file_size_in_byte": 8924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 114, "dataset": "github-code", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 2, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 5, "usage_type": "call"}, {"api_name": "warnings.filterwarnings", "line_number": 6, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "config.cats_vs_dogs_config.PROJ_JPG_DIR", "line_number": 24, "usage_type": "attribute"}, {"api_name": "config.cats_vs_dogs_config", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 46, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 46, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 111, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 149, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "os.mkdir", 
"line_number": 196, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 210, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 210, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 241, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 248, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 263, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 275, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 303, "usage_type": "call"}, {"api_name": "os.path", "line_number": 303, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 304, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 310, "usage_type": "call"}]} +{"seq_id": "21519051634", "text": "import speech_recognition as sr\nimport pyaudio\nfrom pynput import keyboard\n\nrecognizer = sr.Recognizer()\n\n\n# region Convert microphone speech to text\ndef start_listening_to_voice():\n with sr.Microphone() as source:\n recognizer.adjust_for_ambient_noise(source)\n print(\"Start recording\")\n data = recognizer.listen(source)\n print(\"End recording\")\n text = recognizer.recognize_google(data, language=\"en-US\", show_all=True)\n print(text)\n # endregion\n\n\ndef on_press(key):\n try:\n print(\"alphanumerical key {0} pressed\".format(key.char))\n except AttributeError:\n print(\"special key {0} pressed\".format(key))\n if key == keyboard.Key.f11:\n print(\"Detected f11\")\n start_listening_to_voice()\n\n\ndef on_release(key):\n print('{0} released'.format(\n key))\n if key == keyboard.Key.esc:\n # Stop listener\n return False\n\n\nwith keyboard.Listener(on_press=on_press, on_release=on_release) as listener:\n listener.join()\n\n\n\n\n", "repo_name": "CasJanse/Personal-Assistant", "sub_path": "VoiceRecognition.py", "file_name": "VoiceRecognition.py", "file_ext": "py", "file_size_in_byte": 1028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "speech_recognition.Recognizer", "line_number": 5, "usage_type": "call"}, {"api_name": "speech_recognition.Microphone", "line_number": 10, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 25, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pynput.keyboard", "line_number": 33, "usage_type": "name"}, {"api_name": "pynput.keyboard.Listener", "line_number": 38, "usage_type": "call"}, {"api_name": "pynput.keyboard", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "14021343842", "text": "\nfrom .nvd3 import NVD3\nfrom flask import jsonify, request\nimport numpy as np\n\nclass TwoAxisFocus(NVD3):\n _allowed_axes = [\"sigma\", \"minmax\"]\n def __init__(self, x, y1, y2, data_source, init_params={},\n chart_id=\"new_chart\", url=\"/new_chart/\", colors=[], auto_scale=\"sigma\",\n y1_axis_range=[], y2_axis_range=[], 
sigma=3,\n x_label=\"\", y1_label=\"\", y2_label=\"\",\n margin={\"top\": 30, \"right\": 60, \"bottom\": 50, \"left\": 70}):\n\n self.x = x\n self.y1 = y1\n self.y2 = y2\n self.auto_scale = auto_scale if auto_scale in self._allowed_axes else \"sigma\"\n self.sigma = 3\n self.y1_axis_range = y1_axis_range\n self.y2_axis_range = y2_axis_range\n\n self.options = {\n \"type\": \"TwoAxisFocus\",\n \"chartid\": chart_id,\n \"url\": url,\n \"colors\": colors,\n \"init_params\": init_params,\n \"labels\": {\n \"xAxis\": x_label,\n \"yAxis1\": y1_label,\n \"yAxis2\": y2_label\n },\n \"margin\": margin,\n \"type\": \"TwoAxisFocus\"\n }\n def get_data():\n args = {}\n for c in init_params:\n if request.args.get(c):\n args[c] = request.args[c]\n else:\n args[c] = init_params[c]\n return jsonify(self.to_json(\n self.apply_filters(data_source, args)\n ))\n\n super(TwoAxisFocus, self).__init__(self.options, get_data)\n\n def get_bounds(self, y, method=\"sigma\"):\n if self.auto_scale == \"sigma\":\n m_, s_ = y.mean(), y.std()\n l = m_ - self.sigma*s_\n u = m_ + self.sigma*s_\n else:\n l = y.min()\n u = y.max()\n return [l, u]\n\n def to_json(self, df):\n if df.empty:\n return {\n \"data\": [],\n \"yAxis1\": {\"lower\": 0, \"upper\": 1},\n \"yAxis2\": {\"lower\": 0, \"upper\": 1}\n }\n\n if not self.y1_axis_range:\n bounds1 = self.get_bounds(df[self.y1], method=self.auto_scale)\n else:\n bounds1 = self.y1_axis_range\n\n if not self.y2_axis_range:\n bounds2 = self.get_bounds(df[self.y2], method=self.auto_scale)\n else:\n bounds2 = self.y2_axis_range\n\n records = [\n {\"key\": self.y1, \"values\": [], \"yAxis\": 1, \"type\": \"line\"},\n {\"key\": self.y2, \"values\": [], \"yAxis\": 2, \"type\": \"line\"}\n ]\n\n for n, r in df.iterrows():\n records[0][\"values\"].append({\"x\": r[self.x], \"y\": r[self.y1]})\n records[1][\"values\"].append({\"x\": r[self.x], \"y\": r[self.y2]})\n\n return {\n \"data\": records,\n \"yAxis1\": {\"bounds\": bounds1},\n \"yAxis2\": {\"bounds\": bounds2}\n }\n", "repo_name": "adrian-bigrentz/pyxley", "sub_path": "pyxley/charts/nvd3/two_axis_focus.py", "file_name": "two_axis_focus.py", "file_ext": "py", "file_size_in_byte": 2870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "nvd3.NVD3", "line_number": 6, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "86407800721", "text": "import json\nimport os\n\nlist1 = [];\nlist2 = [];\nif os.path.exists(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\data.json\"):\n\tfout = open(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\data.json\", \"r+\")\nelse:\n\tfout = open(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\data.json\", \"w+\")\nfor i in range(1,11):\n\tfini = open(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\\\\"+str(i)+\".txt\", \"r+\")\n\td = {}\n\td['name'] =\"Person\"+str(i) + \" (\" +fini.readline()[:-1] +\")\"\n\td['group'] =i\n\tlist1.append(d)\n\nfor i in range(0,10):\n\tfor j in range(i+1,10):\n\t\tcounter = 0;\n\t\tfini = 
open(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\\\\"+str(i+1)+\".txt\", \"r+\").readlines()\n\t\tfinj = open(r\"C:\\Users\\USER\\Desktop\\Spring2016\\5544\\Lab3\\Connections\\\\\"+str(j+1)+\".txt\", \"r+\").readlines()\n\t\tfor k in fini:\n\t\t\tfor l in finj:\n\t\t\t\tif (k==l): counter = counter + 1\n\t\td={}\n\t\td['source'] =i\n\t\td['target'] =j\n\t\td['value'] =counter \n\t\tlist2.append(d)\n\njson.dump( {\"nodes\":list1,\"edges\":list2}, fout, indent = 2)", "repo_name": "worm6206/5544lab3All", "sub_path": "Connections/ConnectEdges.py", "file_name": "ConnectEdges.py", "file_ext": "py", "file_size_in_byte": 1030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "11148733279", "text": "# https://towardsdatascience.com/types-of-convolutions-in-deep-learning-717013397f4d\n# https://towardsdatascience.com/the-evolution-of-deeplab-for-semantic-segmentation-95082b025571\n# What is CRF, try this\n# https://towardsdatascience.com/review-crf-rnn-conditional-random-fields-as-recurrent-neural-networks-semantic-segmentation-a11eb6e40c8c\n# https://github.com/lucasb-eyer/pydensecrf crf kütüphanesi\n# FCN Semantic Segmentation\n# https://towardsdatascience.com/review-fcn-semantic-segmentation-eb8c9b50d2d1\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nclass ASPP(nn.Module):\n def __init__(self,num_classes):\n super().__init__()\n\n self.conv_1x1_1 = nn.Conv2d(512, 256, kernel_size=1)\n self.bn_conv_1x1_1 = nn.BatchNorm2d(256)\n\n self.conv_3x3_1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=6, dilation=6)\n self.bn_conv_3x3_1 = nn.BatchNorm2d(256)\n\n self.conv_3x3_2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=12, dilation=12)\n self.bn_conv_3x3_2 = nn.BatchNorm2d(256)\n\n self.conv_3x3_3 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=18, dilation=18)\n self.bn_conv_3x3_3 = nn.BatchNorm2d(256)\n\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n\n self.conv_1x1_2 = nn.Conv2d(512,256,kernel_size=1)\n self.bn_conv_1x1_2 = nn.BatchNorm2d(256)\n\n self.conv_1x1_3 = nn.Conv2d(1280, 256, kernel_size=1) # (1280 = 5*256)\n self.bn_conv_1x1_3 = nn.BatchNorm2d(256)\n\n self.conv_1x1_4 = nn.Conv2d(256, num_classes, kernel_size=1)\n\n\n\n def forward(self, feature_map):\n # (feature_map has shape (batch_size, 512, h/16, w/16)) (assuming self.resnet is ResNet18_OS16 or ResNet34_OS16. 
\n # If self.resnet instead is ResNet18_OS8 or ResNet34_OS8, it will be (batch_size, 512, h/8, w/8))\n\n feature_map_h = feature_map.size()[2] # (== h/16)\n feature_map_w = feature_map.size()[3] # (== w/16)\n\n out_1x1 = F.relu(self.bn_conv_1x1_1(self.conv_1x1_1(feature_map))) # (shape: (batch_size, 256, h/16, w/16))\n out_3x3_1 = F.relu(self.bn_conv_3x3_1(self.conv_3x3_1(feature_map))) # (shape: (batch_size, 256, h/16, w/16))\n out_3x3_2 = F.relu(self.bn_conv_3x3_2(self.conv_3x3_2(feature_map))) # (shape: (batch_size, 256, h/16, w/16))\n out_3x3_3 = F.relu(self.bn_conv_3x3_3(self.conv_3x3_3(feature_map))) # (shape: (batch_size, 256, h/16, w/16))\n\n out_img = self.avg_pool(feature_map) # (shape: (batch_size, 512, 1, 1))\n out_img = F.relu(self.bn_conv_1x1_2(self.conv_1x1_2(out_img))) # (shape: (batch_size, 256, 1, 1))\n out_img = F.upsample(out_img, size=(feature_map_h, feature_map_w), mode=\"bilinear\") # (shape: (batch_size, 256, h/16, w/16))\n\n out = torch.cat([out_1x1, out_3x3_1, out_3x3_2, out_3x3_3, out_img], 1) # (shape: (batch_size, 1280, h/16, w/16))\n out = F.relu(self.bn_conv_1x1_3(self.conv_1x1_3(out))) # (shape: (batch_size, 256, h/16, w/16))\n out = self.conv_1x1_4(out) # (shape: (batch_size, num_classes, h/16, w/16))\n\n return out\n\n\nclass DeepLabV3(nn.Module):\n def __init__(self,model_id,project_dir):\n super(DeepLabV3,self).__init__()\n\n self.num_classes = 20\n\n self.model_id = model_id\n self.project_dir = project_dir\n self.create_model_dirs()\n\n\n self.resnet = ResNet18_OSN()\n self.aspp = ASPP(num_classes=self.num_classes)\n\n \n def forward(self,x):\n\n h = x.size()[2]\n w = x.size()[3]\n\n feature_map = self.resnet(x)\n\n output = self.aspp(feature_map)\n\n output = F.upsample(output,size = (h,w), mode=\"bilinear\")\n\n return output\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "burakalperen/Pytorch-Semantic-Segmentation", "sub_path": "DeepLab/DeepLab_v3.py", "file_name": "DeepLab_v3.py", "file_ext": "py", "file_size_in_byte": 3694, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, 
"usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.functional.upsample", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 58, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 64, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.functional.upsample", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 88, "usage_type": "name"}]} +{"seq_id": "14546388800", "text": "from collections import defaultdict\nfrom functools import partial, update_wrapper, total_ordering\nfrom enum import IntEnum\n\nfrom ...fingerprint import fingerprint\nfrom ...cosette.task import TaskStatus, close_dependency_graph\nfrom ...cosette.use import Use\nfrom ..test import Test, TestResult\nfrom ..eval_test_task import EvalTestTask\nfrom ...eponine.browser import Index\nfrom ... 
import LOGGER\n\n\ndef stats_worker(test_fn, name, description, tasks, **kwargs):\n '''Function creating the test for all the required tasks (summary tests of\n test tasks or summary tests on all tasks for example).\n\n :param test_fn: function generating the test to apply to tasks\n :param str name: the name of the stat task\n :param str description: its description\n :param tasks: the list of tasks to be tested.\n :type tasks: list(Task)\n :returns: an :class:`~.EvalTestTask` that evaluates the diagnostic test.\n :rtype: EvalTestTask\n '''\n test_fn.__name__ = name + '.stats'\n test_fn.__qualname__ = test_fn.__qualname__[-10] + name + '.stats'\n inj_args = [(task, None) for task in tasks]\n wrapped = partial(test_fn, name=name, description=description, **kwargs)\n update_wrapper(wrapped, test_fn)\n use_create_test = Use(inj_args=inj_args, wrapped=wrapped, deps_type='soft')\n return EvalTestTask.from_test_task(use_create_test.get_task())\n\n\ndef task_stats(*, name, description='', labels=None, tasks):\n '''Create a :class:`TestStatsTasks` from a list of tasks.\n\n The :class:`TestStatsTasks` class must be instantiated with the list of\n task results, which are not available to the user when the tests are\n specified in the job file. Therefore, the creation of the\n :class:`TestStatsTasks` must be delayed until the other tasks have finished\n and their results are available in the environment. For this purpose it is\n necessary to wrap the instantiation of :class:`TestStatsTasks` in a\n :class:`~.Use` wrapper, and evaluate the resulting test using a\n :class:`~.EvalTestTask`.\n\n This function hides this bit of complexity from the user. Assume you have a\n list of tasks that you would like to produce statistics about (we will use\n :class:`~.DelayTask` objects for our example):\n\n >>> from valjean.cosette.task import DelayTask\n >>> my_tasks = [DelayTask(1), DelayTask(3), DelayTask(0.2)]\n\n Here is how you make a :class:`TestStatsTasks`:\n\n >>> stats = task_stats(name='delays', tasks=my_tasks)\n >>> from valjean.gavroche.eval_test_task import EvalTestTask\n >>> isinstance(stats, EvalTestTask)\n True\n >>> print(stats.depends_on)\n {Task('delays.stats')}\n >>> create_stats = next(task for task in stats.depends_on)\n\n Here `create_stats` is the task that actually creates the\n :class:`TestStatsTasks`. It soft-depends on the tasks in `my_tasks`:\n\n >>> [task in create_stats.soft_depends_on for task in my_tasks]\n [True, True, True]\n\n The reason why the dependency is soft is that we want to collect statistics\n about the task outcome in any case, even (especially!) 
if some of the tasks\n failed.\n\n :param str name: the name of the task to create.\n :param str description: its description.\n :param tasks: the list of tasks to be tested.\n :type tasks: list(Task)\n :returns: an :class:`~.EvalTestTask` that evaluates the diagnostic test.\n :rtype: EvalTestTask\n '''\n def create_test(*task_results, name, description, labels):\n return [TestStatsTasks(name=name, description=description,\n labels=labels, task_results=task_results)]\n\n return stats_worker(create_test, name=name, description=description,\n labels=labels, tasks=close_dependency_graph(tasks))\n\n\n@total_ordering\nclass NameFingerprint:\n '''A small helper class to store a name and an optional fingerprint for the\n referenced item.'''\n\n def __init__(self, name, fingerprint=None):\n self.name = name\n self.fingerprint = fingerprint\n\n def __str__(self):\n return str(self.name)\n\n def __repr__(self):\n return repr(self.name)\n\n def __eq__(self, other):\n return (self.name == other.name\n and self.fingerprint == other.fingerprint)\n\n def __lt__(self, other):\n if self.name != other.name:\n return self.name < other.name\n return self.fingerprint < other.fingerprint\n\n\nclass TestStatsTasks(Test):\n '''A test that evaluates statistics about the success/failure status of the\n given tasks.\n '''\n\n def __init__(self, *, name, description='', labels=None, task_results):\n '''Instantiate a :class:`TestStatsTasks`.\n\n :param str name: the test name.\n :param str description: the test description.\n :param task_results: a list of task results, intended as the contents\n of the environment sections associated with the executed tasks.\n This test notably inspects the ``'status'`` key to see if the task\n succeeded.\n :type task_results: list(dict(str, *stuff*))\n '''\n super().__init__(name=name, description=description, labels=labels)\n self.task_results = task_results\n\n def evaluate(self):\n '''Evaluate this test and turn it into a :class:`TestResultStatsTasks`.\n '''\n status_dict = defaultdict(list)\n for task_name, task_result in self.task_results:\n name_fing = NameFingerprint(task_name)\n status_dict[task_result['status']].append(name_fing)\n return TestResultStatsTasks(test=self, classify=status_dict)\n\n def data(self):\n '''Generator yielding objects supporting the buffer protocol that (as a\n whole) represent a serialized version of `self`.'''\n yield from super().data()\n yield self.__class__.__name__.encode('utf-8')\n for task_name, _task_result in self.task_results:\n yield task_name.encode('utf-8')\n\n\nclass TestResultStatsTasks(TestResult):\n '''The result of the evaluation of a :class:`TestStatsTasks`. 
The test is\n considered successful if all the observed tasks have successfully completed\n (``TaskStatus.DONE``).\n '''\n\n def __init__(self, *, test, classify):\n '''Instantiate a :class:`TestResultStatsTasks`.\n\n :param TestStatsTasks test: the test producing this result.\n :param classify: a dictionary mapping the task status to the list of\n task names with the given status.\n :type classify: dict(TaskStatus, list(str))\n '''\n super().__init__(test=test)\n self.classify = classify\n\n def __bool__(self):\n '''Returns `True` if all the observed tests have succeeded.'''\n return TaskStatus.DONE in self.classify and len(self.classify) == 1\n\n\nclass TestOutcome(IntEnum):\n '''An enumeration that represents the possible outcomes of a test:\n\n `SUCCESS`\n represents tests that have been evaluated and have succeeded;\n\n `FAILURE`\n represents tests that have been evaluated and have failed;\n\n `MISSING`\n represents tasks that did not generate any ``'result'`` key;\n\n `NOT_A_TEST`\n represents tasks that did not generate a :class:`~.TestResult` object\n as a result;\n '''\n SUCCESS = 0\n FAILURE = 1\n MISSING = 2\n NOT_A_TEST = 3\n __test__ = False\n\n\ndef test_stats(*, name, description='', labels=None, tasks):\n '''Create a :class:`TestStatsTests` from a list of tests.\n\n The :class:`TestStatsTests` class must be instantiated with the list of\n test results, which are not available to the user when the tests are\n specified in the job file. Therefore, the creation of the\n :class:`TestStatsTests` must be delayed until the test tasks have finished\n and their results are available in the environment. For this purpose it is\n necessary to wrap the instantiation of :class:`TestStatsTests` in a\n :class:`~.Use` wrapper, and evaluate the resulting test using a\n :class:`~.EvalTestTask`.\n\n This function hides this bit of complexity from the user. Assume you have a\n list of tasks that evaluate some tests and that you would like to produce\n statistics about the tests results. Let us construct a toy dataset first:\n\n >>> from collections import OrderedDict\n >>> import numpy as np\n >>> from valjean.eponine.dataset import Dataset\n >>> x = np.linspace(-5., 5., num=100)\n >>> y = x**2\n >>> error = np.zeros_like(y)\n >>> bins = OrderedDict([('x', x)])\n >>> parabola = Dataset(y, error, bins=bins, name='parabola')\n >>> parabola2 = Dataset(y*(1+1e-6), error, bins=bins, name='parabola2')\n\n Now we write a function that generates dummy tests for the `parabola`\n dataset:\n\n >>> from valjean.gavroche.test import TestEqual, TestApproxEqual\n >>> def test_generator():\n ... result = [TestEqual(parabola, parabola2, name='equal?').evaluate(),\n ... TestApproxEqual(parabola, parabola2,\n ... name='approx_equal?').evaluate()]\n ... 
return {'test_generator': {'result': result}}, TaskStatus.DONE\n\n\n We need to wrap this function in a PythonTask so that it can be executed as\n a part of the dependency graph:\n\n >>> from valjean.cosette.pythontask import PythonTask\n >>> create_tests_task = PythonTask('test_generator', test_generator)\n\n Here is how you make a :class:`TestStatsTests` to collect statistics about\n the results of the generated tests:\n\n >>> stats = test_stats(name='equal', tasks=[create_tests_task])\n\n >>> from valjean.gavroche.eval_test_task import EvalTestTask\n >>> isinstance(stats, EvalTestTask)\n True\n\n Here `stats` evaluates the test that gathers the statistics, and it depends\n on a special task that generates the :class:`TestStatsTests` instance:\n\n >>> print(stats.depends_on)\n {Task('equal.stats')}\n >>> create_stats = next(task for task in stats.depends_on)\n\n In turn, `create_stats` has a soft dependency on the task that generates\n our test, `create_tests_task`:\n\n >>> create_tests_task in create_stats.soft_depends_on\n True\n\n The reason why the dependency is soft is that we want to collect statistics\n about the test outcome in any case, even (especially!) if some of the tests\n failed or threw exceptions.\n\n Let's run the tests:\n\n >>> from valjean.config import Config\n >>> config = Config()\n >>> from valjean.cosette.env import Env\n >>> env = Env()\n >>> for task in [create_tests_task, create_stats, stats]:\n ... env_up, status = task.do(env=env, config=config)\n ... env.apply(env_up)\n >>> print(status)\n TaskStatus.DONE\n\n The results are stored in a :class:`list` under the key ``'result'``:\n\n >>> print(len(env[stats.name]['result']))\n 1\n >>> stats_res = env[stats.name]['result'][0]\n >>> print(\"SUCCESS:\", stats_res.classify[TestOutcome.SUCCESS])\n SUCCESS: ['approx_equal?']\n >>> print(\"FAILURE:\", stats_res.classify[TestOutcome.FAILURE])\n FAILURE: ['equal?']\n\n :param str name: the name of the task to create.\n :param str description: its description.\n :param tasks: the list of tasks that generate the tests to observe.\n :type tasks: list(Task)\n :returns: an :class:`~.EvalTestTask` that evaluates the diagnostic test.\n :rtype: EvalTestTask\n '''\n\n def create_test(*task_results, name, description, labels):\n return [TestStatsTests(name=name, description=description,\n labels=labels, task_results=task_results)]\n\n return stats_worker(create_test, name=name, description=description,\n labels=labels, tasks=tasks)\n\n\n# hey, pytest!\ntest_stats.__test__ = False\n\n\nclass TestStatsTests(Test):\n '''A test that evaluates statistics about the success/failure of the given\n tests.\n '''\n\n def __init__(self, *, name, description='', labels=None, task_results):\n '''Instantiate a :class:`TestStatsTests` from a collection of task\n results. 
The tasks are expected to generate :class:`~.TestResult`\n objects, which must appear in the ``'result'`` key of the task result.\n '''\n super().__init__(name=name, description=description, labels=labels)\n self.task_results = task_results\n\n def evaluate(self):\n '''Evaluate this test and turn it into a :class:`TestResultStatsTests`.\n '''\n status_dict = defaultdict(list)\n for task_name, task_result in self.task_results:\n if 'result' not in task_result:\n name_fing = NameFingerprint(task_name)\n status_dict[TestOutcome.MISSING].append(name_fing)\n continue\n test_results = task_result['result']\n for test_result in test_results:\n if not isinstance(test_result, TestResult):\n name_fing = NameFingerprint(task_name)\n status_dict[TestOutcome.NOT_A_TEST].append(name_fing)\n if test_result:\n test_lst = status_dict[TestOutcome.SUCCESS]\n else:\n test_lst = status_dict[TestOutcome.FAILURE]\n name_fing = NameFingerprint(test_result.test.name,\n fingerprint(test_result.test))\n test_lst.append(name_fing)\n return TestResultStatsTests(test=self, classify=status_dict)\n\n def data(self):\n '''Generator yielding objects supporting the buffer protocol that (as a\n whole) represent a serialized version of `self`.'''\n yield from super().data()\n yield self.__class__.__name__.encode('utf-8')\n for task_name, _task_result in self.task_results:\n yield task_name.encode('utf-8')\n\n\nclass TestResultStatsTests(TestResultStatsTasks):\n '''The result of the evaluation of a :class:`TestStatsTests`. The test is\n considered successful if all the observed tests have been successfully\n evaluated and have succeeded.\n '''\n def __bool__(self):\n return TestOutcome.SUCCESS in self.classify and len(self.classify) == 1\n\n\ndef test_stats_by_labels(*, name, description='', labels=None,\n tasks, by_labels):\n '''Create a :class:`TestStatsTestsByLabels` from a list of tests.\n\n See :func:`test_stats` for the generalities about this function.\n\n Compared to :func:`test_stats` it takes one argument more: ``'by_labels'``\n to classify then build statistics based on these labels. **The order of the\n labels matters**, as they are successively selected.\n\n Let's define three menus:\n\n >>> menu1 = {'food': 'egg + spam', 'drink': 'beer'}\n >>> menu2 = {'food': 'egg + bacon', 'drink': 'beer'}\n >>> menu3 = {'food': 'lobster thermidor', 'drink': 'brandy'}\n\n These three menus are ordered by pairs. Statistics on meals are\n kept in the restaurant, using :class:`~.TestMetadata`. The goal of the\n tests is to know if both persons of a pair order the same menu and when\n they do it.\n\n .. code::\n\n orders = [TestMetadata(\n {'Graham': menu1, 'Terry': menu1}, name='gt_wday_lunch',\n labels={'day': 'Wednesday', 'meal': 'lunch'}),\n TestMetadata(\n {'Michael': menu1, 'Eric': menu2}, name='me_wday_dinner',\n labels={'day': 'Wednesday', 'meal': 'dinner'}),\n TestMetadata(\n {'John': menu2, 'Terry': menu2}, name='jt_wday',\n labels={'day': 'Wednesday'}),\n TestMetadata(\n {'Terry': menu3, 'John': menu3}, name='Xmasday',\n labels={'day': \"Christmas Eve\"})]\n\n The restaurant owner uses :func:`test_stats_by_labels` to build statistics\n on his menus and the habits of his consumers.\n\n For example, the menus filtered on ``day`` will give:\n\n .. 
code::\n\n ============= ============ =============\n day % success % failure\n ============= ============ =============\n Christmas Eve 1/1 0/1\n Wednesday 2/3 1/3\n ============= ============ =============\n\n These results means, considering the tests requested, both consumers have\n the same meal on Christmas Eve. On Wednesday, one pair of customers out of\n three did not order the same menu.\n\n The same kind of statistics can be done based on the meal:\n\n .. code::\n\n ========== ============ =============\n meal % success % failure\n ========== ============ =============\n dinner 0/1 1/1\n lunch 1/1 0/1\n ========== ============ =============\n\n In that case two tests were not taken into account as they did not have any\n ``'meal'`` label.\n\n It is also possible to make selections on multiple labels. In that case the\n order matters: the classification is performed following the order of the\n labels requested. For example, ``'meal'`` then ``'day'`` :\n\n .. code::\n\n ========== ========= ============ =============\n meal day % success % failure\n ========== ========= ============ =============\n dinner Wednesday 0/1 1/1\n lunch Wednesday 1/1 0/1\n ========== ========= ============ =============\n\n * Only two tests are filtered due to the meal selection\n * Requesting ``'day'`` then ``'meal'`` would only inverse the two first\n columns in that case and emit a **warning**: a preselection on ``'day'``\n is done and in Christmas Eve case the ``'meal'`` label is not provided,\n the selection cannot be done. In the Wednesday one, no problem ``'meal'``\n appears at least in one case (two in our cases).\n\n Finally if the request involves a label that does not exist in any test an\n exception will be raised, mentioning the failing label.\n\n :param str name: the name of the task to create.\n :param str description: its description.\n :param tasks: the list of tasks that generate the tests to observe.\n :type tasks: list(Task)\n :param tuple(str) by_labels: labels from the tests on which the\n classification will be based\n :returns: an :class:`~.EvalTestTask` that evaluates the diagnostic test.\n :rtype: EvalTestTask\n '''\n def create_test(*task_results, name, description, labels, by_labels=None):\n return [TestStatsTestsByLabels(\n name=name, description=description, labels=labels,\n task_results=task_results, by_labels=by_labels)]\n\n return stats_worker(create_test, name=name, description=description,\n labels=labels, tasks=tasks, by_labels=by_labels)\n\n\n# hey, pytest!\ntest_stats_by_labels.__test__ = False\n\n\nclass TestStatsTestsByLabelsException(Exception):\n '''Exception raised during the diagnostic test on :class:`~.TestResult`\n when a classification by labels is required.'''\n # And for pytest...\n __test__ = False\n\n\nclass TestStatsTestsByLabels(Test):\n '''A test that evaluates statistics about the success/failure of the given\n tests using their labels to classify them.\n\n Usually more than one test is performed for each tested case. This test\n summarize tests done on a given category defined by the user in the usual\n tests (:class:`~.TestStudent`, :class:`~.TestMetadata`, etc.).\n\n During the evaluation a list of dictionaries of labels is built for each\n test. These labels are given by the user at the initialization of the test.\n Each dictionary also contains the name of the test (name of the task) and\n its result (sucess or failure). 
From this list of dictionaries an\n :class:`~.Index` is built.\n\n The result of the evaluation is given a a list of dictionaries containing\n the strings corresponding to the chosen labels under the ``'labels'`` key\n and the number of results OK, KO and total.\n '''\n def __init__(self, *, name, description='', labels=None, task_results,\n by_labels):\n '''Instantiate a :class:`TestStatsTestsByLabels` from a collection of\n task results. The tasks are expected to generate :class:`~.TestResult`\n objects, which must appear in the ``'result'`` key of the task result.\n\n :param str name: the test name\n :param str description: the test description\n :param task_result: a list of task results, each task normally contains\n a :class:`~.TestResult`, used in this test.\n :param tuple by_labels: ordered labels to sort the test results. These\n labels are the test labels.\n '''\n super().__init__(name=name, description=description, labels=labels)\n self.task_results = task_results\n self.by_labels = by_labels\n\n def _build_labels_lod(self):\n '''Build the labels list of dictionaries that allows the creation of\n the index.\n\n Two additional labels are added to the labels list: ``'_test_name'``\n and ``'_result'`` that should not be used as test's labels. The first\n one could be needed some time and is expected to be unique, the second\n one is filled with :class:`TestOutcome`.\n '''\n labels_lod = []\n for _task_name, task_result in self.task_results:\n if 'result' not in task_result:\n continue\n test_results = task_result['result']\n for test_result in test_results:\n ldic = test_result.test.labels.copy()\n if '_test_name' in ldic:\n LOGGER.warning(\n \"'_test_name' is a label in some test, will be \"\n \"replaced in TestStatsTestsByLabels\")\n ldic['_test_name'] = test_result.test.name\n if '_result' in ldic:\n LOGGER.warning(\n \"'_result' is a label in some test, will be replaced\"\n \"in TestStatsTestsByLabels\")\n if test_result:\n ldic['_result'] = TestOutcome.SUCCESS\n else:\n ldic['_result'] = TestOutcome.FAILURE\n labels_lod.append(ldic)\n\n return labels_lod\n\n @staticmethod\n def _build_index(labels_lod):\n index = Index()\n for itres, tres in enumerate(labels_lod):\n for lab in tres:\n index[lab][tres[lab]].add(itres)\n return index\n\n def _rloop_over_labels(self, index, labels, rok, rko, plab=()):\n '''Recursive method to select labels and calculate the relative\n statistics.\n\n If ``labels`` contains only one element, the method build a dictionary\n that summarizes the OK, KO and total number of tests for the considered\n label. The method returns a list containing the dictionary as its only\n element.\n\n The previous labels are obtained from the ``plab`` argument.\n\n If ``labels`` contains multiple labels, the first label is considered\n first. For each possible value of the label, the method constructs a\n sub-index by filtering the index on the given label and recursively\n calls itself on the sub-index. The ``plab`` arguement contains the list\n of all the labels that have been considered so far. 
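(Illustrative aside, not part of the original docstring: the recursive walk over successive labels can be sketched with plain dictionaries instead of the valjean Index; the helper and sample data below are hypothetical and only mirror the OK/KO/total bookkeeping described here.)

def group_stats(records, labels, prefix=()):
    # records: list of dicts such as {'day': 'Wednesday', '_result': True}
    # labels:  ordered tuple of label names to classify by, e.g. ('day',)
    label = labels[0]
    groups = {}
    for rec in records:
        if label in rec:                      # records missing the label are skipped
            groups.setdefault(rec[label], []).append(rec)
    if len(labels) > 1:                       # recurse on the remaining labels
        out = []
        for value, sub in groups.items():
            out.extend(group_stats(sub, labels[1:], prefix + (value,)))
        return out
    return [{'labels': prefix + (value,),
             'OK': sum(1 for r in sub if r['_result']),
             'KO': sum(1 for r in sub if not r['_result']),
             'total': len(sub)}
            for value, sub in groups.items()]

records = [{'day': 'Wednesday', '_result': True},
           {'day': 'Wednesday', '_result': False},
           {'day': 'Christmas Eve', '_result': True}]
print(group_stats(records, ('day',)))
# -> [{'labels': ('Wednesday',), 'OK': 1, 'KO': 1, 'total': 2},
#     {'labels': ('Christmas Eve',), 'OK': 1, 'KO': 0, 'total': 1}]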
The lists of\n dictionaries that result from the recursive calls are concatenated into\n a single list and returned.\n\n :param Index index: index used to calculate the statistics\n :param tuple(str) labels: ordered labels to sort the test results\n :param set rok: successful test results\n :param set rko: failed test results\n :param tuple plab: previous labels\n :rtype: list(dict)\n '''\n # pylint: disable=too-many-arguments\n label = labels[0]\n if label not in index:\n LOGGER.warning('%s not found in some test labels', label)\n return []\n if len(labels) > 1:\n lres = []\n for lab, labset in index.get(label, {}).items():\n subind = index.keep_only(labset)\n lres.extend(self._rloop_over_labels(subind, labels[1:],\n rok, rko,\n plab=plab+(lab,)))\n return lres\n lres = []\n for lab, labset in index.get(label, {}).items():\n lres.append({'labels': plab + (lab,),\n 'OK': len(labset & rok),\n 'KO': len(labset & rko),\n 'total': len(labset)})\n return lres\n\n def _stats_for_labels(self, index):\n '''Check the presence of all required labels in the index, build the\n success and failures sets and return the dictionary of results.\n\n :raises: TestStatsTestsByLabelsExeption\n :returns: list of dictionaries of labels and states\n :rtype: list(dict)\n '''\n if not set(self.by_labels) <= set(index):\n raise TestStatsTestsByLabelsException(\n f'TestStatsTestsByLabels: {self.by_labels} not found in test '\n 'labels')\n rok = index['_result'][TestOutcome.SUCCESS]\n rko = index['_result'][TestOutcome.FAILURE]\n res = self._rloop_over_labels(index, self.by_labels, rok, rko)\n return sorted(res, key=lambda x: x['labels'])\n\n def evaluate(self):\n '''Evaluate this test and turn it into a\n :class:`TestResultStatsTestsByLabels`.\n '''\n labels_lod = self._build_labels_lod()\n index = self._build_index(labels_lod)\n sfl = self._stats_for_labels(index)\n return TestResultStatsTestsByLabels(test=self, classify=sfl,\n n_labels=len(labels_lod))\n\n def data(self):\n '''Generator yielding objects supporting the buffer protocol that (as a\n whole) represent a serialized version of `self`.'''\n yield from super().data()\n yield self.__class__.__name__.encode('utf-8')\n for task_name, _task_result in self.task_results:\n yield task_name.encode('utf-8')\n for label in self.by_labels:\n yield label.encode('utf-8')\n\n\nclass TestResultStatsTestsByLabels(TestResultStatsTasks):\n '''The result of the evaluation of a :class:`TestStatsTestsByLabels`. 
The\n test is considered successful if all the observed tests have been\n successfully evaluated and have succeeded.\n\n An oracle is available for each individual test (usually what is required\n here).\n\n ``self.classify`` is here a list of dictionaries with the following keys:\n ``['labels', 'OK', 'KO', 'total']``.\n '''\n\n def __init__(self, *, test, classify, n_labels):\n super().__init__(test=test, classify=classify)\n self.n_labels = n_labels\n\n def __bool__(self):\n '''Test is successful if all tests are.'''\n return all(self.oracles())\n\n def oracles(self):\n '''Test if each test is successful.\n\n :rtype: list(bool)\n '''\n return [t['OK'] == t['total'] for t in self.classify]\n\n def nb_missing_labels(self):\n '''Return the number of tests where at least one of the labels required\n were missing.\n\n :rtype: int\n '''\n return self.n_labels - sum(s['total'] for s in self.classify)\n\n\ndef classification_counts(classify, status_first):\n '''Count the occurrences of different statuses in the `classify`\n dictionary.\n\n :param dict classify: a dictionary associating *things* to statuses. The\n statuses must have the same type as `status_first`\n :param status_first: the status that is considered as success. This must be\n an enum class\n :returns: a pair of lists of equal length. The first element of the pair is\n the list of statuses appearing in `classify` (`status_first` is\n guaranteed to come first in this list); the second element is the\n number of times the corresponding status appears in `classify`.\n '''\n statuses = [status_first]\n statuses.extend(status for status in status_first.__class__\n if status != status_first)\n counts = [len(classify[status]) for status in statuses]\n statuses = [status for status, count in zip(statuses, counts)\n if count != 0]\n counts = [count for count in counts if count != 0]\n return statuses, counts\n", "repo_name": "valjean-framework/valjean", "sub_path": "valjean/gavroche/diagnostics/stats.py", "file_name": "stats.py", "file_ext": "py", "file_size_in_byte": 28475, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "functools.partial", "line_number": 29, "usage_type": "call"}, {"api_name": "functools.update_wrapper", "line_number": 30, "usage_type": "call"}, {"api_name": "cosette.use.Use", "line_number": 31, "usage_type": "call"}, {"api_name": "eval_test_task.EvalTestTask.from_test_task", "line_number": 32, "usage_type": "call"}, {"api_name": "eval_test_task.EvalTestTask", "line_number": 32, "usage_type": "name"}, {"api_name": "cosette.task.close_dependency_graph", "line_number": 86, "usage_type": "call"}, {"api_name": "fingerprint.fingerprint", "line_number": 96, "usage_type": "name"}, {"api_name": "functools.total_ordering", "line_number": 89, "usage_type": "name"}, {"api_name": "test.Test", "line_number": 114, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 136, "usage_type": "call"}, {"api_name": "test.TestResult", "line_number": 151, "usage_type": "name"}, {"api_name": "cosette.task.TaskStatus.DONE", "line_number": 170, "usage_type": "attribute"}, {"api_name": "cosette.task.TaskStatus", "line_number": 170, "usage_type": "name"}, {"api_name": "enum.IntEnum", "line_number": 173, "usage_type": "name"}, {"api_name": "test.Test", "line_number": 307, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 323, "usage_type": "call"}, {"api_name": "test.TestResult", "line_number": 331, "usage_type": 
"argument"}, {"api_name": "fingerprint.fingerprint", "line_number": 339, "usage_type": "call"}, {"api_name": "test.Test", "line_number": 481, "usage_type": "name"}, {"api_name": "eponine.browser.Index", "line_number": 551, "usage_type": "call"}]} +{"seq_id": "16248475630", "text": "#! /usr/bin/env python\n\"\"\"\nLoad a genbank-free lineage, anchor with genbank.\n\"\"\"\nimport sys\nimport argparse\nimport csv\nimport traceback\nimport ncbi_taxdump_utils\nfrom collections import defaultdict, Counter\nimport itertools\nimport pprint\nimport sourmash_lib\nimport lca_json # from github.com/ctb/2017-sourmash-lca\n\nLCA_DBs = ['db/genbank.lca.json']\nSCALED=10000\nTHRESHOLD=5 # how many counts of a taxid at min\n\nsys.path.insert(0, '../2017-sourmash-revindex')\nimport revindex_utils\n\nclass TaxidNotFound(Exception):\n pass\n\n\ntaxlist = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus',\n 'species']\nnull_names = set(['[Blank]', 'na', 'null'])\n\n\n_print_debug = False\ndef debug(*args):\n if _print_debug:\n print(*args)\n\n\ndef get_taxids_for_name(taxfoo, names_to_taxids, srank, sname):\n\n # what taxids does the query name have?\n tid_set = names_to_taxids.get(sname, set())\n tid_list = list(tid_set)\n\n # none? that's a problem, quit out.\n if not tid_list:\n raise TaxidNotFound(sname)\n\n # collect taxids at the right rank\n taxid_at_rank = []\n for taxid in tid_list:\n rank = taxfoo.get_taxid_rank(taxid)\n if rank == srank:\n assert taxfoo.get_taxid_name(taxid) == sname\n taxid_at_rank.append(taxid)\n\n # there should only be one \n if len(taxid_at_rank) == 1:\n return taxid_at_rank[0]\n\n # @CTB need to do something more here.\n\n return -1\n\n\ndef get_lca_taxid_for_lineage(taxfoo, names_to_taxids, lineage):\n \"\"\"\n Given taxfoo and a list of lineage pairs (rank, identifier), find the least\n common ancestor in the lineage, and return that taxid with the rest of\n the lineage pairs.\n \"\"\"\n lineage = list(lineage) # make a copy\n while lineage:\n (rank, name) = lineage.pop(0)\n try:\n taxid = get_taxids_for_name(taxfoo, names_to_taxids, rank, name)\n if taxid == -1:\n raise TaxidNotFound\n last_taxid = taxid\n\n assert taxfoo.get_taxid_rank(taxid) == rank\n assert taxfoo.get_taxid_name(taxid) == name\n except TaxidNotFound:\n lineage.insert(0, (rank, name)) # add back in!\n break\n\n return last_taxid, lineage\n\n\ndef get_lowest_taxid_for_lineage(taxfoo, names_to_taxids, lineage):\n \"\"\"\n Given taxfoo and a list of lineage pairs (rank, identifier), find\n the lowest rank that has a match in NCBI lineage, and return that\n taxid with the rest of the lineage pairs.\n \"\"\"\n lineage = list(lineage) # make a copy\n remainder = []\n while 1:\n (rank, ident) = lineage.pop() # pop from end\n try:\n taxid = get_taxids_for_name(taxfoo, names_to_taxids, rank, ident)\n if taxid == -1:\n raise TaxidNotFound\n except TaxidNotFound:\n remainder.append((rank, ident))\n continue # super borked logic\n\n break # super borked logic\n\n return taxid, list(reversed(remainder))\n\n\ndef build_tree(assignments, initial=None):\n \"\"\"\n Builds a tree of dictionaries from lists of (rank, name) tuples\n in 'assignments'. 
This tree can then be used to find least common\n ancestor agreements/confusion.\n \"\"\"\n if initial is None:\n tree = {}\n else:\n tree = initial\n\n for assignment in assignments:\n node = tree\n\n for rank, name in assignment:\n child = node.get((rank, name), {})\n node[(rank, name)] = child\n\n # shift -> down in tree\n node = child\n\n return tree\n\n\ndef test_build_tree():\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2')]])\n assert tree == { ('rank1', 'name1'): { ('rank2', 'name2') : {}} }\n\n\ndef test_build_tree_2():\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2a')],\n [('rank1', 'name1'), ('rank2', 'name2b')],\n ])\n\n assert tree == { ('rank1', 'name1'): { ('rank2', 'name2a') : {},\n ('rank2', 'name2b') : {}} }\n\n\ndef test_build_tree_3():\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2a')],\n ])\n\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2b')],\n ], tree)\n\n assert tree == { ('rank1', 'name1'): { ('rank2', 'name2a') : {},\n ('rank2', 'name2b') : {}} }\n\n\ndef find_lca(tree):\n \"\"\"\n Given a tree produced by 'find_tree', find the first node with multiple\n children, OR the only leaf in the tree. Return ((rank, name), reason),\n where 'reason' is the number of children of the returned node, i.e.e\n 0 if it's a leaf and > 1 if it's an internal node.\n \"\"\"\n\n node = tree\n cur = ('root', 'root')\n while 1:\n if len(node) == 1: # descend to only child\n cur = next(iter(node.keys()))\n node = node[cur]\n elif len(node) == 0: # at leaf; end\n return cur, 0\n else: # len(node) > 1 => confusion!!\n return cur, len(node)\n\n\ndef test_find_lca():\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2')]])\n lca = find_lca(tree)\n\n assert lca == (('rank2', 'name2'), 0)\n\n\ndef test_find_lca_2():\n tree = build_tree([[('rank1', 'name1'), ('rank2', 'name2a')],\n [('rank1', 'name1'), ('rank2', 'name2b')],\n ])\n lca = find_lca(tree)\n\n assert lca == (('rank1', 'name1'), 2)\n\n\ndef build_reverse_tree(assignments, initial=None):\n \"\"\"\n Builds a child -> parent dictionary (a reverse DAG) from lists of\n (rank, name) tuples in 'assignments'.\n \"\"\"\n if initial is None:\n parents = {}\n else:\n parents = initial\n\n for assignment in assignments:\n last_node = ('root', 'root')\n for rank, name in assignment:\n parents[(rank, name)] = last_node\n last_node = (rank, name)\n\n return parents\n\n\ndef test_build_reverse_tree():\n parents = build_reverse_tree([[('rank1', 'name1'), ('rank2', 'name2')]])\n\n print(parents)\n assert parents == { ('rank2', 'name2'): ('rank1', 'name1'),\n ('rank1', 'name1'): ('root', 'root') }\n\n\ndef test_build_reverse_tree_2():\n parents = build_reverse_tree([[('rank1', 'name1'), ('rank2', 'name2a')],\n [('rank1', 'name1'), ('rank2', 'name2b')],\n ])\n\n assert parents == { ('rank2', 'name2a'): ('rank1', 'name1'),\n ('rank2', 'name2b'): ('rank1', 'name1'),\n ('rank1', 'name1'): ('root', 'root') }\n\n\ndef test_build_reverse_tree_3():\n parents = build_reverse_tree([[('rank1', 'name1'), ('rank2', 'name2a')],\n ])\n parents = build_reverse_tree([[('rank1', 'name1'), ('rank2', 'name2b')],\n ], parents)\n\n assert parents == { ('rank2', 'name2a'): ('rank1', 'name1'),\n ('rank2', 'name2b'): ('rank1', 'name1'),\n ('rank1', 'name1'): ('root', 'root') }\n\n\ndef main():\n p = argparse.ArgumentParser()\n p.add_argument('csv')\n p.add_argument('revindex')\n p.add_argument('siglist', nargs='+')\n p.add_argument('--lca', nargs='+', default=LCA_DBs)\n p.add_argument('-k', '--ksize', default=31, 
type=int)\n p.add_argument('-o', '--output', type=argparse.FileType('wt'),\n help='output CSV to this file instead of stdout')\n #p.add_argument('-v', '--verbose', action='store_true')\n p.add_argument('-d', '--debug', action='store_true')\n args = p.parse_args()\n\n if args.debug:\n global _print_debug\n _print_debug = True\n\n ## load LCA databases\n lca_db_list = []\n for lca_filename in args.lca:\n print('loading LCA database from {}'.format(lca_filename),\n file=sys.stderr)\n lca_db = lca_json.LCA_Database(lca_filename)\n taxfoo, hashval_to_lca, _ = lca_db.get_database(args.ksize, SCALED)\n lca_db_list.append((taxfoo, hashval_to_lca))\n \n # reverse index names -> taxids\n names_to_taxids = defaultdict(set)\n for taxid, (name, _, _) in taxfoo.taxid_to_names.items():\n names_to_taxids[name].add(taxid)\n\n ### parse spreadsheet\n r = csv.reader(open(args.csv, 'rt'))\n row_headers = ['identifier'] + taxlist\n\n print('examining spreadsheet headers...', file=sys.stderr)\n first_row = next(iter(r))\n\n n_disagree = 0\n for (column, value) in zip(row_headers, first_row):\n if column.lower() != value.lower():\n print('** assuming {} == {} in spreadsheet'.format(column, value),\n file=sys.stderr)\n n_disagree += 1\n if n_disagree > 2:\n print('whoa, too many assumptions. are the headers right?',\n file=sys.stderr)\n sys.exit(-1)\n\n confusing_lineages = defaultdict(list)\n incompatible_lineages = defaultdict(list)\n assignments = {}\n for row in r:\n lineage = list(zip(row_headers, row))\n\n ident = lineage[0][1]\n lineage = lineage[1:]\n\n # clean lineage of null names\n lineage = [(a,b) for (a,b) in lineage if b not in null_names]\n\n # ok, find the least-common-ancestor taxid...\n taxid, rest = get_lca_taxid_for_lineage(taxfoo, names_to_taxids,\n lineage)\n\n # and find the *lowest* identifiable ancestor taxid, just to see\n # if there are confusing lineages.\n lowest_taxid, lowest_rest = \\\n get_lowest_taxid_for_lineage(taxfoo, names_to_taxids, lineage)\n\n # do they match? if not, report.\n if lowest_taxid != taxid:\n lowest_lineage = taxfoo.get_lineage(lowest_taxid, taxlist)\n lowest_str = ', '.join(lowest_lineage)\n\n # find last matching, in case different classification levels.\n match_lineage = [ b for (a, b) in lineage ]\n end = match_lineage.index(lowest_lineage[-1])\n assert end >= 0\n match_lineage = match_lineage[:end + 1]\n match_str = ', '.join(match_lineage)\n\n confusing_lineages[(match_str, lowest_str)].append(ident)\n\n # check! NCBI lineage should be lineage of taxid + rest\n ncbi_lineage = taxfoo.get_lineage(taxid, taxlist)\n assert len(ncbi_lineage)\n reconstructed = ncbi_lineage + [ b for (a,b) in rest ]\n\n # ...make a comparable lineage from the CSV line...\n csv_lineage = [ b for (a, b) in lineage ]\n\n # are NCBI-rooted and CSV lineages the same?? if not, report.\n if csv_lineage != reconstructed:\n csv_str = \", \".join(csv_lineage[:len(ncbi_lineage)])\n ncbi_str = \", \".join(ncbi_lineage)\n incompatible_lineages[(csv_str, ncbi_str)].append(ident)\n\n # all is well if we've reached this point! We've got NCBI-rooted\n # taxonomies and now we need to record. 
next:\n #\n # build a set of triples: (rank, name, taxid), where taxid can\n # be None.\n\n lineage_taxids = taxfoo.get_lineage_as_taxids(taxid)\n tuples_info = []\n for taxid in lineage_taxids:\n name = taxfoo.get_taxid_name(taxid)\n rank = taxfoo.get_taxid_rank(taxid)\n\n if rank in taxlist:\n tuples_info.append((rank, name))\n\n for (rank, name) in rest:\n assert rank in taxlist\n tuples_info.append((rank, name))\n\n assignments[ident] = tuples_info\n\n print(\"{} weird lineages that maybe don't match with NCBI.\".format(len(confusing_lineages) + len(incompatible_lineages)), file=sys.stderr)\n\n ## next phase: collapse lineages etc.\n\n ## load revindex\n print('loading reverse index:', args.revindex, file=sys.stderr)\n custom_bins_ri = revindex_utils.HashvalRevindex(args.revindex)\n\n # load the signatures associated with each revindex.\n print('loading signatures for custom genomes...', file=sys.stderr)\n sigids_to_sig = {}\n for sigid, (filename, md5) in custom_bins_ri.sigid_to_siginfo.items():\n sig = revindex_utils.get_sourmash_signature(filename, md5)\n if sig.name() in assignments:\n sigids_to_sig[sigid] = sig\n else:\n debug('no assignment:', sig.name())\n\n # figure out what ksize we're talking about here! (this should\n # probably be stored on the revindex...)\n random_db_sig = next(iter(sigids_to_sig.values()))\n ksize = random_db_sig.minhash.ksize\n\n print('...found {} custom genomes that also have assignments!!'.format(len(sigids_to_sig)), file=sys.stderr)\n\n ## now, connect the dots: hashvals to custom classifications\n hashval_to_custom = defaultdict(list)\n for hashval, sigids in custom_bins_ri.hashval_to_sigids.items():\n for sigid in sigids:\n sig = sigids_to_sig.get(sigid, None)\n if sig:\n assignment = assignments[sig.name()]\n hashval_to_custom[hashval].append(assignment)\n\n # whew! done!! we can now go from a hashval to a custom assignment!!\n\n # for each query, gather all the matches in both custom and NCBI, then\n # classify.\n csvfp = csv.writer(sys.stdout)\n if args.output:\n print(\"outputting classifications to '{}'\".format(args.output.name))\n csvfp = csv.writer(args.output)\n else:\n print(\"outputting classifications to stdout\")\n csvfp.writerow(['ID'] + taxlist)\n\n total_count = 0\n for query_filename in args.siglist:\n for query_sig in sourmash_lib.load_signatures(query_filename,\n ksize=ksize):\n print(u'\\r\\033[K', end=u'', file=sys.stderr)\n print('... 
classifying {}'.format(query_sig.name()), end='\\r',\n file=sys.stderr)\n debug('classifying', query_sig.name())\n total_count += 1\n\n these_assignments = defaultdict(list)\n n_custom = 0\n for hashval in query_sig.minhash.get_mins():\n # custom\n assignment = hashval_to_custom.get(hashval, [])\n if assignment:\n these_assignments[hashval].extend(assignment)\n n_custom += 1\n\n # NCBI\n for (this_taxfoo, hashval_to_lca) in lca_db_list:\n hashval_lca = hashval_to_lca.get(hashval)\n if hashval_lca is not None and hashval_lca != 1:\n lineage = this_taxfoo.get_lineage_as_dict(hashval_lca,\n taxlist)\n\n tuple_info = []\n for rank in taxlist:\n if rank not in lineage:\n break\n tuple_info.append((rank, lineage[rank]))\n these_assignments[hashval_lca].append(tuple_info)\n\n check_counts = Counter()\n for tuple_info in these_assignments.values():\n last_tup = tuple(tuple_info[-1])\n check_counts[last_tup] += 1\n\n debug('n custom hashvals:', n_custom)\n debug(pprint.pformat(check_counts.most_common()))\n\n # now convert to trees -> do LCA & counts\n counts = Counter()\n parents = {}\n for hashval in these_assignments:\n\n # for each list of tuple_info [(rank, name), ...] build\n # a tree that lets us discover least-common-ancestor.\n tuple_info = these_assignments[hashval]\n tree = build_tree(tuple_info)\n\n # also update a tree that we can ascend from leaves -> parents\n # for all assignments for all hashvals\n parents = build_reverse_tree(tuple_info, parents)\n\n # now find either a leaf or the first node with multiple\n # children; that's our least-common-ancestor node.\n lca, reason = find_lca(tree)\n counts[lca] += 1\n\n # ok, we now have the LCAs for each hashval, and their number\n # of counts. Now sum across \"significant\" LCAs - those above\n # threshold.\n\n tree = {}\n tree_counts = defaultdict(int)\n\n debug(pprint.pformat(counts.most_common()))\n\n n = 0\n for lca, count in counts.most_common():\n if count < THRESHOLD:\n break\n\n n += 1\n\n xx = []\n parent = lca\n while parent:\n xx.insert(0, parent)\n tree_counts[parent] += count\n parent = parents.get(parent)\n debug(n, count, xx[1:])\n\n # update tree with this set of assignments\n build_tree([xx], tree)\n\n if n > 1:\n debug('XXX', n)\n\n # now find LCA? 
or whatever.\n lca, reason = find_lca(tree)\n if reason == 0: # leaf node\n debug('END', lca)\n else: # internal node\n debug('MULTI', lca)\n\n # backtrack to full lineage via parents\n lineage = []\n parent = lca\n while parent != ('root', 'root'):\n lineage.insert(0, parent)\n parent = parents.get(parent)\n\n # output!\n row = [query_sig.name()]\n for taxrank, (rank, name) in itertools.zip_longest(taxlist, lineage, fillvalue=('', '')):\n if rank:\n assert taxrank == rank\n row.append(name)\n\n csvfp.writerow(row)\n\n print(u'\\r\\033[K', end=u'', file=sys.stderr)\n print('classified {} signatures total'.format(total_count), file=sys.stderr)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "ctb/2017-sourmash-lca", "sub_path": "classify-free-tax.py", "file_name": "classify-free-tax.py", "file_ext": "py", "file_size_in_byte": 18118, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 246, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 252, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 266, "usage_type": "attribute"}, {"api_name": "lca_json.LCA_Database", "line_number": 267, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 272, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 277, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 280, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 287, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 291, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 292, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 294, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 364, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 369, "usage_type": "attribute"}, {"api_name": "revindex_utils.HashvalRevindex", "line_number": 370, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 373, "usage_type": "attribute"}, {"api_name": "revindex_utils.get_sourmash_signature", "line_number": 376, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 387, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 390, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 402, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 402, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 405, "usage_type": "call"}, {"api_name": "sourmash_lib.load_signatures", "line_number": 412, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 414, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 416, "usage_type": "attribute"}, {"api_name": "collections.defaultdict", "line_number": 420, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 443, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 449, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 452, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 475, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 477, "usage_type": "call"}, 
{"api_name": "itertools.zip_longest", "line_number": 516, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 523, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 524, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 528, "usage_type": "call"}]} +{"seq_id": "510329255", "text": "#!/usr/bin/python\nimport xml.etree.ElementTree as ET\nimport json\n\n# To check two XML snippets are semantically equal, this method compares\n# each element recursively.\n# This methods are borrowed from:\n# https://bitbucket.org/ianb/formencode/src/tip/formencode/doctest_xml_compare.py?fileviewer=file-view-default#cl-70\ndef xml_compare(x1, x2, reporter = False):\n if x1.tag != x2.tag:\n if reporter:\n print('Tags do not match: %s and %s' % (x1.tag, x2.tag))\n return False\n for name, value in x1.attrib.items():\n if x2.attrib.get(name) != value:\n if reporter:\n print('Attributes do not match: %s=%r, %s=%r' \\\n % (name, value, name, x2.attrib.get(name)))\n return False\n for name in x2.attrib.keys():\n if name not in x1.attrib:\n if reporter:\n print('x2 has an attribute x1 is mising: %s' \\\n % name)\n return False\n if not xml_text_compare(x1.text, x2.text):\n if reporter:\n print('text: r != %r' % (x1.text, x2.text))\n return False\n if not xml_text_compare(x1.tail, x2.tail):\n if reporter:\n print('tail: %r != %r' % (x1.tail, x2.tail))\n return False\n cl1 = x1.getchildren()\n cl2 = x2.getchildren()\n if len(cl1) != len(cl2):\n if reporter:\n print('children length differs, %i != %i' \\\n % (len(cl1), len(cl2)))\n return False\n i = 0\n for c1, c2 in zip(cl1, cl2):\n i += 1\n if not xml_compare(c1, c2, reporter=reporter):\n if reporter:\n print('children %i do not match: %s' \\\n % (i, c1.tag))\n return False\n return True\n\ndef xml_text_compare(t1, t2):\n if not t1 and not t2:\n return True\n if t1 == '*' or t2 == '*':\n return True\n return (t1 or '').strip() == (t2 or '').strip()\n\ndef json_comp(filename1, filename2):\n f1 = open(filename1)\n f2 = open(filename2)\n x1 = json.load(f1)\n x2 = json.load(f2)\n f1.close()\n f2.close()\n return json_ordered(x1) == json_ordered(x2)\n\ndef json_ordered(obj):\n if isinstance(obj, dict):\n return sorted((k, json_ordered(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(json_ordered(x) for x in obj)\n else:\n obj\n", "repo_name": "kyoshinaga/jigg-pipeline-test", "sub_path": "comp_modules.py", "file_name": "comp_modules.py", "file_ext": "py", "file_size_in_byte": 2382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 61, "usage_type": "call"}, {"api_name": "json.load", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "14895139138", "text": "import requests\nimport json\nimport pickle\nfrom time import sleep\n\nif __name__ == \"__main__\":\n d = json.dumps({'name': 'd', 'method': 'idn', 'arguments': None})\n r = requests.post(\" http://127.0.0.1:5000/Trigger\", data=d)\n d = json.dumps({'name': 'd', 'method': 'measure', 'arguments': 2000})\n r = requests.post(\" http://127.0.0.1:5000/Trigger\", data=d)\n\n sleep(2)\n\n d = json.dumps({'name': 'd', 'method': 'idn'})\n r = requests.post(\"http://127.0.0.1:5000/Read\", data=d)\n print(r.status_code, r.reason)\n print(pickle.loads(r.content))\n\n d = json.dumps({'name': 'd', 'method': 'measure'})\n r = requests.post(\"http://127.0.0.1:5000/Read\", data=d)\n print(r.status_code, r.reason)\n print(pickle.loads(r.content))\n\n 
print('===========GET==========')\n r = requests.get(\"http://127.0.0.1:5000/Read\",params={'name': 'd', 'method': 'measure'})\n print(pickle.loads(r.content))", "repo_name": "uetke/UUServer", "sub_path": "instserver/testServer.py", "file_name": "testServer.py", "file_ext": "py", "file_size_in_byte": 913, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dumps", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 8, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 10, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 12, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 15, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "27533173574", "text": "from django.shortcuts import get_object_or_404, redirect, render\nfrom tracker.models.match import Match\nfrom tracker.models.corner import Corner\nfrom tracker.models.shot import Shot\nfrom tracker.models.goal import Goal\nfrom tracker.forms.create_corner import CreateCornerForm\nfrom tracker.forms.create_goal_form import CreateGoalForm\nfrom tracker.forms.create_shot_form import CreateShotForm\nfrom tracker.forms.match_result_form import MatchResultForm\n\ndef play_game_view(request, match_id):\n match = get_object_or_404(Match, pk=match_id)\n create_corner_form = CreateCornerForm()\n create_shot_form = CreateShotForm(team=match.home_team)\n create_goal_form = CreateGoalForm(team=match.home_team)\n match_result_form = MatchResultForm()\n if request.method == 'POST':\n if 'end_game' in request.POST:\n match.result = request.POST['result']\n match.save()\n return redirect(f\"/tracker/start_game\")\n if 'create_corner' in request.POST: \n minute = request.POST['minute']\n Corner.objects.create(minute=minute, match=match)\n elif 'create_shot' in request.POST:\n minute = request.POST['minute']\n corner = match.get_latest_corner() if 'corner' in request.POST else None\n on_target = 'on_target' in request.POST\n blocked_by_player = 'blocked_by_player' in request.POST\n body_part = request.POST['body_part']\n player = int(request.POST['player'])\n Shot.objects.create(minute=minute, match=match, player_id=player, corner=corner,\n on_target=on_target, blocked_by_player=blocked_by_player, body_part=body_part)\n elif 'create_goal' in request.POST:\n minute = request.POST['minute']\n corner = match.get_latest_corner() if 'corner' in request.POST else None\n assist = int(request.POST['assist']) if request.POST['assist'] != '' else None\n impressive_assist = 'impressive_assist' in request.POST if assist else None\n impressive_goal = 'impressive_goal' in request.POST\n body_part = request.POST['body_part']\n player = int(request.POST['player'])\n Goal.objects.create(minute=minute, match=match, player_id=player, corner=corner, body_part=body_part,\n is_impressive_assist=impressive_assist, is_impressive_goal=impressive_goal, assist_id=assist)\n return 
redirect(f\"/tracker/play_game/{match_id}\")\n else: \n context = {\n 'match': match,\n 'events': [event for event in match.get_events_sorted()],\n 'create_corner_form': create_corner_form,\n 'create_shot_form': create_shot_form,\n 'create_goal_form': create_goal_form,\n 'match_result_form': match_result_form\n }\n return render(request, \"tracker/play_game.html\", context=context)", "repo_name": "ArthurXu17/Fifa-Stat-Tracker", "sub_path": "fifa_stat_tracker/tracker/views/play_game_view.py", "file_name": "play_game_view.py", "file_ext": "py", "file_size_in_byte": 2924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 12, "usage_type": "call"}, {"api_name": "tracker.models.match.Match", "line_number": 12, "usage_type": "argument"}, {"api_name": "tracker.forms.create_corner.CreateCornerForm", "line_number": 13, "usage_type": "call"}, {"api_name": "tracker.forms.create_shot_form.CreateShotForm", "line_number": 14, "usage_type": "call"}, {"api_name": "tracker.forms.create_goal_form.CreateGoalForm", "line_number": 15, "usage_type": "call"}, {"api_name": "tracker.forms.match_result_form.MatchResultForm", "line_number": 16, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 21, "usage_type": "call"}, {"api_name": "tracker.models.corner.Corner.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "tracker.models.corner.Corner.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "tracker.models.corner.Corner", "line_number": 24, "usage_type": "name"}, {"api_name": "tracker.models.shot.Shot.objects.create", "line_number": 32, "usage_type": "call"}, {"api_name": "tracker.models.shot.Shot.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tracker.models.shot.Shot", "line_number": 32, "usage_type": "name"}, {"api_name": "tracker.models.goal.Goal.objects.create", "line_number": 42, "usage_type": "call"}, {"api_name": "tracker.models.goal.Goal.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tracker.models.goal.Goal", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "70059846246", "text": "# Libraries\nimport time\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\nimport os\nimport pyinotify\nimport paramiko\nimport torch\nimport textwrap\n\n#-----------------------------------LOAD TOKENIZER AND MODEL----------------------------------------\ntokenizer = AutoTokenizer.from_pretrained(\"WizardLM/WizardCoder-15B-V1.0\")\nmodel = AutoModelForCausalLM.from_pretrained(\"WizardLM/WizardCoder-15B-V1.0\", torch_dtype=torch.float16, device_map='sequential', max_memory={0: '49 GiB'}, revision='main', low_cpu_mem_usage = True, offload_folder='offload')\nprint(\"Model Loaded\")\n\n#Path to the necessary files\nmainFolder = '/home/pragya/DesktopCode/'\nuserTask_file_path = mainFolder + 'UserTask.txt'\npromptStructure_file_path = mainFolder + 'PromptStructure.txt'\ncode_file_path = mainFolder + 'LLM_generated_code.py'\n\n#the directory that the EventHandler should monitor for changes\ndir_to_watch = os.path.abspath(mainFolder)\nwatcher_manager = pyinotify.WatchManager()\n\n#----------------------------- DEFINE THE EVENT HANDLER ---------------\nclass EventHandler(pyinotify.ProcessEvent):\n '''\n def 
process_IN_MODIFY(self, event):\n file_path = os.path.join(event.path, event.name)\n if file_path == prompt_file_path:\n print(f\"File: {prompt_file_path} is being modified...\")\n '''\n def process_IN_CLOSE_WRITE(self, event):\n file_path = os.path.join(event.path, event.name)\n if file_path == userTask_file_path:\n # Process the file update event\n cur_time = time.ctime(time.time())\n print(f\"User Task received\")\n central_loop()\n \n#----------------------------- AUXILIARY FUNCTIONS ------------------\ndef write_to_comp():\n '''\n Sends the code generated by the LLM (Wizard Coder) to the Nvidia Jetson to execute.\n '''\n \n hostname = '192.168.50.211'\n username = 'nesl'\n password = 'nesl'\n\n local_path = code_file_path\n remote_path = '/home/nesl/JetsonCode/desktopTransferredCode.py'\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n try:\n ssh.connect(hostname, username=username, password=password)\n scp = ssh.open_sftp()\n scp.put(local_path, remote_path)\n print(\"File transferred successfully\")\n finally:\n ssh.close()\n\ndef generate_code():\n '''\n First, it creates the prompt by appending inserting the user input into the general prompt file with the APIs and other descriptions.\n Then it passes it into the LLM to generate an output. Upon receiving the output, it writes only the code portion of the output into a .py file.\n '''\n \n print(\"Reading prompt.\")\n # read from prompt file\n try:\n with open(promptStructure_file_path, 'r', encoding='UTF-8') as prompt_file:\n prompt = prompt_file.read()\n \n with open (userTask_file_path, 'r', encoding='UTF-8') as task_file:\n \tuserTask = task_file.read()\n \t\n except Exception as e:\n print(\"Error when reading files:\", str(e))\n exit(e)\n \n if userTask is None:\n with open(code_file_path, 'w', encoding='UTF-8') as code_file:\n code_file.write(\"\")\n return\n \n if userTask[0] == ' ':\n \tuserTask = userTask[1:]\n #Lowercase the first letter of the first word in the usertask\n userTask = userTask[0].lower() + userTask[1:]\n \n #Remove new line characters from userTask string\n userTask = userTask.replace(\"\\n\", \"\")\n #Replace holder for user task with actual user task\n prompt = prompt.replace('', userTask)\n \n start_time = time.time()\n # tokenize prompt and model generate\n print(\"Running model.\")\n try:\n inputs = tokenizer(prompt, return_tensors=\"pt\").to(\"cuda\").input_ids\n #outputs = model.generate(inputs, pad_token_id = tokenizer.pad_token_id, bos_token_id = tokenizer.bos_token_id, eos_token_id = tokenizer.eos_token_id,max_new_tokens = 10000, temperature=0.2, do_sample=True, top_k=15, top_p=0.95)\n outputs = model.generate(inputs, pad_token_id = tokenizer.pad_token_id, bos_token_id = tokenizer.bos_token_id, eos_token_id = tokenizer.eos_token_id,max_new_tokens = 10000, temperature=0.2, do_sample=False)\n except Exception as e:\n print(\"Error when tokenize input the generate output:\", str(e))\n exit(e)\n\n # decode output\n try:\n code = tokenizer.batch_decode(outputs, skip_special_tokens=True)\n except Exception as e:\n print(\"Error when decoding:\", str(e))\n exit(e)\n\t\n end_time = time.time()\n # save to code file\n try:\n with open(code_file_path, 'w', encoding='UTF-8') as code_file:\n for item in code:\n print(item)\n first_index = item.find(\"```python\")\n if first_index == -1:\n continue\n first_index += len(\"```python\")\n last_index = item[first_index:].find(\"```\") + first_index\n code_file.write(str(item[first_index:last_index]) + '\\n')\n except 
Exception as e:\n print(\"Error when write file LLM_genereated_code_test.py:\", str(e))\n exit(e)\n time_used = end_time - start_time\n print(\"Finish Generating Code:\", time_used)\n\n#----------------------------- MAIN LOOPED FUNCTION ------------------\ndef central_loop():\n '''\n First, it runs the LLM with the prompt given and writes the Python code portion of the output to a file using the generate_code function.\n Then it sends the generated Python code file to the Jetson using the write_to_comp function.\n '''\n\n generate_code()\n write_to_comp()\n\n\n#----------------------------- MODEL BOOT UP ------------------\n\ndef initialModelBootUp():\n bootPrompts_path = mainFolder + 'BootUpPrompts.txt'\n \n with open(bootPrompts_path, 'r', encoding='UTF-8') as file:\n prompts = file.readlines()\n \n # Strip newline characters and create a list of lines\n prompt_list = [prompt.strip() for prompt in prompts]\n \n for prompt in prompt_list:\n with open(userTask_file_path, 'w') as file:\n file.write(prompt)\n generate_code()\n \n print(\"BootUp Files successfully run.\")\n \n#initialModelBootUp()\n\n#----------------------------- ACTIVATING LOOP ------------------\n# Add the directory to the watcher\n# watch_mask = pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE\nwatch_mask = pyinotify.IN_CLOSE_WRITE\nwatcher_manager.add_watch(dir_to_watch, watch_mask)\n\n# Create the notifier and associate it with the watcher and event handler\nnotifier = pyinotify.Notifier(watcher_manager, EventHandler())\n\n# Start monitoring for file changes\ntry:\n print(f\"Awaiting User Task\")\n notifier.loop()\nexcept KeyboardInterrupt:\n # Exit gracefully when interrupted by Ctrl+C\n notifier.stop()\n print(\"Monitoring stopped\")\n", "repo_name": "nesl/LLM_Based_Robots", "sub_path": "Main_System_Code/DesktopCode/WizardCoder_CodeGen.py", "file_name": "WizardCoder_CodeGen.py", "file_ext": "py", "file_size_in_byte": 6876, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "transformers.AutoTokenizer.from_pretrained", "line_number": 11, "usage_type": "call"}, {"api_name": "transformers.AutoTokenizer", "line_number": 11, "usage_type": "name"}, {"api_name": "transformers.AutoModelForCausalLM.from_pretrained", "line_number": 12, "usage_type": "call"}, {"api_name": "transformers.AutoModelForCausalLM", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.float16", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pyinotify.WatchManager", "line_number": 23, "usage_type": "call"}, {"api_name": "pyinotify.ProcessEvent", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "time.ctime", "line_number": 37, "usage_type": "call"}, {"api_name": "time.time", "line_number": 37, "usage_type": "call"}, {"api_name": "paramiko.SSHClient", "line_number": 54, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "pyinotify.IN_CLOSE_WRITE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pyinotify.Notifier", "line_number": 173, "usage_type": 
"call"}]} +{"seq_id": "40447511007", "text": "from odoo.addons.web.controllers import database\nimport odoo\nfrom lxml import html\nfrom odoo.tools.misc import file_open\nfrom odoo import http\nfrom odoo.http import request\nfrom odoo.addons.base.models.ir_qweb import render as qweb_render\n\nDBNAME_PATTERN = '^[a-zA-Z0-9][a-zA-Z0-9_.-]+$'\n\n\nclass Database(database.Database):\n \"\"\"A class that represents a database.\n\n This class inherits from `database.Database` and provides additional functionality for managing and rendering database templates.\n\n Attributes:\n None\n \"\"\"\n\n def _render_template(self, **d):\n \"\"\"Render the database template with the given data.\n\n Args:\n **d: The data to render the template with.\n\n Returns:\n str: The rendered database template.\n \"\"\"\n d.setdefault('manage', True)\n d['insecure'] = odoo.tools.config.verify_admin_password('admin')\n d['list_db'] = odoo.tools.config['list_db']\n d['langs'] = odoo.service.db.exp_list_lang()\n d['countries'] = odoo.service.db.exp_list_countries()\n d['pattern'] = DBNAME_PATTERN\n # databases delete protection\n db_to_restrict_delete = odoo.tools.config.get('db_delete_restrict',\n False)\n if db_to_restrict_delete:\n databases_restrict_delete = db_to_restrict_delete.replace(\" \", \"\")\n d['delete_restrict'] = databases_restrict_delete.split(',')\n # databases list\n try:\n d['databases'] = http.db_list()\n d['incompatible_databases'] = odoo.service.db.list_db_incompatible(\n d['databases'])\n except odoo.exceptions.AccessDenied:\n d['databases'] = [request.db] if request.db else []\n\n templates = {}\n\n with file_open(\n \"database_delete_protection/static/src/public/database_manager.qweb.html\",\n \"r\") as fd:\n templates['database_manager'] = fd.read()\n with file_open(\n \"web/static/src/public/database_manager.master_input.qweb.html\",\n \"r\") as fd:\n templates['master_input'] = fd.read()\n with file_open(\n \"web/static/src/public/database_manager.create_form.qweb.html\",\n \"r\") as fd:\n templates['create_form'] = fd.read()\n\n def load(template_name):\n fromstring = html.document_fromstring if template_name == 'database_manager' else html.fragment_fromstring\n return (fromstring(templates[template_name]), template_name)\n\n return qweb_render('database_manager', d, load)\n", "repo_name": "CybroOdoo/CybroAddons", "sub_path": "database_delete_protection/controller/database_delete_protection.py", "file_name": "database_delete_protection.py", "file_ext": "py", "file_size_in_byte": 2673, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 204, "dataset": "github-code", "pt": "52", "api": [{"api_name": "odoo.addons.web.controllers.database.Database", "line_number": 12, "usage_type": "attribute"}, {"api_name": "odoo.addons.web.controllers.database", "line_number": 12, "usage_type": "name"}, {"api_name": "odoo.tools.config.verify_admin_password", "line_number": 31, "usage_type": "call"}, {"api_name": "odoo.tools", "line_number": 31, "usage_type": "attribute"}, {"api_name": "odoo.tools", "line_number": 32, "usage_type": "attribute"}, {"api_name": "odoo.service.db.exp_list_lang", "line_number": 33, "usage_type": "call"}, {"api_name": "odoo.service", "line_number": 33, "usage_type": "attribute"}, {"api_name": "odoo.service.db.exp_list_countries", "line_number": 34, "usage_type": "call"}, {"api_name": "odoo.service", "line_number": 34, "usage_type": "attribute"}, {"api_name": "odoo.tools.config.get", "line_number": 37, "usage_type": "call"}, {"api_name": "odoo.tools", "line_number": 
37, "usage_type": "attribute"}, {"api_name": "odoo.http.db_list", "line_number": 44, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 44, "usage_type": "name"}, {"api_name": "odoo.service.db.list_db_incompatible", "line_number": 45, "usage_type": "call"}, {"api_name": "odoo.service", "line_number": 45, "usage_type": "attribute"}, {"api_name": "odoo.exceptions", "line_number": 47, "usage_type": "attribute"}, {"api_name": "odoo.http.request.db", "line_number": 48, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 48, "usage_type": "name"}, {"api_name": "odoo.tools.misc.file_open", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.tools.misc.file_open", "line_number": 56, "usage_type": "call"}, {"api_name": "odoo.tools.misc.file_open", "line_number": 60, "usage_type": "call"}, {"api_name": "lxml.html.document_fromstring", "line_number": 66, "usage_type": "attribute"}, {"api_name": "lxml.html", "line_number": 66, "usage_type": "name"}, {"api_name": "lxml.html.fragment_fromstring", "line_number": 66, "usage_type": "attribute"}, {"api_name": "odoo.addons.base.models.ir_qweb.render", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "14385072101", "text": "import pytest\nimport flet as ft\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom flet_mvc import FletModel, data\nfrom data.component_attribute import component_value_attr_map, potential_attributes\n\n\n# define your model\nclass MockModel(FletModel):\n @data\n def datapoint(self):\n return \"\"\n\n @data\n def datapoint_list(self):\n return []\n\n @data\n def ref_datapoint(self):\n return \"Datapoint initial value\"\n\n @data.RefOnly\n def ref_only_datapoint(self):\n return None\n\n\n# Test DataPoint initialization\ndef test_datapoint_init():\n model = MockModel()\n assert isinstance(model.datapoint, data)\n\n\n# Test DataPoint set, get and reset value\ndef test_datapoint_set_get():\n model = MockModel()\n model.datapoint.set_value(\"Test Value\")\n assert model.datapoint() == \"Test Value\"\n model.datapoint.reset()\n assert model.datapoint() == \"\"\n\n\n# Test DataPoint append value\ndef test_datapoint_append():\n model = MockModel()\n assert not model.datapoint_list.has_set_value()\n\n model.datapoint_list().append(\"Test Value\") # normal append won't modify has_set_value attr.\n assert not model.datapoint_list.has_set_value()\n assert model.datapoint_list() == [\"Test Value\"]\n\n model.datapoint_list.set_value([\"Test Value\"])\n assert model.datapoint_list.has_set_value()\n\n model.datapoint_list.reset()\n assert not model.datapoint_list.has_set_value()\n assert not model.datapoint_list()\n\n\n# Test DataPoint logical operations\ndef test_datapoint_logical():\n model = MockModel()\n model.datapoint.set_value(None)\n assert not model.datapoint() # Should evaluate to False\n model.datapoint.set_value(\"Test Value\")\n assert model.datapoint() # Should evaluate to True\n\n\n# Test DataPoint logical operations\ndef test_datapoint_ref_only():\n model = MockModel()\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint()\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint.set_value()\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint.append(1)\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint.value\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint.value = 1\n\n with pytest.raises(TypeError):\n model.ref_only_datapoint.set_default()\n\n with pytest.raises(TypeError):\n 
model.ref_only_datapoint.reset()\n\n\ndef test_datapoint_ref_only2():\n model = MockModel()\n assert not model.ref_only_datapoint.current\n ft.Text(ref=model.ref_only_datapoint)\n assert model.ref_only_datapoint.current\n\n\n# Test that we can set the initial value of a component using a Ref object\n@pytest.mark.parametrize(\"component, value_attr\", component_value_attr_map.items())\ndef test_initial_value(component, value_attr):\n model = MockModel()\n\n if potential_attributes[value_attr] == list:\n value = [\"item1\", \"item2\"]\n new_default = [\"default1\", \"default2\"]\n elif potential_attributes[value_attr] == str:\n value = \"Initial value\"\n new_default = \"Default\"\n else:\n value = ft.Text(\"test\") # Default case\n new_default = ft.Text(\"new_default\")\n\n model.ref_datapoint.set_default(new_default)\n kwargs = {value_attr: value}\n\n component_instance = component(ref=model.ref_datapoint, **kwargs)\n\n assert getattr(component_instance, value_attr) == value\n model.ref_datapoint.reset()\n assert getattr(component_instance, value_attr) == new_default\n model.ref_datapoint.set_value(value)\n assert getattr(component_instance, value_attr) == value\n\n model.ref_datapoint.__hard_reset__()\n", "repo_name": "o0Adrian/flet-mvc", "sub_path": "tests/test_datapoints.py", "file_name": "test_datapoints.py", "file_ext": "py", "file_size_in_byte": 3666, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "flet_mvc.FletModel", "line_number": 11, "usage_type": "name"}, {"api_name": "flet_mvc.data", "line_number": 12, "usage_type": "name"}, {"api_name": "flet_mvc.data", "line_number": 16, "usage_type": "name"}, {"api_name": "flet_mvc.data", "line_number": 20, "usage_type": "name"}, {"api_name": "flet_mvc.data.RefOnly", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flet_mvc.data", "line_number": 24, "usage_type": "name"}, {"api_name": "flet_mvc.data", "line_number": 32, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 74, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 77, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 80, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 83, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 89, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 92, "usage_type": "call"}, {"api_name": "flet.Text", "line_number": 99, "usage_type": "call"}, {"api_name": "data.component_attribute.potential_attributes", "line_number": 108, "usage_type": "name"}, {"api_name": "data.component_attribute.potential_attributes", "line_number": 111, "usage_type": "name"}, {"api_name": "flet.Text", "line_number": 115, "usage_type": "call"}, {"api_name": "flet.Text", "line_number": 116, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 104, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 104, "usage_type": "attribute"}, {"api_name": "data.component_attribute.component_value_attr_map.items", "line_number": 
104, "usage_type": "call"}, {"api_name": "data.component_attribute.component_value_attr_map", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "41493786594", "text": "import os\nimport sys\nimport re\nimport fnmatch\n\nif sys.version_info < (2, 7):\n # Python < 2.7 doesn't have the cmp_to_key function.\n from baboon.common.utils import cmp_to_key\nelse:\n from functools import cmp_to_key\n\nfrom baboon.baboon.monitor import EventHandler\nfrom baboon.common.errors.baboon_exception import BaboonException\n\n\nclass EventHandlerGit(EventHandler):\n def __init__(self, project_path):\n super(EventHandlerGit, self).__init__(project_path)\n\n # My ignore file name is...\n self.gitignore_path = os.path.join(project_path, '.gitignore')\n\n # Lists of compiled RegExp objects\n self.include_regexps = []\n self.exclude_regexps = []\n\n # Update those lists\n self._populate_gitignore_items()\n\n @property\n def scm_name(self):\n return 'git'\n\n def exclude(self, rel_path):\n # First, check if the modified file is the gitignore file. If it's the\n # case, update include/exclude paths lists.\n if rel_path == self.gitignore_path:\n self._populate_gitignore_items()\n\n # Return True only if rel_path matches an exclude pattern AND does NOT\n # match an include pattern. Else, return False\n if (self._match_excl_regexp(rel_path) and\n not self._match_incl_regexp(rel_path)):\n\n return True\n\n return False\n\n def on_modified(self, event):\n \"\"\"\n \"\"\"\n\n rel_path = os.path.relpath(event.src_path, self.project_path)\n if rel_path == '.gitignore':\n # Reparse the gitignore.\n self._populate_gitignore_items()\n\n super(EventHandlerGit, self).on_modified(event)\n\n def _populate_gitignore_items(self):\n \"\"\" This method populates include and exclude lists with\n compiled regexps objects.\n \"\"\"\n\n # Reset the include_regexps and exclude_regexps.\n self.include_regexps = []\n self.exclude_regexps = [re.compile('.*\\.git/.*\\.lock'),\n re.compile('.*\\.baboon-timestamp'),\n re.compile('.*baboon.*')]\n\n # If there's a .gitignore file in the watched directory.\n if os.path.exists(self.gitignore_path):\n # Parse the gitignore.\n ignores = self._parse_gitignore()\n if ignores is not None:\n # Populate the regexps list with the ignores result.\n self.include_regexps += [re.compile(x) for x in ignores[0]]\n self.exclude_regexps += [re.compile(x) for x in ignores[1]]\n\n def _match_excl_regexp(self, rel_path):\n \"\"\" Returns True if rel_path matches any item in\n exclude_regexp list.\n \"\"\"\n\n for regexp in self.exclude_regexps:\n if regexp.search(rel_path) is not None:\n self.logger.debug(\"The path %s matches the ignore regexp\"\n \" %s.\" % (rel_path, regexp.pattern))\n return True\n\n return False\n\n def _match_incl_regexp(self, rel_path):\n \"\"\" Returns True if rel_path matches any item in\n include_regexp list.\n \"\"\"\n\n for neg_regexp in self.include_regexps:\n if neg_regexp.search(rel_path) is not None:\n self.logger.debug(\"The same path %s matches the include\"\n \" regexp %s.\" % (rel_path,\n neg_regexp.pattern))\n return True\n\n return False\n\n def _parse_gitignore(self):\n \"\"\" Parses the .gitignore file in the repository.\n Returns a tuple with:\n 1st elem: negative regexps (regexps to not match)\n 2nd elem: regexps\n \"\"\"\n gitignore_path = os.path.join(self.project_path, '.gitignore')\n lines = [] # contains each line of the .gitignore file\n results = [] # contains the result regexp patterns\n neg_results = [] # contains the result negative regexp patterns\n\n try:\n 
with open(gitignore_path, 'r') as f:\n lines = f.readlines()\n except IOError as err:\n raise BaboonException(format(err))\n\n # Sort the line in order to have inverse pattern first\n lines = sorted(lines, key=cmp_to_key(self._gitline_comparator))\n\n # For each git pattern, convert it to regexp pattern\n for line in lines:\n regexp = self._gitline_to_regexp(line)\n if regexp is not None:\n if not line.startswith('!'):\n results.append(regexp)\n else:\n neg_results.append(regexp)\n\n return neg_results, results\n\n def _gitline_comparator(self, a, b):\n \"\"\" Compares a and b. I want to have pattern started with '!'\n firstly\n \"\"\"\n if a.startswith('!'):\n return -1\n elif b.startswith('!'):\n return 1\n else:\n return a == b\n\n def _gitline_to_regexp(self, line):\n \"\"\" Convert the unix pattern (line) to a regex pattern\n \"\"\"\n negation = False # if True, inverse the pattern\n\n # Remove the dirty characters like spaces at the beginning\n # or at the end, carriage returns, etc.\n line = line.strip()\n\n # A blank line matches no files, so it can serve as a\n # separator for readability.\n if line == '':\n return\n\n # A line starting with # serves as a comment.\n if line.startswith('#'):\n return\n\n # An optional prefix ! which negates the pattern; any\n # matching file excluded by a previous pattern will become\n # included again. If a negated pattern matches, this will\n # override\n if line.startswith('!'):\n line = line[1:]\n negation = True\n\n # If the pattern does not contain a slash /, git treats it\n # as a shell glob pattern and checks for a match against\n # the pathname relative to the location of the .gitignore\n # file (relative to the toplevel of the work tree if not\n # from a .gitignore file).\n\n # Otherwise, git treats the pattern as a shell glob\n # suitable for consumption by fnmatch(3) with the\n # FNM_PATHNAME flag: wildcards in the pattern will not\n # match a / in the pathname. 
For example,\n # \"Documentation/*.html\" matches \"Documentation/git.html\"\n # but not \"Documentation/ppc/ppc.html\" or\n # \"tools/perf/Documentation/perf.html\".\n regex = fnmatch.translate(line)\n regex = regex.replace('\\\\Z(?ms)', '')\n\n if not negation:\n regex = '.*%s.*' % regex\n\n return regex\n", "repo_name": "SeyZ/baboon", "sub_path": "baboon/baboon/plugins/git/monitor_git.py", "file_name": "monitor_git.py", "file_ext": "py", "file_size_in_byte": 6691, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 119, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.version_info", "line_number": 6, "usage_type": "attribute"}, {"api_name": "baboon.baboon.monitor.EventHandler", "line_number": 16, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 67, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 68, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 77, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "baboon.common.errors.baboon_exception.BaboonException", "line_number": 122, "usage_type": "call"}, {"api_name": "functools.cmp_to_key", "line_number": 125, "usage_type": "call"}, {"api_name": "fnmatch.translate", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "73521323364", "text": "import os\nos.environ[\"LRU_CACHE_CAPACITY\"] = \"3\"# reduces RAM usage massively with pytorch 1.4 or older\nimport time\nimport argparse\nimport math\nfrom numpy import finfo\nimport numpy as np\n\nimport torch\nfrom distributed import DistributedDataParallel\nfrom torch.utils.data.distributed import DistributedSampler\nimport torch.distributed as dist\nfrom torch.nn import DataParallel\nfrom torch.utils.data import DataLoader\n\nfrom fp16_optimizer import FP16_Optimizer\n\nfrom model import load_model\nfrom model import Tacotron2\nfrom data_utils import TextMelLoader, TextMelCollate\nfrom hparams import create_hparams\nfrom train import init_distributed\n\nimport time\n\n\nclass StreamingMovingAverage:\n def __init__(self, window_size):\n self.window_size = window_size\n self.values = []\n self.sum = 0\n \n def process(self, value):\n self.values.append(value)\n self.sum += value\n if len(self.values) > self.window_size:\n self.sum -= self.values.pop(0)\n return float(self.sum) / len(self.values)\n\n\ndef init_distributed(hparams, n_gpus, rank, group_name):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n print(\"Initializing distributed\")\n # Set cuda device so everything is done on the right GPU.\n torch.cuda.set_device(rank % torch.cuda.device_count())\n # Initialize distributed communication\n torch.distributed.init_process_group(\n backend=hparams.dist_backend, init_method=hparams.dist_url,\n world_size=n_gpus, rank=rank, group_name=group_name)\n print(\"Done initializing distributed\")\n\n\ndef 
prepare_dataloaders(hparams, audio_offset=0):\n # Get data, data loaders and collate function ready\n trainset = TextMelLoader(hparams.training_files, hparams, TBPTT=False, check_files=False, verbose=True, audio_offset=audio_offset)\n collate_fn = TextMelCollate(hparams)\n \n if hparams.distributed_run:\n train_sampler = DistributedSampler(trainset, shuffle=False)\n else:\n train_sampler = None\n \n train_loader = DataLoader(trainset, num_workers=hparams.num_workers, shuffle=False,\n sampler=train_sampler,\n batch_size=hparams.batch_size, pin_memory=False, # default pin_memory=False, True should allow async memory transfers # Causes very random CUDA errors (after like 4+ hours)\n drop_last=True, collate_fn=collate_fn)\n return train_loader, None, collate_fn, train_sampler, trainset\n\n\ndef warm_start_model(checkpoint_path, model):\n assert os.path.isfile(checkpoint_path)\n print(\"Warm starting model from checkpoint '{}'\".format(checkpoint_path))\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n model_dict = model.state_dict()\n pretrained_dict = checkpoint_dict['state_dict']\n filtered_dict = {k: v for k,v in pretrained_dict.items() if k in model_dict and pretrained_dict[k].shape == model_dict[k].shape}\n model_dict_missing = {k: v for k,v in pretrained_dict.items() if k not in model_dict}\n print(\"model_dict_missing.keys() =\", model_dict_missing.keys())\n model.load_state_dict(filtered_dict)\n return model\n\n\ndef get_global_mean(data_loader, global_mean_npy, hparams):\n if global_mean_npy and os.path.exists(global_mean_npy):\n global_mean = np.load(global_mean_npy)\n return (torch.tensor(global_mean).half()).cuda() if hparams.fp16_run else (torch.tensor(global_mean).float()).cuda()\n else:\n raise Exception(\"No global_mean.npy found while in training_mode.\")\n return global_mean\n\n\ndef get_durations(alignments, output_lengths, input_lengths):\n batch_durations = []\n for alignment, output_length, input_length in zip(alignments, output_lengths, input_lengths):\n alignment = alignment[:output_length, :input_length]\n dur_frames = torch.histc(torch.argmax(alignment, dim=1).float(), min=0, max=input_length-1, bins=input_length)# number of frames each letter taken the maximum focus of the model.\n assert dur_frames.sum().item() == output_length, f'{dur_frames.sum().item()} != {output_length}'\n batch_durations.append(dur_frames)\n return batch_durations# [[enc_T], [enc_T], [enc_T], ...]\n\n\ndef get_alignments(alignments, output_lengths, input_lengths):\n alignments_arr = []\n for alignment, output_length, input_length in zip(alignments, output_lengths, input_lengths):\n alignment = alignment[:output_length, :input_length]\n alignments_arr.append(alignment)\n return alignments_arr# [[dec_T, enc_T], [dec_T, enc_T], [dec_T, enc_T], ...]\n\n\n@torch.no_grad()\ndef GTA_Synthesis(output_directory, checkpoint_path, n_gpus,\n rank, group_name, hparams, training_mode, verify_outputs, use_val_files, use_hidden_state, fp16_save, max_mse, max_mae, args=None, extra_info='', audio_offset=0):\n \"\"\"Generate Ground-Truth-Aligned Spectrograms for Training WaveGlow.\"\"\"\n if audio_offset:\n hparams.load_mel_from_disk = False\n torch.manual_seed(hparams.seed)\n torch.cuda.manual_seed(hparams.seed)\n \n if use_val_files:\n filelisttype = \"val\"\n hparams.training_files = hparams.validation_files\n else:\n filelisttype = \"train\"\n \n train_loader, _, collate_fn, train_sampler, train_set = prepare_dataloaders(hparams, audio_offset=audio_offset)\n \n if training_mode and 
hparams.drop_frame_rate > 0.:\n if rank != 0: # if global_mean not yet calcuated, wait for main thread to do it\n while not os.path.exists(hparams.global_mean_npy): time.sleep(1)\n global_mean = get_global_mean(train_loader, hparams.global_mean_npy, hparams)\n hparams.global_mean = global_mean\n \n model = load_model(hparams)\n # Load checkpoint if one exists\n assert checkpoint_path is not None\n if checkpoint_path is not None:\n model = warm_start_model(checkpoint_path, model)\n \n if training_mode:\n model.train()\n else:\n model.eval()\n \n if hparams.fp16_run:\n model = amp.initialize(model, opt_level='O2')\n \n model.decoder.dump_attention_weights = False if (args.save_letter_durations or args.save_phone_durations) else True # hidden param to dump attention weights\n \n # ================ MAIN TRAINNIG LOOP! ===================\n os.makedirs(os.path.join(output_directory), exist_ok=True)\n f = open(os.path.join(output_directory, f'map_{filelisttype}_{rank}.txt'),'a', encoding='utf-8')\n os.makedirs(os.path.join(output_directory,'mels'), exist_ok=True)\n \n total_number_of_data = len(train_set.audiopaths_and_text)\n max_itter = total_number_of_data//(hparams.batch_size*n_gpus)\n remainder_size = (total_number_of_data % (hparams.batch_size*n_gpus))\n remainder_size = min(remainder_size-(rank*hparams.batch_size), hparams.batch_size)\n \n processed_files = 0\n failed_files = 0\n duration = time.time()\n total = len(train_loader)\n rolling_sum = StreamingMovingAverage(100)\n for i, batch in enumerate(train_loader):\n last_batch = i == max_itter\n batch_size = remainder_size if last_batch else hparams.batch_size\n \n # get wavefile path\n batch_start = (i*hparams.batch_size*n_gpus) + rank\n batch_end = ((i+1)*hparams.batch_size*n_gpus) + rank\n audiopaths_and_text = train_set.audiopaths_and_text[batch_start:batch_end][::n_gpus]\n audiopaths = [x[0] for x in audiopaths_and_text] # filelist\n speaker_ids = [x[2] for x in audiopaths_and_text] # filelist\n \n # get len texts\n indx_list = np.arange(batch_start, batch_end, n_gpus).tolist()\n len_text_list = []\n for batch_index in indx_list:\n text, *_ = train_set.get_mel_text_pair(batch_index,\n ignore_emotion=1, ignore_speaker=1, ignore_torchmoji=1, ignore_sylps=1, ignore_mel=1)\n len_text_list.append(text.size(0))\n \n _, input_lengths, _, _, output_lengths, *_, index = batch # output_lengths: original mel length\n input_lengths_, ids_sorted_decreasing = torch.sort(torch.LongTensor(len_text_list), dim=0, descending=True)\n assert (input_lengths_ == input_lengths).all().item(), 'Error loading text lengths! 
Text Lengths from Dataloader do not match Text Lengths from GTA.py'\n ids_sorted_decreasing = ids_sorted_decreasing.numpy() # ids_sorted_decreasing, original index\n \n sorted_audiopaths, sorted_mel_paths, sorted_speaker_ids = [], [], [] # original_file_name\n for k in range(batch_size):\n sorted_audiopath = audiopaths[ids_sorted_decreasing[k]]\n sorted_audiopaths.append(sorted_audiopath)\n sorted_mel_paths.append( sorted_audiopath.replace(\".npy\",\".mel\").replace('.wav','.mel'))\n \n sorted_speaker_id = speaker_ids[ids_sorted_decreasing[k]]\n sorted_speaker_ids.append(sorted_speaker_id)\n \n x, _ = model.parse_batch(batch)\n mel_outputs, mel_outputs_postnet, _, alignments, *_, additional = model(x, teacher_force_till=9999, p_teacher_forcing=1.0, drop_frame_rate=0.0, p_emotionnet_embed=1.0, return_hidden_state=use_hidden_state)\n if use_hidden_state:\n hidden_att_contexts = additional[0]# [[B, dim],] -> [B, dim]\n hidden_att_contexts = hidden_att_contexts.data.cpu()\n if args.save_letter_encoder_outputs or args.save_phone_encoder_outputs:\n memory = additional[1] # [B, enc_T, mem_dim][B, dim]\n memory = memory.data.cpu()\n if args.save_letter_durations or args.save_phone_durations:\n alignments = alignments.data.cpu()\n print(alignments.shape)\n durations = get_durations(alignments, output_lengths, input_lengths)\n if args.save_letter_alignments or args.save_phone_alignments:\n alignments = alignments.data.cpu()\n print(alignments.shape)\n alignments = get_alignments(alignments, output_lengths, input_lengths)\n if mel_outputs_postnet is None:\n mel_outputs_postnet = mel_outputs\n mel_outputs_postnet = mel_outputs_postnet.data.cpu()\n \n for k in range(batch_size):\n wav_path = sorted_audiopaths[k].replace(\".npy\",\".wav\")\n hidden_path = wav_path.replace(\".wav\",\".hdn\")\n mel = mel_outputs_postnet[k,:,:output_lengths[k]]\n mel_shape = list(mel[:model.n_mel_channels, :].shape)\n mel_path = sorted_mel_paths[k]\n speaker_id = sorted_speaker_ids[k]\n \n offset_append = '' if audio_offset == 0 else str(audio_offset)\n save_path = mel_path+offset_append+'.npy' # ext = '.mel.npy' or '.mel1.npy' ... '.mel599.npy'\n save_path_hidden = hidden_path+offset_append+'.npy' if use_hidden_state else '' # ext = '.hdn.npy' or '.hdn1.npy' ... 
'.hdn599.npy'\n \n if verify_outputs or max_mse or max_mae:\n gt_mel = train_set.get_mel(wav_path.replace('.wav','.npy')) if train_set.load_mel_from_disk else train_set.get_mel(wav_path)\n orig_shape = list(gt_mel.shape)\n MAE = torch.nn.functional.l1_loss(mel[:model.n_mel_channels, :], gt_mel).item()\n MSE = torch.nn.functional.mse_loss(mel[:model.n_mel_channels, :], gt_mel).item()\n # check mel from wav_path has same shape as mel just saved\n if max_mse and MSE > max_mse:\n failed_files+=1\n print(f\"MSE ({MSE}) is greater than max MSE ({max_mse}).\\nFilepath: '{wav_path}'\\n\")\n continue\n if max_mae and MAE > max_mae:\n failed_files+=1\n print(f\"MAE ({MAE}) is greater than max MAE ({max_mae}).\\nFilepath: '{wav_path}'\\n\")\n continue\n else:\n MSE = MAE = orig_shape = 'N/A'\n \n if orig_shape == 'N/A' or orig_shape == mel_shape:\n processed_files+=1\n else:\n failed_files+=1\n print(f\"Target shape {orig_shape} does not match generated mel shape {mel_shape}.\\nFilepath: '{wav_path}'\\n\")\n continue\n \n print(f\"PATH: '{wav_path}'\\nText Length: {input_lengths[k].item()}\\nMel Shape:{mel_shape}\\nSpeaker_ID: {speaker_id}\\nTarget Shape: {orig_shape}\\nMSE: {MSE}\\nMAE: {MAE}\")\n \n if not args.do_not_save_mel:\n mel = mel.numpy()\n mel = mel.astype(np.float16) if fp16_save else mel\n np.save(save_path, mel)\n save_path_hidden = duration_path = save_path_enc_out = ''\n if use_hidden_state:\n hidden_att_context = hidden_att_contexts[k,:,:output_lengths[k]]\n hidden_att_context = hidden_att_context.numpy()\n hidden_att_context = hidden_att_context.astype(np.float16) if fp16_save else hidden_att_context\n np.save(save_path_hidden, hidden_att_context)\n if args.save_letter_durations and hparams.p_arpabet == 0.:\n durs = durations[k]\n print(f\"durs.std() = {durs.std()}, durs.mean() = {durs.mean()}, durs.max()/len = {durs.max()/orig_shape[1]}, durs.min() = {durs.min()}, durs.topk(5)[0] = {durs.topk(min(5, durs.view(-1).shape[0]))[0]}\")\n durs = durs.numpy()\n durs = durs.astype(np.float16) if fp16_save else hidden_att_context\n duration_path = wav_path.replace('.wav','_gdur.npy')\n np.save(duration_path, durs)\n if args.save_phone_durations and hparams.p_arpabet == 1.:\n durs = durations[k]\n print(f\"durs.std() = {durs.std()}, durs.mean() = {durs.mean()}, durs.max()/len = {durs.max()/orig_shape[1]}, durs.min() = {durs.min()}, durs.topk(5)[0] = {durs.topk(min(5, durs.view(-1).shape[0]))[0]}\")\n durs = durs.numpy()\n durs = durs.astype(np.float16) if fp16_save else hidden_att_context\n duration_path = wav_path.replace('.wav','_pdur.npy')\n np.save(duration_path, durs)\n if args.save_letter_encoder_outputs and hparams.p_arpabet == 0.:\n encoder_outputs = memory[k, :input_lengths[k], :]\n encoder_outputs = encoder_outputs.numpy()\n encoder_outputs = encoder_outputs.astype(np.float16) if fp16_save else hidden_att_context\n save_path_enc_out = wav_path.replace('.wav','_genc_out.npy')\n np.save(save_path_enc_out, encoder_outputs)\n if args.save_letter_alignments and hparams.p_arpabet == 0.:\n alignment = alignments[k]\n alignment = alignment.numpy()\n alignment = alignment.astype(np.float16) if fp16_save else hidden_att_context\n save_path_align_out = wav_path.replace('.wav','_galign_out.npy')\n np.save(save_path_align_out, alignment)\n if args.save_phone_alignments and hparams.p_arpabet == 1.:\n alignment = alignments[k]\n alignment = alignment.numpy()\n alignment = alignment.astype(np.float16) if fp16_save else hidden_att_context\n save_path_align_out = 
wav_path.replace('.wav','_palign_out.npy')\n np.save(save_path_align_out, alignment)\n if args.save_phone_encoder_outputs and hparams.p_arpabet == 1.:\n encoder_outputs = memory[k, :input_lengths[k], :]\n encoder_outputs = encoder_outputs.numpy()\n encoder_outputs = encoder_outputs.astype(np.float16) if fp16_save else hidden_att_context\n save_path_enc_out = wav_path.replace('.wav','_penc_out.npy')\n np.save(save_path_enc_out, encoder_outputs)\n \n map = f\"{wav_path}|{save_path}|{speaker_id}|{save_path_hidden}|{duration_path}|{save_path_enc_out}\\n\"\n f.write(map) # write paths to text file\n print(\"\")\n \n duration = time.time() - duration\n avg_duration = rolling_sum.process(duration)\n time_left = round(((total-i) * avg_duration)/3600, 2)\n print(f'{extra_info}{i}/{total} compute and save GTA melspectrograms in {i}th batch, {duration}s, {time_left}hrs left. {processed_files} processed, {failed_files} failed.')\n duration = time.time()\n f.close()\n \n # merge all generated filelists from every GPU\n filenames = [f'map_{filelisttype}_{j}.txt' for j in range(n_gpus)]\n if rank == 0:\n with open(os.path.join(output_directory, f'map_{filelisttype}.txt'), 'w') as outfile:\n for fname in filenames:\n with open(os.path.join(output_directory, fname)) as infile:\n for line in infile:\n if len(line.strip()):\n outfile.write(line)\n\n\nif __name__ == '__main__':\n \"\"\"\n This script will run Tacotron2 over the hparams filelist(s), and save ground truth aligned spectrograms for each file.\n In the output_directory will be a filelist that can be used to train WaveGlow/WaveFlow on the aligned tacotron outputs, which will increase audio quality when generating new text.\n \n Example:\n CUDA_VISIBLE_DEVICES=0,1,2 python3 -m multiproc GTA.py -o \"GTA_flist\" -c \"outdir/checkpoint_300000\" --extremeGTA 100 --hparams=distributed_run=True,fp16_run=True --verify_outputs --save_hidden_state --fp16_save --max_mse 0.35\n CUDA_VISIBLE_DEVICES=0,1,2 python3 -m multiproc GTA.py -o \"GTA_flist\" -c \"outdir/checkpoint_300000\" --extremeGTA 100 --hparams=distributed_run=True,fp16_run=True --verify_outputs --save_hidden_state --fp16_save --max_mse 0.35 --use_validation_files\n \n - In this example, CUDA_VISIBLE_DEVICES selects the 1st, 2nd and 3rd GPUs\n - '-o GTA_flist' is the location that the new filelist(s) will be saved\n - '-c ...' is the Tacotron2 checkpoint that will be used.\n - There are 2 commands here because both the training and validation_files are being generated.\n \n Params:\n --output_directory:\n Where to save the new filelist that is generated by this process.\n -o\n --checkpoint_path:\n Where to save the new filelist that is generated by this process.\n -c\n --extremeGTA: INT\n Example: 'python3 GTA.py -o outdir -c checkpoint_10000 --extremeGTA 100'\n Align to same file multiple times with a time offset.\n Will save `hop_length // extremeGTA` aligned copies for each file.\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output_directory', type=str,\n help='directory to save checkpoints')\n parser.add_argument('-c', '--checkpoint_path', type=str, default=None,\n required=False, help='checkpoint path')\n parser.add_argument('--n_gpus', type=int, default=1,\n required=False, help='number of gpus')\n parser.add_argument('--rank', type=int, default=0,\n required=False, help='rank of current gpu')\n parser.add_argument('--num_workers', type=int, default=8,\n required=False, help='how many processes (workers) are used to load data for each GPU. 
Each process will use a chunk of RAM. 24 Workers on a Threadripper 2950X is enough to feed three RTX 2080 Ti\\'s in fp16 mode.')\n parser.add_argument('--extremeGTA', type=int, default=0, required=False,\n help='Generate a Ground Truth Aligned output every interval specified. This will run tacotron hop_length//interval times per file and can use thousands of GBs of storage. Caution is advised')\n parser.add_argument('--max_mse', type=float, default=1e3, required=False,\n help='Maximum MSE from Ground Truth to be valid for saving. (Anything above this value will be discarded)')\n parser.add_argument('--max_mae', type=float, default=1e3, required=False,\n help='Maximum MAE from Ground Truth to be valid for saving. (Anything above this value will be discarded)')\n parser.add_argument('--use_training_mode', action='store_true',\n help='Use model.train() while generating alignments. Will increase both variablility and inaccuracy.')\n parser.add_argument('--verify_outputs', action='store_true',\n help='Debug Option. Checks output file and input file match.')\n parser.add_argument('--use_validation_files', action='store_true',\n help='Ground Truth Align validation files instead of training files.')\n parser.add_argument('--save_hidden_state', action='store_true',\n help='Save model internal state as well as spectrograms (decoder_hidden_attention_context). Hidden states can be used as alternatives to spectrograms for training Vocoders.')\n parser.add_argument('--save_letter_durations', action='store_true',\n help='Save durations of each grapheme in the input.')\n parser.add_argument('--save_phone_durations', action='store_true',\n help='Save durations of each phoneme in the input.')\n parser.add_argument('--save_letter_alignments', action='store_true',\n help='Save alignments of each grapheme in the input.')\n parser.add_argument('--save_phone_alignments', action='store_true',\n help='Save alignments of each phoneme in the input.')\n parser.add_argument('--save_letter_encoder_outputs', action='store_true',\n help='Save encoded graphemes.')\n parser.add_argument('--save_phone_encoder_outputs', action='store_true',\n help='Save encoded phonemes.')\n parser.add_argument('--do_not_save_mel', action='store_true',\n help='Do not save predicted mel-spectrograms / AEFs.')\n parser.add_argument('--fp16_save', action='store_true',\n help='Save spectrograms using np.float16 aka Half Precision. 
Will reduce the storage space required.')\n parser.add_argument('--group_name', type=str, default='group_name',\n required=False, help='Distributed group name')\n parser.add_argument('--hparams', type=str, required=False, help='comma separated name=value pairs')\n _=\"\"\"\n --save_letter_durations --save_phone_durations --save_letter_alignments --save_phone_alignments --save_letter_encoder_outputs --save_phone_encoder_outputs\n \"\"\"\n args = parser.parse_args()\n hparams = create_hparams(args.hparams)\n hparams.n_gpus = args.n_gpus\n hparams.rank = args.rank\n hparams.num_workers = args.num_workers\n hparams.use_TBPTT = False # remove limit\n hparams.truncated_length = 2**15 # remove limit\n hparams.check_files=False # disable checks\n hparams.p_arpabet = 0.0\n \n torch.backends.cudnn.enabled = hparams.cudnn_enabled\n torch.backends.cudnn.benchmark = hparams.cudnn_benchmark\n \n print(\"FP16 Run:\", hparams.fp16_run)\n print(\"Distributed Run:\", hparams.distributed_run)\n print(\"Rank:\", args.rank)\n \n if hparams.fp16_run:\n from apex import amp\n \n # cookie stuff\n #hparams.load_mel_from_disk = False\n #hparams.training_files = hparams.training_files.replace(\"mel_train\",\"train\")\n hparams.training_files = hparams.training_files.replace(\"_merged.txt\",\".txt\")\n #hparams.validation_files = hparams.validation_files.replace(\"mel_val\",\"val\")\n hparams.validation_files = hparams.validation_files.replace(\"_merged.txt\",\".txt\")\n \n if not args.use_validation_files:\n hparams.batch_size = hparams.batch_size * 8 # no gradients stored so batch size can go up a bunch\n \n torch.autograd.set_grad_enabled(False)\n \n if hparams.distributed_run:\n init_distributed(hparams, args.n_gpus, args.rank, args.group_name)\n torch.manual_seed(hparams.seed)\n torch.cuda.manual_seed(hparams.seed)\n \n if args.extremeGTA:\n for ind, ioffset in enumerate(range(0, hparams.hop_length, args.extremeGTA)): # generate aligned spectrograms for all audio samples\n if ind < 0:\n continue\n GTA_Synthesis(args.output_directory, args.checkpoint_path, args.n_gpus, args.rank, args.group_name, hparams, args.use_training_mode, args.verify_outputs, args.use_validation_files, args.save_hidden_state, args.fp16_save, args.max_mse, args.max_mae, args=args, audio_offset=ioffset, extra_info=f\"{ind+1}/{hparams.hop_length//args.extremeGTA} \")\n elif (args.save_letter_durations or args.save_letter_alignments or args.save_letter_encoder_outputs) and (args.save_phone_durations or args.save_phone_alignments or args.save_phone_encoder_outputs):\n hparams.p_arpabet = 0.0\n GTA_Synthesis(args.output_directory, args.checkpoint_path, args.n_gpus, args.rank, args.group_name, hparams, args.use_training_mode, args.verify_outputs, args.use_validation_files, args.save_hidden_state, args.fp16_save, args.max_mse, args.max_mae, args=args, extra_info=\"1/2 \")\n hparams.p_arpabet = 1.0\n GTA_Synthesis(args.output_directory, args.checkpoint_path, args.n_gpus, args.rank, args.group_name, hparams, args.use_training_mode, args.verify_outputs, args.use_validation_files, args.save_hidden_state, args.fp16_save, args.max_mse, args.max_mae, args=args, extra_info=\"2/2 \")\n else:\n GTA_Synthesis(args.output_directory, args.checkpoint_path, args.n_gpus, args.rank, args.group_name, hparams, args.use_training_mode, args.verify_outputs, args.use_validation_files, args.save_hidden_state, args.fp16_save, args.max_mse, args.max_mae, args=args)\n print(\"GTA Done!\")\n", "repo_name": "CookiePPP/cookietts", "sub_path": 
"CookieTTS/_2_ttm/tacotron2_ssvae/GTA.py", "file_name": "GTA.py", "file_ext": "py", "file_size_in_byte": 25674, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 42, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.cuda.set_device", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.distributed.init_process_group", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 47, "usage_type": "attribute"}, {"api_name": "hparams.dist_backend", "line_number": 48, "usage_type": "attribute"}, {"api_name": "hparams.dist_url", "line_number": 48, "usage_type": "attribute"}, {"api_name": "data_utils.TextMelLoader", "line_number": 55, "usage_type": "call"}, {"api_name": "hparams.training_files", "line_number": 55, "usage_type": "attribute"}, {"api_name": "data_utils.TextMelCollate", "line_number": 56, "usage_type": "call"}, {"api_name": "hparams.distributed_run", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.utils.data.distributed.DistributedSampler", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 63, "usage_type": "call"}, {"api_name": "hparams.num_workers", "line_number": 63, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 73, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 74, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 85, "usage_type": "call"}, {"api_name": "hparams.fp16_run", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.histc", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 96, "usage_type": "call"}, {"api_name": "hparams.load_mel_from_disk", "line_number": 115, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 116, "usage_type": "call"}, {"api_name": "hparams.seed", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.cuda.manual_seed", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 117, "usage_type": "attribute"}, {"api_name": "hparams.seed", "line_number": 117, "usage_type": "attribute"}, {"api_name": "hparams.training_files", "line_number": 121, "usage_type": "attribute"}, {"api_name": "hparams.validation_files", "line_number": 121, "usage_type": "attribute"}, {"api_name": "hparams.drop_frame_rate", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "hparams.global_mean_npy", "line_number": 129, 
"usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 129, "usage_type": "call"}, {"api_name": "hparams.global_mean_npy", "line_number": 130, "usage_type": "attribute"}, {"api_name": "hparams.global_mean", "line_number": 131, "usage_type": "attribute"}, {"api_name": "model.load_model", "line_number": 133, "usage_type": "call"}, {"api_name": "model.train", "line_number": 140, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 142, "usage_type": "call"}, {"api_name": "hparams.fp16_run", "line_number": 144, "usage_type": "attribute"}, {"api_name": "model.decoder", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 155, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 156, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 157, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 161, "usage_type": "call"}, {"api_name": "hparams.batch_size", "line_number": 166, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 169, "usage_type": "attribute"}, {"api_name": "hparams.batch_size", "line_number": 170, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 184, "usage_type": "call"}, {"api_name": "model.parse_batch", "line_number": 197, "usage_type": "call"}, {"api_name": "model.n_mel_channels", "line_number": 221, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.l1_loss", "line_number": 232, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 232, "usage_type": "attribute"}, {"api_name": "model.n_mel_channels", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 233, "usage_type": "attribute"}, {"api_name": "model.n_mel_channels", "line_number": 233, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 257, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 263, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 264, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 269, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 271, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 272, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 276, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 278, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 279, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 282, 
"usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 284, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 285, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 288, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 290, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 291, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 294, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 296, "usage_type": "call"}, {"api_name": "hparams.p_arpabet", "line_number": 297, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 300, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 302, "usage_type": "call"}, {"api_name": "time.time", "line_number": 308, "usage_type": "call"}, {"api_name": "time.time", "line_number": 312, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 318, "usage_type": "call"}, {"api_name": "os.path", "line_number": 318, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 320, "usage_type": "call"}, {"api_name": "os.path", "line_number": 320, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 110, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 352, "usage_type": "call"}, {"api_name": "hparams.create_hparams", "line_number": 400, "usage_type": "call"}, {"api_name": "hparams.n_gpus", "line_number": 401, "usage_type": "attribute"}, {"api_name": "hparams.rank", "line_number": 402, "usage_type": "attribute"}, {"api_name": "hparams.num_workers", "line_number": 403, "usage_type": "attribute"}, {"api_name": "hparams.use_TBPTT", "line_number": 404, "usage_type": "attribute"}, {"api_name": "hparams.truncated_length", "line_number": 405, "usage_type": "attribute"}, {"api_name": "hparams.check_files", "line_number": 406, "usage_type": "attribute"}, {"api_name": "hparams.p_arpabet", "line_number": 407, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 409, "usage_type": "attribute"}, {"api_name": "hparams.cudnn_enabled", "line_number": 409, "usage_type": "attribute"}, {"api_name": "torch.backends", "line_number": 410, "usage_type": "attribute"}, {"api_name": "hparams.cudnn_benchmark", "line_number": 410, "usage_type": "attribute"}, {"api_name": "hparams.fp16_run", "line_number": 412, "usage_type": "attribute"}, {"api_name": "hparams.distributed_run", "line_number": 413, "usage_type": "attribute"}, {"api_name": "hparams.fp16_run", "line_number": 416, "usage_type": "attribute"}, {"api_name": "hparams.training_files", "line_number": 422, "usage_type": "attribute"}, {"api_name": "hparams.training_files.replace", "line_number": 422, "usage_type": "call"}, {"api_name": "hparams.validation_files", "line_number": 424, "usage_type": "attribute"}, {"api_name": "hparams.validation_files.replace", "line_number": 424, "usage_type": "call"}, {"api_name": "hparams.batch_size", "line_number": 427, "usage_type": "attribute"}, {"api_name": "torch.autograd.set_grad_enabled", "line_number": 429, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 429, "usage_type": "attribute"}, {"api_name": "hparams.distributed_run", "line_number": 431, "usage_type": "attribute"}, {"api_name": "train.init_distributed", "line_number": 432, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 433, "usage_type": "call"}, {"api_name": "hparams.seed", "line_number": 433, "usage_type": "attribute"}, 
{"api_name": "torch.cuda.manual_seed", "line_number": 434, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 434, "usage_type": "attribute"}, {"api_name": "hparams.seed", "line_number": 434, "usage_type": "attribute"}, {"api_name": "hparams.hop_length", "line_number": 437, "usage_type": "attribute"}, {"api_name": "hparams.hop_length", "line_number": 440, "usage_type": "attribute"}, {"api_name": "hparams.p_arpabet", "line_number": 442, "usage_type": "attribute"}, {"api_name": "hparams.p_arpabet", "line_number": 444, "usage_type": "attribute"}]} +{"seq_id": "27097850785", "text": "import sys\nfrom collections import deque\nsys.stdin=open(\"../input.txt\", \"r\")\nneed=input()\nn=int(input())\nfor i in range(n):\n plan=input()\n dq=deque(need)\n for x in plan:\n if x in dq:\n if x!=dq.popleft():\n print(\"#%d NO\" %(i+1))\n break\n else:\n if len(dq)==0:\n print(\"#%d YES\" %(i+1))\n else:\n print(\"#%d NO\" %(i+1))\n\n\"\"\" import sys\nfrom collections import deque\n\nsys.stdin=open(\"../input.txt\",\"r\")\nneed=deque(input())\nn=int(input())\n\nfor i in range(n):\n plan=deque(input())\n dq=deque(need)\n \n while dq and plan: \n if plan.popleft()==dq[0]:\n dq.popleft()\n if dq:\n print(\"#%d N0\"%(i+1))\n else: \n print(\"#%d YES\"%(i+1))\n\n \"\"\"", "repo_name": "areum514/Algorithm", "sub_path": "section5/수업설계.py", "file_name": "수업설계.py", "file_ext": "py", "file_size_in_byte": 766, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.stdin", "line_number": 3, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "42163806510", "text": "import cv2\nimport math\nfrom .common_color import CommonColor\nimport sys\nfrom .detect_color import DetectColor\n\nclass InitDetect:\n def start(self, path, name, savePath):\n try:\n # center = (w / 2, h / 2)\n # cl = CommonColor()\n dt = DetectColor()\n # Read the image from the path\n resize_img = cv2.imread(path)\n \n resize_img = cv2.resize(resize_img, (559, 494))\n # get height and width of the image\n (h, w) = resize_img.shape[:2]\n print('height', h, w)\n y1 = math.floor(h/2) - 150\n y2 = math.floor(h/2) + 100\n x1 = math.floor(w/2) - 100\n x2 = math.floor(w/2) + 200\n print('coordinates', y1, y2, x1, x2)\n crop_img = resize_img[y1:y2, x1:x2]\n\n color = dt.find(crop_img, name, savePath)\n print('color on image', color)\n return color\n except Exception as ex:\n print('Exception in InitDetect', str(ex))", "repo_name": "niravkapoor/color-recognition", "sub_path": "opencv/helper/init.py", "file_name": "init.py", "file_ext": "py", "file_size_in_byte": 1030, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "detect_color.DetectColor", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 16, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 20, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 21, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 22, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "40321546554", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 20 15:42:28 2017\r\n\r\n@author: Sampson\r\n\r\nrely on tensorflow && Keras\r\n\"\"\"\r\nimport numpy as np\r\nimport threading, os, 
time\r\n\r\nlabeldict={'airplane':1,\r\n 'automobile':2,\r\n 'bird':3,\r\n 'cat':4,\r\n 'deer':5,\r\n 'dog':6,\r\n 'frog':7,\r\n 'horse':8,\r\n 'ship':9,\r\n 'truck':10 \r\n }\r\n\r\ndef makeDir(path):\r\n try:\r\n if not os.path.exists(path):\r\n if not os.path.isfile(path):\r\n # os.mkdir(path)\r\n os.makedirs(path)\r\n return 0\r\n else:\r\n return 1\r\n except Exception as e:\r\n print(str(e))\r\n return -2\r\n \r\n#def threadOPS(path, new_path):\r\n# \"\"\"\r\n# 多线程处理事务\r\n# :param src_path: 资源文件\r\n# :param des_path: 目的地文件\r\n# :return:\r\n# \"\"\"\r\n# if os.path.isdir(path):\r\n# img_names = os.listdir(path)\r\n# else:\r\n# img_names = [path]\r\n# for img_name in img_names:\r\n# print(img_name)\r\n# tmp_img_name = os.path.join(path, img_name)\r\n# if os.path.isdir(tmp_img_name):\r\n# if makeDir(os.path.join(new_path, img_name)) != -1:\r\n# threadOPS(tmp_img_name, os.path.join(new_path, img_name))\r\n# else:\r\n# print('create new dir failure')\r\n# return -1\r\n# # os.removedirs(tmp_img_name)\r\n# elif tmp_img_name.split('.')[1] != \"DS_Store\":\r\n# # 读取文件并进行操作\r\n# image = myData.openImage(tmp_img_name)\r\n# threadImage = [0] * 5\r\n# _index = 0\r\n# for ops_name in opsList:\r\n# threadImage[_index] = threading.Thread(target=imageOps,\r\n# args=(ops_name, image, new_path, img_name,))\r\n# threadImage[_index].start()\r\n# _index += 1\r\n# time.sleep(0.2)\r\n\r\ndef readlmdb(lmdb_dir):\r\n import caffe\r\n import lmdb\r\n dictlmdb={}\r\n env = lmdb.open(lmdb_dir, readonly=True)\r\n with env.begin() as txn:\r\n raw_datum = txn.get(b'00000000')\r\n datum = caffe.proto.caffe_pb2.Datum()\r\n datum.ParseFromString(raw_datum) \r\n flat_x = np.fromstring(datum.data, dtype=np.uint8)\r\n dictlmdb['X'] = flat_x.reshape(datum.channels, datum.height, datum.width)\r\n dictlmdb['y'] = datum.label \r\n return dictlmdb\r\n\r\ndef savelmdb(dictlmdb,lmdb_dir):\r\n import caffe\r\n import lmdb\r\n X=dictlmdb['X']\r\n y=dictlmdb['y']\r\n env=lmdb.open(lmdb_dir,map_size=X.nbytes * 20)\r\n with env.begin(write=True) as txn:\r\n count=0\r\n for i in range(X.shape[0]):\r\n datum = caffe.proto.caffe_pb2.Datum()\r\n datum.channels = X.shape[1]\r\n datum.height = X.shape[2]\r\n datum.width = X.shape[3]\r\n datum.data = X[i].tobytes()\r\n datum.label = int(y[i])\r\n str_id = '{:08}'.format(i)\r\n txn.put(str_id,datum.SerializeToString())\r\n \r\n count+=1\r\n if count%1000==0:\r\n print('already handled with {} pictures'.format(count))\r\n txn.commit()\r\n txn=env.begin(write=True)\r\n\r\n\r\ndef readMat(mat_dir):\r\n import scipy.io as sio\r\n dictmat = sio.loadmat(mat_dir)\r\n # 通常需要检查一下data结构,type(data),data.keys(),data.values(),data.items()\r\n return dictmat\r\n\r\ndef saveMat(dictmat,mat_dir):\r\n import scipy.io as sio\r\n sio.savemat(mat_dir,dictmat)\r\n print('save mat')\r\n \r\ndef readPickle(pickle_dir):\r\n import cPickle\r\n fo=open(pickle_dir,'rb')\r\n dictpickle=cPickle.load(fo)\r\n fo.close()\r\n return dictpickle\r\n\r\ndef savePickle(dictpickle,pickle_dir):\r\n import cPickle\r\n fo=open(pickle_dir,'wb')\r\n cPickle.dump(fo)\r\n fo.close()\r\n print('save pickle')\r\n \r\ndef readImg(img_dir,label=0,img_size=(128,128)):\r\n if not img_dir.endswith('/'):\r\n img_dir += '/'\r\n \r\n import cv2 \r\n img_list = os.listdir(img_dir)\r\n img_num = len(img_list)\r\n imgarr = np.zeros((img_num,)+img_size + (3,),dtype=np.int8)\r\n labelarr = np.ones((img_num,1),dtype=np.int8)*label \r\n for i in range(img_num):\r\n img_name = img_list[i]\r\n print(img_name)\r\n print(i)\r\n img = 
cv2.cvtColor(cv2.imread(img_dir + img_name), cv2.COLOR_BGR2RGB) # (height, width, channels) \r\n imgarr[i,:,:,:] = cv2.resize(img,img_size,interpolation=cv2.INTER_CUBIC) # (samples, height, width, channels)\r\n \r\n dictimg={} \r\n dictimg['X']=imgarr\r\n dictimg['y']=labelarr\r\n return dictimg\r\n\r\ndef augmentation(ndarr_x,ndarr_y=None,epoches=1,save_dir='None',prefix='name',imgformat='jpg'):\r\n '''\r\n docs: https://keras-cn.readthedocs.io/en/latest/preprocessing/image/\r\n param ndarr_x: data.the shape of ndarr_x is (samples, height, width, channels)\r\n param ndarr_y: labels.the shape of ndarr_y is (samples,label)\r\n param epoches: samples*epoches*0.1 is number of new samples\r\n param save_dir: a directory to save the augmented pictures being generated,just for checking\r\n param name: prefix to use for filenames of saved pictures \r\n '''\r\n from keras.preprocessing.image import ImageDataGenerator\r\n datagen = ImageDataGenerator(\r\n featurewise_center=False, # 输入数据集去中心化(均值为0)\r\n samplewise_center=False, # 输入数据的每个样本均值为0\r\n featurewise_std_normalization=False, # 除以数据集的标准差以完成标准化\r\n samplewise_std_normalization=False, # 每个样本除以其自身的标准差\r\n zca_whitening=False, # 对输入数据施加ZCA白化\r\n rotation_range=0.2, # 图片随机转动的角度\r\n width_shift_range=0.2, # 图片水平偏移的幅度\r\n height_shift_range=0.2, # 图片竖直偏移的幅度\r\n shear_range=0.2, # 剪切强度(逆时针方向的剪切变换角度)\r\n zoom_range=0.2, # 随机缩放的幅度,范围是[1-zoom_range, 1+zoom_range]\r\n channel_shift_range=0., # 随机通道偏移的幅度\r\n fill_mode='nearest', # 插值方式,‘constant’,‘nearest’,‘reflect’或‘wrap’之一\r\n cval=0., # 当fill_mode=constant时,指定要向超出边界的点填充的值\r\n horizontal_flip=True, # 随机水平翻转\r\n vertical_flip=True, # 随机竖直翻转\r\n rescale=None) # 重放缩因子,默认为None.如果为None或0则不进行放缩,否则会将该数值乘到数据上(在应用其他变换之前)\r\n \r\n if ndarr_x.shape[0] < 10:\r\n batch_size = 1\r\n else:\r\n batch_size=int(ndarr_x.shape[0] * 0.1)\r\n print(batch_size)\r\n batch_x=np.zeros((batch_size*epoches,)+ndarr_x.shape[1:])\r\n batch_y=np.zeros((batch_size*epoches,)+ndarr_y.shape[1:])\r\n e=0\r\n #生成器不能用for i in range()\r\n for xb,yb in datagen.flow(x=ndarr_x, #\r\n y=ndarr_y, #\r\n batch_size = batch_size, #单次增强图像的数目\r\n shuffle=True, #\r\n save_to_dir=save_dir, #图像存储的路径\r\n save_prefix=prefix, #图像名字的首部\r\n save_format=imgformat): #图像存储的格式\r\n batch_x[e*batch_size:(e+1)*batch_size,:,:,:] = xb\r\n batch_y[e*batch_size:(e+1)*batch_size,:,:,:] = yb\r\n e += 1\r\n if(e>=epoches):\r\n break\r\n return np.concatenate((ndarr_x,batch_x)),np.concatenate((ndarr_y,batch_y)) #将原始数据与增强数据合并后返回\r\n\r\ndef classifyPictures(img_path, txt_path):\r\n if not img_path.endswith('/'):\r\n img_path += '/' \r\n\r\n import re #正则表达式模块\r\n import shutil\r\n fw = open(txt_path,\"w\")\r\n labels_key = labeldict.keys()\r\n labels_val = labeldict.values()\r\n img_name = os.listdir(img_path) #列出路径中的所有文件\r\n for itemname in img_name:\r\n #正则表达式规则:找以label[:]开头,紧跟字符串或者数字0或无限次,并以jpg或者png结尾的图片文件\r\n for index,keyname in enumerate(labels_key): \r\n res = re.search(r'^' + keyname + '[0-9_]*' + '.(jpg|png)$',itemname)\r\n #只有当返回结果不为空时,进行生成清单和分类图片\r\n if res != None:\r\n #生成清单\r\n fw.write(res.group(0) + ' ' + str(labels_val[index]) + '\\n')\r\n #分类图片\r\n if makeDir(img_path + keyname) != -2: \r\n shutil.copy(img_path+itemname, img_path + keyname) #若移动,改为move\r\n else:\r\n print('create new dir failure') \r\n print(itemname) \r\n \r\n print(\"generate txt successfully\")\r\n fw.close()\r\n \r\nif __name__ == '__main__':\r\n images_dir = './dog/dog'\r\n lmdb_dir = './lmdb'\r\n txt_path = './dog/label.txt'\r\n pickle_dir='./cifar-10-batches-py/data_batch_1'\r\n 
dictimg=readImg(img_dir=images_dir,label=labeldict['dog']) \r\n# print(imgndarr.shape)\r\n# cv2.imshow('image',imgndarr.reshape((128,128,3)))\r\n# cv2.waitKey(0)\r\n# new=augmentation(ndarr_x=imgndarr,ndarr_y=None,epoches=10,save_dir=save_path)\r\n# print(new.shape)\r\n mat_path = './1.mat'\r\n# x,y = readMat(mat_path)\r\n# res=makeDir(images_dir)\r\n# txt_path = './labellist.txt'\r\n# classifyPictures(images_dir, txt_path)\r\n# dictpickle=readPickle(pickle_dir)\r\n# if makeDir(lmdb_dir) == 0:\r\n# savelmdb(dictimg,lmdb_dir)\r\n dictlmdb=readlmdb(lmdb_dir)\r\n# saveMat(dictimg,mat_path)\r\n# else:\r\n# print(\"Conversion was already done. Did not convert twice, or data just become bigger\")\r\n #常用函数:type(object),ndarray.shape\r\n# print(type(img))\r\n# print(type(mat))\r\n", "repo_name": "Sampson-Lee/myResNet", "sub_path": "data/myProcess.py", "file_name": "myProcess.py", "file_ext": "py", "file_size_in_byte": 10193, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "lmdb.open", "line_number": 74, "usage_type": "call"}, {"api_name": "caffe.proto.caffe_pb2.Datum", "line_number": 77, "usage_type": "call"}, {"api_name": "caffe.proto", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.fromstring", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 79, "usage_type": "attribute"}, {"api_name": "lmdb.open", "line_number": 89, "usage_type": "call"}, {"api_name": "caffe.proto.caffe_pb2.Datum", "line_number": 93, "usage_type": "call"}, {"api_name": "caffe.proto", "line_number": 93, "usage_type": "attribute"}, {"api_name": "scipy.io.loadmat", "line_number": 111, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 111, "usage_type": "name"}, {"api_name": "scipy.io.savemat", "line_number": 117, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 117, "usage_type": "name"}, {"api_name": "cPickle.load", "line_number": 123, "usage_type": "call"}, {"api_name": "cPickle.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 142, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 147, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 148, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 148, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 204, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 215, 
"usage_type": "call"}, {"api_name": "re.search", "line_number": 219, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "41291708416", "text": "import logging\nfrom typing import Optional, Iterable, Any, Tuple\n\nimport matplotlib.pyplot as plt\n\nfrom fuse.data.visualizer.visualizer_base import FuseVisualizerBase\nfrom fuse.utils.utils_hierarchical_dict import FuseUtilsHierarchicalDict\nfrom fuse.utils.utils_logger import log_object_input_state\nfrom fuse.utils.utils_image_processing import FuseUtilsImageProcessing\nimport torch\n\n\nclass FuseVisualizerDefault(FuseVisualizerBase):\n \"\"\"\n Visualizer for data including single 2D image with optional mask\n \"\"\"\n\n def __init__(self, image_name: str, mask_name: Optional[str] = None,\n label_name: Optional[str] = None, metadata_names: Iterable[str] = tuple(),\n pred_name: Optional[str] = None,\n gray_scale: bool = True):\n \"\"\"\n :param image_name: hierarchical key name of the image in batch_dict\n :param mask_name: hierarchical key name of the mask (gt map) in batch_dict.\n Optional, won't be displayed if not specified.\n :param label_name: hierarchical key name of the to a global label in batch_dict.\n Optional, won't be displayed if not specified.\n :param metadata_names: list of hierarchical key name of the metadata - will be printed for every sample\n :param pred_name: hierarchical key name of the prediction in batch_dict.\n Optional, won't be displayed if not specified.\n :param gray_scale: If True, each channel will be displayed as gray scale image. Otherwise, assuming 3 channels and RGB image either normalize to [0-1] or to [0-255]\n \"\"\"\n # log object input state\n log_object_input_state(self, locals())\n\n # store input parameters\n self.image_pointer = image_name\n self.mask_name = mask_name\n self.label_name = label_name\n self.metadata_pointers = metadata_names\n self.pred_name = pred_name\n self.matching_function = FuseUtilsImageProcessing.match_img_to_input\n self._gray_scale = gray_scale\n\n def extract_data(self, sample: dict) -> Tuple[Any, Any, Any, Any, Any]:\n \"\"\"\n extract required data to visualize from sample\n :param sample: global dict of a sample\n :return: image, mask, label, metadata\n \"\"\"\n\n # image\n image = FuseUtilsHierarchicalDict.get(sample, self.image_pointer)\n\n # mask\n if self.mask_name is not None:\n mask = FuseUtilsHierarchicalDict.get(sample, self.mask_name)\n else:\n mask = None\n\n # label\n if self.label_name is not None:\n label = FuseUtilsHierarchicalDict.get(sample, self.label_name)\n else:\n label = ''\n\n # mask\n if self.pred_name is not None:\n pred_mask = FuseUtilsHierarchicalDict.get(sample, self.pred_name)\n else:\n pred_mask = None\n\n # metadata\n metadata = {metadata_ptr: FuseUtilsHierarchicalDict.get(sample, metadata_ptr) for metadata_ptr in\n self.metadata_pointers}\n\n return image, mask, label, metadata, pred_mask\n\n def visualize(self, sample: dict, block: bool = True) -> None:\n \"\"\"\n visualize sample\n :param sample: batch_dict - to extract the sample from\n :param block: set to False if the process should not be blocked until the plot will be closed\n :return: None\n \"\"\"\n # extract data\n image, mask, label, metadata, pred_mask = self.extract_data(sample)\n\n if mask is not None:\n mask = self.matching_function(mask, image)\n\n if pred_mask is not None:\n pred_mask = self.matching_function(pred_mask, image)\n\n # visualize\n if self._gray_scale:\n num_channels = image.shape[0]\n\n if pred_mask is 
not None:\n fig, ax = plt.subplots(num_channels, pred_mask.shape[0]+1, squeeze=False)\n else:\n fig, ax = plt.subplots(num_channels, 1, squeeze=False)\n\n for channel_idx in range(num_channels):\n ax[channel_idx, 0].title.set_text('image (ch %d) (lbl %s)' % (channel_idx, str(label)))\n\n ax[channel_idx, 0].imshow(image[channel_idx].squeeze(), cmap='gray')\n if mask is not None:\n ax[channel_idx, 0].imshow(mask[channel_idx], alpha=0.3)\n\n if pred_mask is not None:\n for c_id in range(pred_mask.shape[0]):\n max_prob = pred_mask[c_id].max()\n ax[channel_idx, c_id+1].title.set_text('image (ch %d) (max prob %s)' % (channel_idx, str(max_prob)))\n\n ax[channel_idx, c_id+1].imshow(image[channel_idx].squeeze(), cmap='gray')\n ax[channel_idx, c_id+1].imshow(pred_mask[c_id], alpha=0.3)\n else:\n if pred_mask is not None:\n fig, ax = plt.subplots(1, pred_mask.shape[0]+1, squeeze=False)\n else:\n fig, ax = plt.subplots(1, 1, squeeze=False)\n\n ax[0, 0].title.set_text('image (lbl %s)' % (str(label)))\n\n image = image.permute((1,2,0)) # assuming torch dimension order [C, H, W] and conver to [H, W, C]\n image = torch.clip(image, 0.0, 1.0) # assuming range is [0-1] and clip values that might be a bit out of range\n ax[0, 0].imshow(image)\n if mask is not None:\n ax[0, 0].imshow(mask, alpha=0.3)\n\n if pred_mask is not None:\n for c_id in range(pred_mask.shape[0]):\n max_prob = pred_mask[c_id].max()\n ax[0, c_id+1].title.set_text('image(max prob %s)' % (str(max_prob)))\n ax[0, c_id+1].imshow(pred_mask[c_id], cmap='gray')\n\n lgr = logging.getLogger('Fuse')\n lgr.info('------------------------------------------')\n lgr.info(metadata)\n lgr.info('image label = ' + str(label))\n lgr.info('------------------------------------------')\n\n try:\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n except:\n pass\n \n fig.tight_layout()\n plt.show(block=block)\n\n def visualize_aug(self, orig_sample: dict, aug_sample: dict, block: bool = True) -> None:\n \"\"\"\n Visualise and compare augmented and non-augmented version of the sample\n :param orig_sample: batch_dict to extract the original sample from\n :param aug_sample: batch_dict to extract the augmented sample from\n :param block: set to False if the process should not be blocked until the plot will be closed\n :return: None\n \"\"\"\n # extract data\n orig_image, orig_mask, orig_label, orig_metadata, pred_mask = self.extract_data(orig_sample)\n aug_image, aug_mask, aug_label, aug_metadata, pred_mask = self.extract_data(aug_sample)\n\n # visualize\n if self._gray_scale:\n num_channels = orig_image.shape[0]\n\n fig, ax = plt.subplots(num_channels, 2, squeeze=False)\n for channel_idx in range(num_channels):\n # orig\n ax[channel_idx, 0].title.set_text('image (ch %d) (lbl %s)' % (channel_idx, str(orig_label)))\n ax[channel_idx, 0].imshow(orig_image[channel_idx].squeeze(), cmap='gray')\n if (orig_mask is not None) and (None not in orig_mask):\n ax[channel_idx, 0].imshow(orig_mask, alpha=0.3)\n\n # augmented\n ax[channel_idx, 1].title.set_text('image (ch %d) (lbl %s)' % (channel_idx, str(aug_label)))\n ax[channel_idx, 1].imshow(aug_image[channel_idx].squeeze(), cmap='gray')\n if (aug_mask is not None) and (None not in aug_mask):\n ax[channel_idx, 1].imshow(aug_mask, alpha=0.3)\n else:\n fig, ax = plt.subplots(1, 2, squeeze=False)\n # orig\n ax[0, 0].title.set_text('image (lbl %s)' % (str(orig_label)))\n orig_image = orig_image.permute((1,2,0)) # assuming torch dimension order [C, H, W] and conver to [H, W, C]\n orig_image = 
torch.clip(orig_image, 0.0, 1.0) # assuming range is [0-1] and clip values that might be a bit out of range\n ax[0, 0].imshow(orig_image)\n if (orig_mask is not None) and (None not in orig_mask):\n ax[0, 0].imshow(orig_mask, alpha=0.3)\n\n # augmented\n ax[0, 1].title.set_text('image (lbl %s)' % (str(aug_label)))\n aug_image = aug_image.permute((1,2,0)) # assuming torch dimension order [C, H, W] and conver to [H, W, C]\n aug_image = torch.clip(aug_image, 0.0, 1.0) # assuming range is [0-1] and clip values that might be a bit out of range\n ax[0, 1].imshow(aug_image)\n if (aug_mask is not None) and (None not in aug_mask):\n ax[1].imshow(aug_mask, alpha=0.3)\n\n lgr = logging.getLogger('Fuse')\n lgr.info('------------------------------------------')\n lgr.info(\"original\")\n lgr.info(orig_metadata)\n lgr.info('image label = ' + str(orig_label))\n lgr.info(\"augmented\")\n lgr.info(aug_metadata)\n lgr.info('image label = ' + str(aug_label))\n lgr.info('------------------------------------------')\n\n try:\n mng = plt.get_current_fig_manager()\n mng.resize(*mng.window.maxsize())\n except:\n pass\n\n fig.tight_layout()\n plt.show(block=block)\n", "repo_name": "MEDAL-IITB/KNIGHT22", "sub_path": "fuse/data/visualizer/visualizer_default.py", "file_name": "visualizer_default.py", "file_ext": "py", "file_size_in_byte": 9561, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fuse.data.visualizer.visualizer_base.FuseVisualizerBase", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 19, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "fuse.utils.utils_logger.log_object_input_state", "line_number": 34, "usage_type": "call"}, {"api_name": "fuse.utils.utils_image_processing.FuseUtilsImageProcessing.match_img_to_input", "line_number": 42, "usage_type": "attribute"}, {"api_name": "fuse.utils.utils_image_processing.FuseUtilsImageProcessing", "line_number": 42, "usage_type": "name"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get", "line_number": 53, "usage_type": "call"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict", "line_number": 53, "usage_type": "name"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get", "line_number": 57, "usage_type": "call"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict", "line_number": 57, "usage_type": "name"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get", "line_number": 63, "usage_type": "call"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict", "line_number": 63, "usage_type": "name"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get", "line_number": 69, "usage_type": "call"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict", "line_number": 69, "usage_type": "name"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict.get", "line_number": 74, "usage_type": "call"}, {"api_name": "fuse.utils.utils_hierarchical_dict.FuseUtilsHierarchicalDict", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 
45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.clip", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "torch.clip", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.clip", "line_number": 195, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}]} +{"seq_id": "14027727296", "text": "\"\"\"\nPython script & data contains information about all Danish lawyers.\nSome of the data are duplicated in csv (due to geographical zones), in Excel duplicates removed.\nPython code may be a little silly, but it works\n\"\"\"\n# -*- coding: UTF-8 -*-\n\nimport re\nfrom urllib.request import urlopen\n\nfrom bs4 import BeautifulSoup\n\n\ndef comma(block):\n \"\"\"\n Check for comma (for csv)\n \"\"\"\n if ',' in block:\n block = '\"' + block + '\"'\n return block\n\n\ndef has_inside(block):\n \"\"\"\n Check for something inside\n \"\"\"\n return comma(block[0]) if block else '#N/A'\n\n\ndef check_mail(eml):\n \"\"\"\n Check for email inside\n \"\"\"\n return eml[::-1] if eml != '#N/A' else '#N/A'\n\n\nDATA = open('Advokater.csv', 'w')\nDATA.write('''Page, URL, Name, Name2, Surname, Title, Title 2, Area, \nBeskikkelsesår, Fødselsår, Møderet landsret, Møderet højesteret, Email, \nMobile, Firma, Gade, Postnummer, By, Land, Telefon, Email, CVR, WWW, \nRetskreds, Ansatte\\n''')\n\nfor page in range(0, 246): # 246\n\n print('Getting data for ' + str(page) + ' page')\n url = f\"http://www.advokatnoeglen.dk/sog.aspx?s=1&t=1&zf=0000&zt=999999\" \\\n f\"&p={page}\"\n urlDetail = \"http://www.advokatnoeglen.dk\"\n page_adv = urlopen(url)\n soup = BeautifulSoup(page_adv, 'html.parser')\n table = soup.findAll('tr')\n\n for line in table[1:]: # 1:\n cut = urlDetail + re.findall(r\"href='([\\S]+)'\", line.__str__())[0]\n print('Scraping ', cut)\n\n pageDetail = 
urlopen(cut)\n soupDetail = BeautifulSoup(pageDetail, 'html.parser')\n\n names = soupDetail.findAll(r'h1')[1].getText().split(' ') # navn\n\n if len(names) > 2:\n name = names[0]\n name2 = names[1]\n surname = names[2]\n else:\n name = names[0]\n name2 = ''\n surname = names[1]\n\n status = soupDetail.findAll(r'h2')[0].getText().split(',') # Advokat\n\n if len(status) > 1:\n title = status[0]\n title2 = status[1]\n else:\n title = status[0]\n title2 = ''\n\n workspaces = ' '.join(soupDetail.findAll(r'h2')[1].getText().split())\n print(workspaces)\n # cut intro Arbejdsområder\n workspaces2 = has_inside(re.findall(\n r'Arbejdsområder: ([\\S\\s]+)', workspaces))\n\n area = workspaces2\n\n works = soupDetail.findAll('p') # [0].getText() #all in Arbejdsområder\n firm = comma(' '.join(soupDetail.findAll(\n r'h2')[2].getText().split())) # firma\n print('Name: {}, status: {}, workspaces: {}, firm: {}'.format(\n name, status, workspaces2, firm))\n\n # works[1] - work\n work = works[1].__str__()\n start = has_inside(re.findall(\n r'Beskikkelsesår: ([\\S]+)<', work)) # Beskikkelsesår\n birth = has_inside(re.findall(\n r'Fødselsår: ([\\S]+)<', work)) # Fødselsår\n # Møderet for landsret\n highcourt = has_inside(re.findall(r'landsret: ([\\S]+)<', work))\n # Møderet for højesteret\n supremecourt = has_inside(re.findall(r'højesteret: ([\\S]+)<', work))\n liame = has_inside(re.findall(r'e=([\\S]+@[\\S]+)\"', work)) # reversed\n email = check_mail(liame) # email\n mobile = has_inside(re.findall(r'\\.: ([+0-9]+)', work)) # mobile phone\n print('''Start: {}, birth: {}, @ highcourt: {}, @ supremecourt {}, email: {},\n mobile: {}'''.format(start, birth, highcourt, supremecourt,\n email, mobile))\n\n # works[2] - address\n dats = ' '.join(works[2].__str__().split())\n adr1 = has_inside(\n re.findall(r'p>(.+?)(.+?)[\\S\\s]+
[\\S\\s]+
([\\S\\s]+).+(www.[\\S]+)<', staff)) # www\n stafflawyers = has_inside(re.findall(\n r'advokater: ([\\S\\s]+?)<', staff)) # Ansatte advokater\n retskreds = has_inside(re.findall(\n r'Retskreds: ([\\S\\s]+)<', staff)) # Retskreds\n print(\"WWW: {}, staff: {}, retskreds: {}\".format(\n www, stafflawyers, retskreds))\n print()\n\n stringToWrite = '''{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},\n {},{},{},{},{},{},{},{}, {}\\n'''.format(str(page), cut, name, name2,\n surname,\n title, title2, area,\n start, birth, highcourt,\n supremecourt,\n email, mobile, firm,\n adr1, postnumm, by, country,\n phone,\n email2, cvr, www,\n retskreds, stafflawyers)\n\n DATA.write(stringToWrite)\n\nDATA.close()\nprint(\"\\nDONE!!!\")\n", "repo_name": "AlexeyYurko/Various-python-scrapers", "sub_path": "advokatnoeglen/advocates.py", "file_name": "advocates.py", "file_ext": "py", "file_size_in_byte": 6018, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "urllib.request.urlopen", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 57, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 58, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 83, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 96, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 98, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 101, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 103, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 104, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 106, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 114, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 116, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 123, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 130, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 131, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 132, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 134, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 140, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 141, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "74118551524", "text": "\nfrom dateutil.parser import parse\nfrom datetime import datetime\nfrom functools import reduce\nimport operator\nimport json\n\n\nclass Entry:\n\n def __init__(self, entity):\n\n self.user_id = entity['user_id']\n self.id = entity['_id']\n\n self.sgv = entity.get('sgv')\n self.direction = entity.get('direction')\n self.device = entity['device'].replace('\\x00', '').replace('\\\\u0000', '') if 'device' in entity else None\n self.type = entity.get('type')\n self.rssi = entity.get('rssi')\n self.rawbg = entity.get('rawbg')\n self.trend = entity['trend'] if 'trend' in entity else entity.get('trend_arrow')\n self.glucose = entity.get('glucose')\n self.mbg = entity.get('mbg')\n self.delta = entity.get('delta')\n self.filtered = entity.get('filtered')\n self.unfiltered = entity.get('unfiltered')\n self.noise = entity.get('noise') if entity.get('noise') != 'Clean' else None\n self.scale = entity.get('scale')\n self.slope = 
entity.get('slope')\n self.intercept = entity.get('intercept')\n\n self.raw_json = json.dumps(entity)\n\n self.source_entity = entity['source_entity']\n self.system_time = entity['system_time'] if 'system_time' in entity else entity.get('sysTime')\n\n self.date = datetime.fromtimestamp(entity['date']/1000).strftime('%Y-%m-%d %H:%M:%S')\n self.date_string = entity.get('dateString') if 'dateString' in entity and entity.get('dateString') else self.date\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass Treatment:\n\n def __init__(self, entity):\n\n self.user_id = entity['user_id']\n self.id = entity['_id'] if '_id' in entity else entity['id'] if 'id' in entity else entity['uuid']\n self.event_type = entity.get('eventType')\n self.timestamp = self._parse_date(entity.get('timestamp')) if entity.get('timestamp') != 0 else entity.get('created_at')\n\n self.insulin = entity.get('insulin')\n self.carbs = entity.get('carbs')\n self.protein = entity.get('protein') if entity.get('protein') != '' else None\n self.fat = entity.get('fat') if entity.get('fat') != '' else None\n self.glucose = entity.get('glucose')\n self.glucose_type = entity.get('glucoseType')\n self.food_type = entity.get('foodType')\n\n self.temp = entity.get('temp')\n self.rate = entity.get('rate') if entity.get('rate') != 'offset' else None\n self.duration = entity.get('duration')\n self.units = entity.get('units')\n self.amount = entity.get('amount')\n self.absolute = entity.get('absolute')\n self.medtronic = entity.get('medtronic')\n\n self.type = entity.get('type')\n self.absorption_time = entity.get('absorptionTime')\n self.unabsorbed = entity.get('unabsorbed')\n self.ratio = entity.get('ratio')\n self.target_top = entity.get('targetTop')\n self.target_bottom = entity.get('targetBottom')\n self.fixed = entity.get('fixed')\n self.programmed = entity.get('programmed')\n\n self.reason = entity.get('reason')\n self.notes = entity.get('notes')\n\n self.raw_json = json.dumps(entity)\n\n self.source_entity = entity['source_entity']\n self.entered_by = entity.get('enteredBy')\n self.created_at = entity.get('created_at')\n\n @staticmethod\n def _parse_date(date):\n\n if not date:\n return date\n elif isinstance(date, int):\n return str(datetime.fromtimestamp(float(date) / 1000))\n else:\n return str(parse(date))\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass Profile:\n\n def __init__(self, entity):\n\n self.user_id = entity['user_id']\n self.id = entity['_id']\n self.default_profile = entity.get('defaultProfile')\n self.mills = entity.get('mills')\n self.units = entity.get('units')\n\n self.raw_json = json.dumps(entity)\n\n self.source_entity = entity['source_entity']\n self.start_date = entity.get('startDate')\n self.created_at = entity.get('created_at')\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass DeviceStatus:\n\n def __init__(self, entity):\n\n self.entity = entity\n\n self.user_id = entity['user_id']\n self.id = entity['_id']\n self.device = entity.get('device')\n\n self.pump_id = self._extract(['pump', 'pumpID'])\n self.pump_bolusing = self._extract(['pump', 'bolusing']) if 'pump' in entity and 'bolusing' in entity['pump'] else self._extract(['pump', 'status', 'bolusing'])\n self.pump_suspended = self._extract(['pump', 'suspended']) if 'pump' in entity and 'suspended' in entity['pump'] else 
self._extract(['pump', 'status', 'suspended'])\n self.pump_model = self._extract(['pump', 'model'])\n\n self.loop_cob = self._extract(['loop', 'cob', 'cob'])\n self.loop_iob = self._extract(['loop', 'iob', 'iob'])\n self.loop_version = self._extract(['loop', 'version'])\n self.loop_failure_reason = self._extract(['loop', 'failureReason'])\n\n self.snooze = entity.get('snooze')\n self.override_active = self._extract(['override', 'active'])\n\n self.raw_json = json.dumps(entity)\n\n self.source_entity = entity['source_entity']\n self.created_at = entity['created_at']\n\n del self.entity\n\n def _extract(self, keys):\n try:\n return reduce(operator.getitem, keys, self.entity)\n except (KeyError,TypeError):\n return None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass DeviceStatusMetric:\n\n def __init__(self, entity):\n\n self.entity = entity\n\n self.device_status_id = entity['device_status_id']\n\n self.iob_iob = self._extract(['iob', 'iob'])\n self.iob_activity = self._extract(['iob', 'activity'])\n self.iob_basal_iob = self._extract(['iob', 'basaliob'])\n self.iob_bolus_iob = self._extract(['iob', 'bolusiob'])\n self.iob_net_basal_insulin = self._extract(['iob', 'netbasalinsulin'])\n self.iob_bolus_insulin = self._extract(['iob', 'bolusinsulin'])\n self.iob_timestamp = self._extract(['iob', 'timestamp'])\n\n self.suggested_temp = self._extract(['suggested', 'temp'])\n self.suggested_bg = self._extract(['suggested', 'bg'])\n self.suggested_tick = self._extract(['suggested', 'tick'])\n self.suggested_eventual_bg = self._extract(['suggested', 'eventualBG'])\n self.suggested_insulin_req = self._extract(['suggested', 'insulinReq'])\n self.suggested_reservoir = self._extract(['suggested', 'reservoir'])\n self.suggested_cob = self._extract(['suggested', 'COB'])\n self.suggested_iob = self._extract(['suggested', 'IOB'])\n\n self.enacted_temp = self._extract(['enacted', 'temp'])\n self.enacted_bg = self._extract(['enacted', 'bg'])\n self.enacted_tick = self._extract(['enacted', 'tick'])\n self.enacted_eventual_bg = self._extract(['enacted', 'eventualBG'])\n self.enacted_insulin_req = self._extract(['enacted', 'insulinReq'])\n self.enacted_reservoir = self._extract(['enacted', 'reservoir'])\n self.enacted_cob = self._extract(['enacted', 'COB'])\n self.enacted_iob = self._extract(['enacted', 'IOB'])\n self.enacted_duration = self._extract(['enacted', 'duration'])\n self.enacted_rate = self._extract(['enacted', 'rate'])\n\n self.enacted_timestamp = self._extract(['enacted', 'timestamp'])\n\n del self.entity\n\n def _extract(self, keys):\n try:\n return reduce(operator.getitem, keys, self.entity)\n except (KeyError,TypeError):\n return None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass NightscoutSurvey:\n\n def __init__(self, entity):\n\n self.ts = self._get_date(entity['Timestamp'])\n self.project_member_id = entity['Your Nightscout Data Commons \"project member ID\"']\n self.google_sheets_source = 'nightscout'\n self.date_of_birth = self._date_from_year(entity[\"Year of birth\"])\n self.gender = entity['Gender']\n self.ethnicity = entity['Race/Ethnicity']\n self.country = entity['Country of residence?']\n self.first_diagnosed_date = self._get_date(entity['Diagnosis date of diabetes:'])\n self.first_insulin_pump_date = self._get_date(entity['When did you/your child first go on an insulin pump?'])\n self.first_glucose_monitor_date = 
self._get_date(entity['When did you first go on a continuous glucose monitor (CGM)?'])\n self.first_diy_closed_loop_date = self._get_date(entity['When did you first start using a DIY closed loop?'])\n self.diy_closed_loop_type = entity['What type of DIY close loop technology do you use? Select all that you actively use:']\n self.who_uses_the_closed_loop_system = entity['Please describe your relationship to the individual who is donating their data:']\n self.weight = entity['How much do you weigh?']\n self.height = entity['How tall are you?']\n self.insulin_units_per_day = self._numeric(entity['How many units of insulin do you take per day?'])\n self.basal_insulin_units_per_day = self._numeric(entity['How many units of basal insulin do you take per day, on average?'])\n self.carb_grams_per_day = self._numeric(entity['On average, how many grams of carbohydrates do you eat in a day?'])\n self.last_lab_reported_a1c = self._numeric(entity['What was your last lab-reported A1C?'])\n self.last_lab_reported_a1c_date = self._get_date(entity['When was your last lab-reported A1C?'])\n\n @staticmethod\n def _numeric(val):\n\n try:\n return float(val)\n except (TypeError, ValueError):\n return None\n\n @staticmethod\n def _get_date(date):\n\n try:\n return parse(date).strftime('%Y-%m-%d %H:%M:%S\"')\n except (ValueError, TypeError, AttributeError):\n return None\n\n def _date_from_year(self, year):\n\n if len(str(year)) != 4:\n try:\n return self._get_date(year)\n except TypeError:\n return None\n else:\n return self._get_date(f'01-01-{year}')\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n\n\nclass OpenapsSurvey:\n\n def __init__(self, entity):\n\n self.ts = self._get_date(entity['Timestamp'])\n self.project_member_id = entity['Your OpenHumans OpenAPS Data Commons \"project member ID\"']\n self.google_sheets_source = 'openaps'\n self.date_of_birth = self._get_date(entity['When were you born?'])\n self.gender = entity['Gender']\n self.ethnicity = entity['Ethnicity origin:']\n self.country = entity['What country do you live in?']\n self.first_diagnosed_date = self._get_date(entity['When were you diagnosed with diabetes?'])\n self.first_insulin_pump_date = self._get_date(entity['When did you first go on an insulin pump?'])\n self.first_glucose_monitor_date = self._get_date(entity['When did you first go on a continuous glucose monitor (CGM)?'])\n self.first_diy_closed_loop_date = self._get_date(entity['When did you first start using a DIY closed loop?'])\n self.diy_closed_loop_type = entity['What type of DIY close loop technology do you use? 
Select all that you actively use:']\n self.who_uses_the_closed_loop_system = entity['Do you yourself have diabetes, or are you filling out this form for a child/loved one who has diabetes?']\n self.weight = entity['How much do you weigh?']\n self.height = entity['How tall are you?']\n self.insulin_units_per_day = self._numeric(entity['How many units of insulin do you take per day?'])\n self.basal_insulin_units_per_day = self._numeric(entity['How many units of basal insulin do you take per day, on average?'])\n self.carb_grams_per_day = self._numeric(entity['On average, how many grams of carbohydrates do you eat in a day?'])\n self.last_lab_reported_a1c = self._numeric(entity['What was your last lab-reported A1C?'])\n self.last_lab_reported_a1c_date = self._get_date(entity['When was your last lab-reported A1C?'])\n\n @staticmethod\n def _numeric(val):\n\n try:\n return float(val)\n except (TypeError, ValueError):\n return None\n\n @staticmethod\n def _get_date(date):\n try:\n return parse(date).strftime('%Y-%m-%d %H:%M:%S\"')\n except (ValueError, TypeError, AttributeError):\n return None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n pass\n", "repo_name": "danamlewis/open-aps-streaming", "sub_path": "open-humans-etl/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 13133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 38, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "name"}, {"api_name": "dateutil.parser.parse", "line_number": 99, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 118, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 163, "usage_type": "call"}, {"api_name": "operator.getitem", "line_number": 163, "usage_type": "attribute"}, {"api_name": "functools.reduce", "line_number": 216, "usage_type": "call"}, {"api_name": "operator.getitem", "line_number": 216, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 264, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 321, "usage_type": "call"}]} +{"seq_id": "39340530363", "text": "\nfrom os.path import sep\nimport numpy as np\nimport pickle as pkl\nfrom scipy.spatial.distance import mahalanobis\nfrom utils import find_threshold\n\ndef get_m_dist(data_path, metadata_file, genes = None):\n\n with open('{0}{1}{2}'.format(data_path, sep, metadata_file), 'rb') as f:\n metadata = pkl.load(f)\n\n # Get cell pixel values\n cell_pixels_filt_file = metadata['cell_pixels_filt_file']\n with open('{0}{1}{2}'.format(data_path, sep, cell_pixels_filt_file), 'rb') as f:\n cell_px = pkl.load(f)\n\n cell_data_file = metadata['cell_data_file']\n with open('{0}{1}{2}'.format(data_path, sep, cell_data_file), 'rb') as f:\n cell_data = pkl.load(f)\n\n # Get background pixel values\n bg_pixels_filt_file = metadata['bg_pixels_filt_file']\n with open('{0}{1}{2}'.format(data_path, sep, bg_pixels_filt_file), 'rb') as f:\n bg_px = 
pkl.load(f)\n\n # Get annotated lipofuscin pixel values\n lipo_pixels_cells_filt_file = metadata['lipo_pixels_cells_filt_file']\n with open('{0}{1}{2}'.format(data_path, sep, lipo_pixels_cells_filt_file), 'rb') as f:\n lipo_px = pkl.load(f)\n\n # Concatenate lipofuscin pixel values\n if genes == None:\n genes = list(lipo_px.keys())\n n_genes = len(genes)\n lipo_px_vals = np.zeros([n_genes, len(lipo_px[genes[0]])])\n for g in range(n_genes):\n lipo_px_vals[g, :] = lipo_px[genes[g]]\n\n mu = np.mean(lipo_px_vals, axis = 1)\n cov = np.cov(lipo_px_vals)\n VI = np.linalg.inv(cov)\n\n m_dist_cells = {}\n m_dist_bg = {}\n m_dist_lipo = np.zeros(lipo_px_vals.shape[1])\n\n cells = list(cell_data.keys())\n for cell in cells:\n\n m_dist_cells[cell] = {}\n m_dist_bg[cell] = {}\n planes = cell_data[cell]['z_planes']\n for plane in planes:\n n_px = len(cell_px[genes[0]][cell][plane])\n u_cell = np.zeros([n_genes, n_px])\n\n n_px_bg = len(bg_px[genes[0]][cell][plane])\n u_bg = np.zeros([n_genes, n_px_bg])\n\n for g in range(n_genes):\n u_cell[g, :] = cell_px[genes[g]][cell][plane]\n u_bg[g, :] = bg_px[genes[g]][cell][plane]\n\n m_dist_cells[cell][plane] = np.zeros(n_px)\n m_dist_bg[cell][plane] = np.zeros(n_px_bg)\n\n for p in range(n_px):\n m_dist_cells[cell][plane][p] = mahalanobis(u_cell[:, p], mu, VI)\n for p in range(n_px_bg):\n m_dist_bg[cell][plane][p] = mahalanobis(u_bg[:, p], mu, VI)\n\n n_px = lipo_px_vals.shape[1]\n for p in range(n_px):\n u = lipo_px_vals[:, p]\n m_dist_lipo[p] = mahalanobis(u, mu, VI)\n\n m_dist_cells_file = metadata['m_dist_cells_file']\n with open('{0}{1}{2}'.format(data_path, sep, m_dist_cells_file), 'wb') as f:\n pkl.dump(m_dist_cells, f)\n\n m_dist_bg_file = metadata['m_dist_bg_file']\n with open('{0}{1}{2}'.format(data_path, sep, m_dist_bg_file), 'wb') as f:\n pkl.dump(m_dist_bg, f)\n\n m_dist_lipo_file = metadata['m_dist_lipo_file']\n with open('{0}{1}{2}'.format(data_path, sep, m_dist_lipo_file), 'wb') as f:\n pkl.dump(m_dist_lipo, f)\n\n return m_dist_cells, m_dist_bg, m_dist_lipo\n\n\n\n\ndef get_lipo(data_path, metadata_file, thresh_scale = 1.5):\n\n # Load metadata\n with open('{0}{1}{2}'.format(data_path, sep, metadata_file), 'rb') as f:\n metadata = pkl.load(f)\n\n # Load cell mask data\n cell_data_file = metadata['cell_data_file']\n with open('{0}{1}{2}'.format(data_path, sep, cell_data_file), 'rb') as f:\n cell_data = pkl.load(f)\n\n cell_pixels_file = metadata['cell_pixels_file']\n with open('{0}{1}{2}'.format(data_path, sep, cell_pixels_file), 'rb') as f:\n cell_pixels = pkl.load(f)\n\n # Load background pixels\n bg_pixels_file = metadata['bg_pixels_file']\n with open('{0}{1}{2}'.format(data_path, sep, bg_pixels_file), 'rb') as f:\n bg_pixels = pkl.load(f)\n\n # Load mahalanobis distances of cell and background pixels\n m_dist_cells_file = metadata['m_dist_cells_file']\n with open('{0}{1}{2}'.format(data_path, sep, m_dist_cells_file), 'rb') as f:\n m_dist_cells = pkl.load(f)\n\n m_dist_bg_file = metadata['m_dist_bg_file']\n with open('{0}{1}{2}'.format(data_path, sep, m_dist_bg_file), 'rb') as f:\n m_dist_bg = pkl.load(f)\n\n # Concatenate cell and bg pixel distances to get threshold\n cells = list(cell_data.keys())\n cell = cells[0]\n planes = cell_data[cell]['z_planes']\n points = m_dist_cells[cell][planes[0]]\n points = np.concatenate([points, m_dist_bg[cell][planes[0]]])\n for plane in planes[1:]:\n points = np.concatenate([points, m_dist_cells[cell][plane]])\n points = np.concatenate([points, m_dist_bg[cell][plane]])\n\n for cell in cells[1:]:\n planes 
= cell_data[cell]['z_planes']\n for plane in planes:\n points = np.concatenate([points, m_dist_cells[cell][plane]])\n points = np.concatenate([points, m_dist_bg[cell][plane]])\n\n thresh = find_threshold.find_threshold(points, thresh_scale)\n\n # Find new cell and background masks excluding Lipofuscin\n cell_pixels_no_lipo = {}\n bg_pixels_no_lipo = {}\n for cell in cells:\n cell_pixels_no_lipo[cell] = {}\n bg_pixels_no_lipo[cell] = {}\n planes = cell_data[cell]['z_planes']\n for plane in planes:\n cell_pixels_no_lipo[cell][plane] = np.array(cell_pixels[cell][plane])[:, np.where(m_dist_cells[cell][plane] > thresh)[0]]\n bg_pixels_no_lipo[cell][plane] = np.array(bg_pixels[cell][plane])[np.where(m_dist_bg[cell][plane] > thresh)[0], :]\n\n cell_pixels_no_lipo_file = metadata['cell_pixels_no_lipo_file']\n with open('{0}{1}{2}'.format(data_path, sep, cell_pixels_no_lipo_file), 'wb') as f:\n pkl.dump(cell_pixels_no_lipo, f)\n\n bg_pixels_no_lipo_file = metadata['bg_pixels_no_lipo_file']\n with open('{0}{1}{2}'.format(data_path, sep, bg_pixels_no_lipo_file), 'wb') as f:\n pkl.dump(bg_pixels_no_lipo, f)\n\n return cell_pixels_no_lipo, bg_pixels_no_lipo\n", "repo_name": "amrita112/FISH-Image-Analysis", "sub_path": "lipofuscin/detect.py", "file_name": "detect.py", "file_ext": "py", "file_size_in_byte": 6023, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.sep", "line_number": 10, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 15, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 19, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 24, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 29, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 66, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.mahalanobis", "line_number": 69, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.mahalanobis", "line_number": 71, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.mahalanobis", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 79, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 83, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 87, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 88, 
"usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 98, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 103, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 107, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 112, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 117, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 121, "usage_type": "argument"}, {"api_name": "pickle.load", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 138, "usage_type": "call"}, {"api_name": "utils.find_threshold.find_threshold", "line_number": 140, "usage_type": "call"}, {"api_name": "utils.find_threshold", "line_number": 140, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 154, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 155, "usage_type": "call"}, {"api_name": "os.path.sep", "line_number": 158, "usage_type": "argument"}, {"api_name": "pickle.dump", "line_number": 159, "usage_type": "call"}]} +{"seq_id": "7293250247", "text": "from typing import List, Optional\n\nfrom idb.grpc.types import CompanionClient\nfrom idb.grpc.idb_pb2 import DebugServerRequest, DebugServerResponse\n\n\nasync def _unary(\n client: CompanionClient, request: DebugServerRequest\n) -> DebugServerResponse:\n async with client.stub.debugserver.open() as stream:\n await stream.send_message(request, end=True)\n return await stream.recv_message()\n\n\nasync def debugserver_start(client: CompanionClient, bundle_id: str) -> List[str]:\n response = await _unary(\n client=client,\n request=DebugServerRequest(start=DebugServerRequest.Start(bundle_id=bundle_id)),\n )\n return response.status.lldb_bootstrap_commands\n\n\nasync def debugserver_stop(client: CompanionClient) -> None:\n await _unary(\n client=client, request=DebugServerRequest(stop=DebugServerRequest.Stop())\n )\n\n\nasync def debugserver_status(client: CompanionClient) -> Optional[List[str]]:\n response = await _unary(\n client=client, request=DebugServerRequest(status=DebugServerRequest.Status())\n )\n commands = response.status.lldb_bootstrap_commands\n if not len(commands):\n return None\n return commands\n\n\nCLIENT_PROPERTIES = [ # pyre-ignore\n debugserver_start,\n debugserver_status,\n debugserver_stop,\n]\n", "repo_name": "sergey-plevako-badoo/FBSimulatorControl", "sub_path": "idb/ipc/debugserver.py", "file_name": "debugserver.py", "file_ext": "py", "file_size_in_byte": 1287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"github-code", "pt": "52", "api": [{"api_name": "idb.grpc.types.CompanionClient", "line_number": 8, "usage_type": "name"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest", "line_number": 8, "usage_type": "name"}, {"api_name": "idb.grpc.idb_pb2.DebugServerResponse", "line_number": 9, "usage_type": "name"}, {"api_name": "idb.grpc.types.CompanionClient", "line_number": 15, "usage_type": "name"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest", "line_number": 18, "usage_type": "call"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest.Start", "line_number": 18, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "idb.grpc.types.CompanionClient", "line_number": 23, "usage_type": "name"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest", "line_number": 25, "usage_type": "call"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest.Stop", "line_number": 25, "usage_type": "call"}, {"api_name": "idb.grpc.types.CompanionClient", "line_number": 29, "usage_type": "name"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest", "line_number": 31, "usage_type": "call"}, {"api_name": "idb.grpc.idb_pb2.DebugServerRequest.Status", "line_number": 31, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "8616415516", "text": "import numpy as np\nimport pandas as pd\nimport seaborn\nfrom numpy import size, log, pi, sum, diff, array, zeros, diag, mat, asarray, sqrt, copy\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\nfrom sklearn.utils.multiclass import unique_labels\nimport scipy.optimize as opt\n\nclass Garch(BaseEstimator, ClassifierMixin):\n \"\"\"\n\tClass Garch takes an argument for model. Should be specified as one of \"vanilla_garch\" or \"gjr_garch\". \n\tGarch has methods for fit and predict. After being fitted Garch has properties for fit results and params.\n \"\"\"\n def __init__(self, model):\n self.__model = model\n\n @property\n def model(self):\n return self.__model\n \n# These properties are from fit method\n @property\n def results(self):\n return self.__results\n @property\n def params(self):\n return self.__params\n\n\n def fit(self, X, begVals, method = \"Nelder-Mead\", jac=None, hess=None, hessp=None, bounds=None,\n constraints=(), tol=None, callback=None, options=None):\n \"\"\"\n \tfit method takes arguments for X, begVals, method, and other arguments which are \n used to scipy.optimize.minimize (see scipy documentation). Where X is the data, \n begVals are initial values to be used, and method is one of the minimization methods\n as described in scipy.optimize.minimize. 
Has properties for params and results.\n \"\"\" \n self.X_ = X\n self.begVals = begVals\n self.method = method\n self.jac = jac\n self.hess = hess\n self.hessp = hessp\n self.bounds = bounds\n self.constraints = constraints\n self.tol = tol\n self.callback = callback\n self.options = options\n \n self.__model = self.__model(self.begVals, self.X_)\n minimization = self.__model.minimize(self.method, self.jac, self.hess, self.hessp, \n self.bounds, self.constraints, self.tol, \n self.callback, self.options)\n \n self.output = GarchOutput(minimization)\n self.__params = self.output.params\n self.__results = self.output.results\n\t\n return self.output\n\n \n def predict(self, steps = 1):\n \"\"\"\n Predict method takes argument for steps, where steps are number of future time periods,\n default is steps = 1 for 1 time period in the future. Predict returns an array of predicted values.\n \"\"\"\n # Ensure that instance has been fitted\n check_is_fitted(self, \"X_\")\n self.steps = steps\n est_params = self.output.params\n \n prediction = self.__model.forecast(self.steps, est_params)\n \n return prediction\n \nclass GarchOutput():\n def __init__(self, results):\n self.__results = results\n self.__params = results[\"x\"]\n \n @property\n def results(self):\n return self.__results\n @property\n def params(self):\n return self.__params\n\n \n\n\n### Different Types of Garch Models\n\nclass vanilla_garch():\n def __init__(self, params, data):\n self.__params = params\n self.__data = data\n \n @property\n def params(self):\n return self.__params\n @property\n def data(self):\n return self.__data\n \n\n def likelihood(self, params, data, sigma_last = False):\n mu = params[0]\n omega = params[1]\n alpha = params[2]\n beta = params[3]\n\n T = size(data, 0)\n eps = data - mu\n\n sigma2 = np.empty(T)\n\n sigma2[0] = omega/(1 - alpha - beta)\n\n for t in range(1, T):\n sigma2[t] = omega + alpha * eps[t-1]**2 + beta * sigma2[t-1]\n\n lls = 0.5 * (log(2 * pi) + log(sigma2) + eps**2/sigma2)\n ll = sum(lls)\n\n if sigma_last is True:\n results = [sigma2[-1], eps[-1]]\n else:\n results = ll\n\n\n return results\n \n def minimize(self, method, jac=None, hess=None, hessp=None, bounds=None, constraints=(), \n tol=None, callback=None, options=None):\n \n results = opt.minimize(fun = self.likelihood, x0 = self.__params, args = self.__data, method = method,\n jac = jac, hess = hess, hessp = hessp, bounds = bounds, constraints = constraints, tol = tol, \n callback = callback, options = options)\n return results\n \n\n def forecast(self, steps, est_params):\n est_mu = est_params[0]\n est_omega = est_params[1]\n est_alpha = est_params[2]\n est_beta = est_params[3]\n init_sigma2, init_eps = self.likelihood(self.__params, self.__data, sigma_last = True)\n \n \n forecast_values = np.empty(steps)\n forecast_values[0] = est_omega + est_alpha * init_eps**2 + est_beta * init_sigma2\n for t in range(1,steps):\n forecast_values[t] = est_omega + forecast_values[t-1] * (est_alpha + est_beta)\n \n return forecast_values\n \n \nclass gjr_garch():\n def __init__(self, params, data,sigma_last = False):\n self.__params = params\n self.__data = data\n self.sigma_last = sigma_last\n \n @property\n def params(self):\n return self.__params\n @property\n def data(self):\n return self.__data\n \n def likelihood(self, params, data, sigma_last = False):\n mu = params[0]\n omega = params[1]\n alpha = params[2]\n beta = params[3]\n gamma = params[4]\n\n T = size(data, 0)\n eps = data - mu\n\n sigma2 = np.empty(T)\n\n sigma2[0] = omega/(1 - alpha 
- beta - 0.5 * gamma)\n\n for t in range(1, T):\n if eps[t-1] >= 0:\n sigma2[t] = omega + alpha * eps[t-1]**2 + beta * sigma2[t-1]\n else:\n sigma2[t] = omega + alpha * eps[t-1]**2 + gamma * eps[t-1]**2 + beta * sigma2[t-1] \n \n\n lls = 0.5 * (log(2 * pi) + log(sigma2) + eps**2/sigma2)\n ll = sum(lls)\n\n if sigma_last is True:\n results = [sigma2[-1], eps[-1]]\n else:\n results = ll\n\n return results\n \n def minimize(self, method, jac=None, hess=None, hessp=None, bounds=None, constraints=(), \n tol=None, callback=None, options=None):\n results = opt.minimize(fun = self.likelihood, x0 = self.__params, args = self.__data, method = method,\n jac = jac, hess = hess, hessp = hessp, bounds = bounds, constraints = constraints, tol = tol, \n callback = callback, options = options)\n return results\n \n### Check with Dr. Brough \n def forecast(self, steps, est_params):\n est_mu = est_params[0]\n est_omega = est_params[1]\n est_alpha = est_params[2]\n est_beta = est_params[3]\n est_gamma = est_params[4]\n init_sigma2, init_eps = self.likelihood(self.__params, self.__data, sigma_last = True)\n \n \n forecast_values = np.empty(steps)\n if init_eps >= 0:\n forecast_values[0] = est_omega + est_alpha * init_eps**2 + est_beta * init_sigma2\n else:\n forecast_values[0] = est_omega + (est_alpha + est_gamma) * init_eps**2 + est_beta * init_sigma2\n \n for t in range(1,steps):\n forecast_values[t] = est_omega + forecast_values[t-1] * (est_alpha + est_beta + 0.5*est_gamma)\n \n return forecast_values\n", "repo_name": "USUECN6990/Garch", "sub_path": "Garch/Garch.py", "file_name": "Garch.py", "file_ext": "py", "file_size_in_byte": 7566, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sklearn.base.BaseEstimator", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.base.ClassifierMixin", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.utils.validation.check_is_fitted", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 137, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 193, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 194, "usage_type": "call"}, {"api_name": "scipy.optimize.minimize", "line_number": 205, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "6473959821", "text": "from app.extensions import db, redis, mail\nfrom app.models import *\nfrom config import Config\nfrom flask import Flask, g\nfrom flask_login import LoginManager, current_user\nfrom flask_migrate import Migrate\nfrom celery import Celery, Task\nfrom celery.schedules import crontab\nfrom kombu import Exchange, Queue\nimport 
os\n\n\ndef celery_init_app(app: Flask) -> Celery:\n class FlaskTask(Task):\n def __call__(self, *args: object, **kwargs: object) -> object:\n with app.app_context():\n return self.run(*args, **kwargs)\n\n CELERY = {\n 'CELERY_BROKER_URL': os.environ.get('CELERY_BROKER_URL') or 'redis://redis:6379/0',\n 'CELERY_RESULT_BACKEND': os.environ.get('CELERY_RESULT_BACKEND') or 'redis://redis:6379/0',\n 'CELERY_IMPORTS': (\n 'app.script_check',\n ),\n 'CELERYBEAT_SCHEDULE': {\n 'process-queue-every-minute': {\n 'task': 'app.script_check.process_new_files',\n # 'schedule': crontab(minute='*/1'),\n 'schedule': 10.0,\n 'args': ()\n },\n },\n 'CELERY_QUEUES': (\n Queue('default', Exchange('default'), routing_key='default'),\n Queue('checks', Exchange('checks'), routing_key='checks'),\n Queue('notifications', Exchange('notifications'), routing_key='notifications'),\n ),\n\n }\n\n celery_app = Celery(\n app.name,\n task_cls=FlaskTask,\n\n )\n\n celery_app.config_from_object(CELERY)\n celery_app.set_default()\n app.extensions[\"celery\"] = celery_app\n return celery_app\n\n\ndef create_app(config_class=Config) -> Flask:\n app = Flask(__name__)\n app.config.from_object(config_class)\n\n # Initialize extensions\n db.init_app(app)\n migrate = Migrate()\n migrate.init_app(app, db)\n\n # Register login manager\n login_manager = LoginManager()\n login_manager.init_app(app)\n\n # Register redis\n redis.init_app(app)\n\n # Register Celery\n celery_init_app(app)\n\n # Register mail\n mail.init_app(app)\n\n @app.before_request\n def before_request():\n g.user_authenticated = current_user.is_authenticated\n\n @login_manager.user_loader\n def load_user(user_id):\n return User.query.get(int(user_id))\n\n return app\n\n\napp = create_app()\ncelery_inst = celery_init_app(app)\n\nfrom app.main import routes\n", "repo_name": "Aevum12/sky_test", "sub_path": "app/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2373, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "name"}, {"api_name": "celery.Task", "line_number": 14, "usage_type": "name"}, {"api_name": "app.extensions.app_context", "line_number": 16, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 16, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "kombu.Queue", "line_number": 34, "usage_type": "call"}, {"api_name": "kombu.Exchange", "line_number": 34, "usage_type": "call"}, {"api_name": "kombu.Queue", "line_number": 35, "usage_type": "call"}, {"api_name": "kombu.Exchange", "line_number": 35, "usage_type": "call"}, {"api_name": "kombu.Queue", "line_number": 36, "usage_type": "call"}, {"api_name": "kombu.Exchange", "line_number": 36, "usage_type": "call"}, {"api_name": "celery.Celery", "line_number": 41, "usage_type": "call"}, {"api_name": "app.extensions.name", "line_number": 42, "usage_type": "attribute"}, {"api_name": "app.extensions", "line_number": 42, "usage_type": "name"}, {"api_name": "app.extensions.extensions", "line_number": 49, "usage_type": "attribute"}, {"api_name": "app.extensions", "line_number": 49, "usage_type": "name"}, {"api_name": "celery.Celery", "line_number": 13, "usage_type": "name"}, 
{"api_name": "config.Config", "line_number": 53, "usage_type": "name"}, {"api_name": "app.extensions", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 54, "usage_type": "call"}, {"api_name": "app.extensions.config.from_object", "line_number": 55, "usage_type": "call"}, {"api_name": "app.extensions.config", "line_number": 55, "usage_type": "attribute"}, {"api_name": "app.extensions", "line_number": 55, "usage_type": "name"}, {"api_name": "app.extensions.db.init_app", "line_number": 58, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 58, "usage_type": "argument"}, {"api_name": "app.extensions.db", "line_number": 58, "usage_type": "name"}, {"api_name": "flask_migrate.Migrate", "line_number": 59, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 60, "usage_type": "argument"}, {"api_name": "app.extensions.db", "line_number": 60, "usage_type": "argument"}, {"api_name": "flask_login.LoginManager", "line_number": 63, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 64, "usage_type": "argument"}, {"api_name": "app.extensions.redis.init_app", "line_number": 67, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 67, "usage_type": "argument"}, {"api_name": "app.extensions.redis", "line_number": 67, "usage_type": "name"}, {"api_name": "app.extensions", "line_number": 70, "usage_type": "argument"}, {"api_name": "app.extensions.mail.init_app", "line_number": 73, "usage_type": "call"}, {"api_name": "app.extensions", "line_number": 73, "usage_type": "argument"}, {"api_name": "app.extensions.mail", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.g.user_authenticated", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 77, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 77, "usage_type": "name"}, {"api_name": "app.extensions.before_request", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app.extensions", "line_number": 75, "usage_type": "name"}, {"api_name": "app.extensions", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 53, "usage_type": "name"}, {"api_name": "app.extensions", "line_number": 86, "usage_type": "name"}, {"api_name": "app.extensions", "line_number": 87, "usage_type": "argument"}]} +{"seq_id": "33662986775", "text": "import http.client\n\nconn = http.client.HTTPSConnection(\"vindecoder.p.rapidapi.com\")\n\nheaders = {\n 'x-rapidapi-key': \"e25f4fcc35msh18f89aff8212734p1a7245jsn79e152a13e68\",\n 'x-rapidapi-host': \"vindecoder.p.rapidapi.com\"\n }\n\nconn.request(\"GET\", \"/salvage_check?vin=JMYLYV78W5J000603\", headers=headers)\n\nres = conn.getresponse()\ndata = res.read()\n\nprint(data.decode(\"utf-8\"))", "repo_name": "sebastianjorc/rimbu", "sub_path": "recursos_bd/python scrapper/get_vin.py", "file_name": "get_vin.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "http.client.client.HTTPSConnection", "line_number": 3, "usage_type": "call"}, {"api_name": "http.client.client", "line_number": 3, "usage_type": "attribute"}, {"api_name": "http.client", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "15148596498", "text": "#!usr/bin/python\n\n__author__ = 'zabidon'\n\nimport vk\nimport re\nimport time\nimport 
requests\nfrom os.path import exists, isfile, join, splitext, curdir\nfrom os import mkdir, remove\nfrom arg_parse import arg_parse\n\n\ndef check_create_directory(path):\n if exists(path) and isfile(path):\n remove(path)\n if not exists(path):\n mkdir(path)\n\n\nclass VK_downloader(object):\n def __init__(self, session):\n self.session = session\n self.download_dir = join(curdir, \"download\")\n self.music_dir = join(self.download_dir, \"music\")\n\n for directory in [self.download_dir, self.music_dir]:\n check_create_directory(directory)\n\n\n def download_audio_all(self,\n owner_id=None,\n album_id=None,\n through_albums=False,\n offset=0,\n count=6000,\n q=None):\n \"\"\"\n Сохраняет список аудиозаписей пользователя или сообщества.\n\n\n :param owner_id: Идентификатор владельца аудиозаписей\n :param album_id: Идентификатор альбома с аудиозаписями\n :param through_albums: Сохранять аудиозаписи находящиеся в альбомах\n :param offset: Смещение, необходимое для выборки определенного количества аудиозаписей. По умолчанию — 0\n :param count: Количество аудиозаписей, информацию о которых необходимо вернуть. Максимальное значение — 6000.\n \"\"\"\n\n if through_albums:\n if owner_id:\n albums = self.session.audio.getAlbums(owner_id=owner_id)\n owner_dir = join(self.music_dir, str(owner_id))\n check_create_directory(owner_dir)\n count_album = 0\n print(u\"All albums: {}\".format(len(albums['items'])))\n for album in albums['items']:\n album['title'] = re.sub(r\"[^\\w().-]\", \" \", album['title'])\n dir = join(owner_dir, str(album['title']))\n\n check_create_directory(dir)\n count_album += 1\n print(u\"{0}) Start downloading album: {1}\".format(count_album, album['title']))\n audios = self.session.audio.get(owner_id=owner_id,\n album_id=album['id'],\n offset=offset,\n count=count)\n\n self.download_audios(audios, dir)\n else:\n\n dir = join(self.music_dir, str(album_id or owner_id))\n check_create_directory(dir)\n\n audios = self.session.audio.get(owner_id=owner_id, album_id=album_id)\n\n self.download_audios(audios, dir)\n\n\n def download_audio_api(self, dir=None, func=\"audio.get\", **kwargs):\n self.download_audios(self.session(func, **kwargs), join(self.music_dir, dir))\n\n\n def download_audios(self, audios, dir=None):\n if not dir:\n dir = join(self.music_dir, \"noname\")\n check_create_directory(dir)\n count_audio = 0\n print(u\"All audios: {0}\".format(len(audios['items'])))\n for audio in audios['items']:\n if 'url' in audio and 'artist' in audio and 'title' in audio:\n count_audio += 1\n print(u\"{0}) Start downloading audio: {1}\".format(count_audio, (audio['title'], audio['artist'])))\n self.download_audio(audio['url'], audio['title'], audio['artist'], dir=dir)\n print(\"Downloading is ended\")\n\n\n def download_audio(self, url=None, name=None, artists=None, force=False, *, dir):\n if name is not None and artists is not None and url is not None:\n filename = artists.strip() + '-' + name.strip()\n filename = re.sub(r\"[^\\w().-]\", \" \", filename)\n filepath = join(dir, filename[0:64]) + \".mp3\"\n\n res = requests.get(url, stream=True)\n\n counter = 1\n if force:\n if exists(filepath):\n name, ext = splitext(filepath)\n filepath = name + \" ({})\".format(counter) + ext\n\n while exists(filepath):\n counter += 1\n name, ext = splitext(filepath)\n filepath = name[:-4] + \" ({})\".format(counter) + ext\n else:\n if exists(filepath):\n print(\"File '{}' exist\".format(filepath))\n return\n file = open(filepath, 'wb')\n file.write(res.content)\n file.close()\n\n\nif __name__ == 
\"__main__\":\n # args = arg_parse()\n downloader = VK_downloader(vk.API('app_id', 'name', 'password'))\n downloader.download_audio_api( \"Ludovico Einaudi\", \"audio.search\", q=\"Ludovico Einaudi\", count=300)\n # downloader.download_audio_all(owner_id = args.id)\n # album_id = args.aid,\n # through_albums=args.through_albums)", "repo_name": "D504/vk_downloader", "sub_path": "vk_downloader.py", "file_name": "vk_downloader.py", "file_ext": "py", "file_size_in_byte": 5275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.exists", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 15, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.curdir", "line_number": 24, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 101, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 116, "usage_type": "call"}, {"api_name": "vk.API", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "69964675044", "text": "from flask import Flask\nimport pytest\nimport requests\nimport ssl\nimport os\nfrom requests.exceptions import ConnectionError\nimport sys\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\n\nfrom application.app import app, get_country_city_data, get_stations_by_country\n\ndef test_Is_online_index():\n '''Här görs en request.get fär att kolla om våran endpoint är funktionell'''\n assert requests.get(\"http://127.0.0.1:5000\", timeout=10)\n\ndef test_url_up_and_running():\n '''Här skriver vi ett test för att simulera en request till Flask applikationen utan att köra servern, detta görs genom att använda Flask 'test_client' '''\n with app.test_client() as client:\n try:\n response = client.get('/')\n assert response.status_code == 200\n except ConnectionError:\n pytest.fail(\"Failed to connect to the URL\")\n\ndef test_file_structure():\n '''Denna test_case kollar filstrukturer och kollar om den stämmer'''\n assert \"application\" in os.listdir(os.curdir)\n assert \"docs\" in os.listdir(os.curdir)\n assert \"tests\" in os.listdir(os.curdir)\n assert \"__init__.py\" in os.listdir(os.curdir+\"/application\")\n assert \"app.py\" in os.listdir(os.curdir+\"/application\")\n 
assert \"func.py\" in os.listdir(os.curdir+\"/application\")\n assert \"form.html\" in os.listdir(os.curdir+\"/application\"+\"/templates\")\n assert \"index.html\" in os.listdir(os.curdir+\"/application\"+\"/templates\")\n assert \"layout.html\" in os.listdir(os.curdir+\"/application\"+\"/templates\")\n\ndef test_get_country_city_data():\n '''I denna test_case görs ett anrop till funktionen get_country_city_data. \n Den hämtar sedan resultatet som förväntas returnera två listor. En för länder och den andra för cities.'''\n countries, cities = get_country_city_data()\n assert isinstance(countries, list)\n assert isinstance(cities, list)\n\ndef test_get_stations_by_country():\n '''Här anropar vi get_station_by_country funktionen som gör ett anrop där den förväntas returnera 'SE' (Sverige) som ett valalternativ om det existerar.'''\n stations = get_stations_by_country('SE')\n for station in stations:\n assert station['location']['country'] == 'SE'", "repo_name": "YasinGl/GrupparbeteJokes", "sub_path": "tests/test_.py", "file_name": "test_.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "sv", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "application.app.app.test_client", "line_number": 21, "usage_type": "call"}, {"api_name": "application.app.app", "line_number": 21, "usage_type": "name"}, {"api_name": "requests.exceptions.ConnectionError", "line_number": 25, "usage_type": "name"}, {"api_name": "pytest.fail", "line_number": 26, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 33, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 34, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 37, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 38, "usage_type": "attribute"}, {"api_name": "application.app.get_country_city_data", "line_number": 43, "usage_type": "call"}, {"api_name": 
"application.app.get_stations_by_country", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "23102057536", "text": "# -*- coding: UTF-8 -*-\nimport tools\nfrom html import unescape\nfrom bs4 import BeautifulSoup as BS\nimport re\nfrom requests import get\nimport os\n\nclass Musec():\n def __init__(self, mid, platform, albn='', art='', img='', sformat='m4a'):\n self.guid = '8962339369'\n self.mid = mid\n\n self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'}\n self.platform = platform\n self.sformat = sformat\n\n #Info\n h=get('http://y.qq.com/n/ryqq/songDetail/%s' % (mid), verify=False, headers=self.headers)\n soup = BS(h.text, 'html.parser')\n\n self.name = unescape(soup.select('div.data__name > h1')[0].string)\n\n if albn:\n self.albn = albn\n else:\n try:\n self.albn = unescape(soup.select('li.data_info__item_song > a')[0].string)\n except IndexError: # Song has no album\n self.albn = ' '\n\n if img:\n self.img = img\n else:\n part = '.*?window.__INITIAL_DATA__ ={\"detail\":{\"title\":\"%s\",\"picurl\":\"(.*?)\"' % (re.escape(self.name).replace('\"', '\\\\\\\\\"').replace('/','\\\\\\\\u002F'))\n scripts = set(soup.select('script')) - set(soup.select('script[crossorigin=anonymous]'))\n for script in scripts:\n imgurl = ''\n try:\n imgurl = 'http:' + re.match(part, script.text).group(1).replace('\\\\u002F', '/')\n break\n except AttributeError:\n pass\n self.img = get(imgurl, verify=False, headers=self.headers).content\n\n if art:\n self.art = art\n else:\n art = ''\n for s in soup.select('a.data__singer_txt'):\n art += s.string + ' / '\n self.art = tools.del_cn(unescape(art[:-3]))\n\n def get_download_url(self, uin='0', cookies={}):\n #get_vkey:\n getvkurl = 'http://u.y.qq.com/cgi-bin/musicu.fcg?&data={\"req\":{\"param\":{\"guid\":\"%s\"}},\"req_0\":{\"module\":\"vkey.GetVkeyServer\",\"method\":\"CgiGetVkey\",\"param\":{\"guid\":\"%s\",\"songmid\":[\"%s\"],\"uin\":\"%s\"}},\"comm\":{\"uin\":%s}}' \\\n % (self.guid, self.guid, self.mid, uin, uin)\n\n vkres = get(getvkurl, verify=False, headers=self.headers, cookies=cookies)\n purl = vkres.json()['req_0']['data']['midurlinfo'][0]['purl']\n\n self.dlurl = 'http://dl.stream.qqmusic.qq.com/' + purl\n\n\n\n\n def download(self, path, uin='0', cookies={}, download_info=True):\n self.get_download_url(uin, cookies)\n h = get(self.dlurl, cookies=cookies, verify=False, headers=self.headers)\n\n if h.status_code == 200:\n # filter error character\n errcha = tools.get_errcha(self.platform)\n filename = re.sub(errcha, '-' , self.name) + '.' 
+ self.sformat\n song_path = os.path.join(path, filename)\n\n with open(song_path, 'wb') as f:\n f.write(h.content)\n\n if download_info:\n tools.set_info(\n song_path,\n sformat=self.sformat,\n nam=self.name,\n art=self.art,\n alb=self.albn,\n img=self.img)\n return h.status_code\n", "repo_name": "DedSecer/musec", "sub_path": "Musec.py", "file_name": "Musec.py", "file_ext": "py", "file_size_in_byte": 3323, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 22, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 28, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 35, "usage_type": "call"}, {"api_name": "re.match", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 44, "usage_type": "call"}, {"api_name": "tools.del_cn", "line_number": 52, "usage_type": "call"}, {"api_name": "html.unescape", "line_number": 52, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 69, "usage_type": "call"}, {"api_name": "tools.get_errcha", "line_number": 73, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "tools.set_info", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "74541513445", "text": "import pandas as pd\nimport os\nfrom sklearn.cluster import AgglomerativeClustering\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Exercise 7.1\n\ndf = pd.read_csv('people\\BeatrizLucas\\EFIplus_medit.zip',compression='zip', sep=\";\")\ndf = df.dropna()\n\n# Subset the database - Douro and Tejo basins\n\ndf = df[ (df['Catchment_name'] == 'Tejo') | (df['Catchment_name'] == \"Douro\")]\n\n# Subset the database - environmental variables\n\nenv_var = ['Altitude', 'Actual_river_slope', 'Elevation_mean_catch', 'prec_ann_catch', 'temp_ann', 'temp_jan', 'temp_jul']\ndf = df[env_var]\ndf = df.reset_index(drop=True)\nprint(df)\n\n# Agglomerative clustering using different linkage methods\nlinkage_methods = ['ward', 'complete', 'average', 'single'] # List of linkage methods to try\n\nfor method in linkage_methods:\n \n clustering = AgglomerativeClustering(n_clusters=3, affinity='euclidean', linkage=method)\n\n labels = clustering.fit_predict(df)\n\n # Print the cluster labels\n print(f\"Cluster labels using {method} linkage:\")\n print(labels)\n print()\n\n# Exercise 7.2. 
\n\nsns.clustermap(df, col_cluster=False, row_cluster=True, method='average')\nplt.show()", "repo_name": "isa-ulisboa/greends-avcd", "sub_path": "people/BeatrizLucas/avdc-exerc7.py", "file_name": "avdc-exerc7.py", "file_ext": "py", "file_size_in_byte": 1156, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.cluster.AgglomerativeClustering", "line_number": 28, "usage_type": "call"}, {"api_name": "seaborn.clustermap", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "12952448039", "text": "from selenium import webdriver\nimport unittest\nimport time\n\nclass Unitest(unittest.TestCase):\n\n def setUp(self):\n self.driver = webdriver.Chrome('chromedriver.exe')\n\n def test_link_text(self):\n driver = self.driver\n driver.get(\"http://www.w3schools.com/\")\n time.sleep(3)\n encontrar_link = driver.find_element_by_link_text(\"Learn PHP\")\n encontrar_link.click()\n\nif __name__ == '__main__':\n unittest.main()", "repo_name": "galarragah/TestAutomationPython", "sub_path": "Hiperlink_by_link_text.py", "file_name": "Hiperlink_by_link_text.py", "file_ext": "py", "file_size_in_byte": 454, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 5, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 8, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "39780988191", "text": "#Python script which will scrape the data from a website and output a random fact\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport random\n\nbase_url = 'https://www.did-you-knows.com/'\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nfacts=[]\n\ndef facts_add(soup):\n\tfor fact in soup.findAll('span', attrs={'class':'dykText'}):\n\t\tfacts.append(fact.text)\n\ndef next_page(soup):\n\tfor links in soup.findAll('div', attrs={'class':'pagePagintionLinks'}):\n\t\tif soup.findAll('a', attrs={'class':'next'}):\n\t\t\tnav=links.find_all('a')[-1].get('href')\n\t\t\treturn nav\n\ndef main():\n\tnext_pg=''\n\tProceed=True\n\ttry:\n\t\twhile Proceed:\n\t\t\turl=base_url+next_pg\n\t\t\tresponse = requests.get(url, headers=headers, timeout=2)\n\t\t\tif response.status_code != 200:\n\t\t\t\treturn False\n\t\t\tsoup = BeautifulSoup(response.content,'html.parser')\n\t\t\tfacts_add(soup)\n\t\t\tnext_pg=next_page(soup)\n\t\t\tif not next_pg:\n\t\t\t\tProceed=False\n\texcept requests.ConnectionError as e:\n\t\tprint(\"OOPS!! Connection Error. Make sure you are connected to Internet. Technical Details given below.\\n\")\n\t\tprint(str(e))\n\texcept requests.Timeout as e:\n\t\tprint(\"OOPS!! Timeout Error\")\n\t\tprint(str(e))\n\texcept requests.RequestException as e:\n\t\tprint(\"OOPS!! 
General Error\")\n\t\tprint(str(e))\n\texcept KeyboardInterrupt:\n\t\tprint(\"Someone closed the program\")\n\tfinally:\n\t\tprint(f\"Total Records = {len(facts)}\")\n\t\tprint(random.choice(facts).encode('ascii',errors='ignore').decode())\n\nif __name__=='__main__':\n\tmain()\n", "repo_name": "AnilReddy231/handy_scripts", "sub_path": "did_you_know.py", "file_name": "did_you_know.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.ConnectionError", "line_number": 35, "usage_type": "attribute"}, {"api_name": "requests.Timeout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "requests.RequestException", "line_number": 41, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "43490928767", "text": "import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport time\nimport copy\n\n# Device configuration\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\n# Hyper parameters\nnum_epochs = 30\nnum_classes = 10\nbatch_size = 10\nlearning_rate = 0.001\n\ndata_dir = './bulloying_dataset/'\ntrain_data_dir = './bulloying_dataset/train_data'\nval_data_dir = './bulloying_dataset/val_data'\n# data_tranforms = {\n# 'train_data' : transforms.Compose([\n# transforms.RandomResizedCrop(224),\n# transforms.RandomHorizontalFlip(),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ]),\n# 'val_data' : transforms.Compose([\n# transforms.Resize(256),\n# transforms.RandomResizedCrop(224),\n# transforms.RandomHorizontalFlip(),\n# transforms.ToTensor(),\n# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n# ])\n# }\n# my_image_datasets = {\n# x: datasets.ImageFolder(os.path.join(data_dir, x), data_tranforms[x])\n# for x in ['train_data', 'val_data']\n# }\n\n# my_dataloaders = {\n# x: torch.utils.data.DataLoader(my_image_datasets[x], batch_size=4, shuffle=True, num_workers=4) \n# for x in ['train_data', 'val_data']\n# }\n\n# dataset_size = {x: len(my_image_datasets[x]) for x in ['train_data', 'val_data']}\n\n# Traning Data\ntrain_data_transforms = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n])\n\ntrain_image_dataset = datasets.ImageFolder(train_data_dir, train_data_transforms)\n\ntrain_dataloaders = torch.utils.data.DataLoader(train_image_dataset, batch_size=batch_size, shuffle=True)\n\ntrain_datasize = len(train_image_dataset)\n\n# Evaluation Data\nval_data_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n #transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n])\nval_image_dataset = 
datasets.ImageFolder(val_data_dir, val_data_transforms)\n\nval_dataloaders = torch.utils.data.DataLoader(val_image_dataset, batch_size=batch_size, shuffle=False)\n\nval_datasize = len(val_image_dataset)\n\n# Building Convolutional Neural Network\nclass MyCNN(nn.Module):\n \"\"\"Some Information about MyCNN\"\"\"\n #regard input image size as 224X224\n def __init__(self, num_classes = 10):\n super(MyCNN, self).__init__()\n # Conv1\n self.layer1 = nn.Sequential(\n # if stride size =1 , padding size = (kernel_size -1)/2\n nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(16),\n nn.ReLU(True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.dropout = nn.Dropout(p=0.5)\n # Conv2\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(True),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n self.dropout = nn.Dropout(p=0.5)\n # Full Connected layer\n self.fc1 = nn.Linear(32*56*56, 10)\n self.fc_bn = nn.BatchNorm1d(10)\n #self.fc2 = nn.Linear(1024, 10)\n self.initialize_weights()\n\n def initialize_weights(self):\n # classname = m.__class__.__name__\n # if classname.find('Conv') != -1:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # Initialize weight by using xavier\n nn.init.xavier_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.01)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n elif isinstance(m, nn.Linear): \n nn.init.normal_(m.weight, 0, 0.01) \n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.reshape(-1, 32*56*56)\n out = self.fc1(out)\n #out = self.fc2(out)\n return out\n\ncnn = MyCNN() # generalize an instance\n#cnn.apply(initialize_weights) # apply weight initialize\n\nif torch.cuda.device_count() > 1:\n cnn = nn.DataParallel(cnn()).cuda()\nelse:\n cnn = cnn.cuda()\n\n# Loss and Optimizer\nloss_function = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(cnn.parameters(), lr = learning_rate)\n\n# Training the Model\ncnn.train()\ntotal_train_step = len(train_dataloaders)\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(train_dataloaders):\n images = images.cuda()\n labels = labels.cuda()\n\n # Forward pass\n outputs = cnn(images)\n loss = loss_function(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i+1)%100:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch+1, num_epochs, i+1, total_train_step, loss.item()))\n\n# Test the model\ncnn.eval()\nwith torch.no_grad():\n correct = 0\n total = 0\n\n for images, labels in val_dataloaders:\n images = images.cuda()\n labels = labels.cuda()\n outputs = cnn(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct +=(predicted == labels).sum().item()\n print('Test Accuracy of the model on test images: {} %'.format(100 * correct / total))\n\n# Save the model checkpoint\ntorch.save(cnn.state_dict(), 'mycnn.ckpt')\n", "repo_name": "mwfj/MyCnn_In_Pytorch", "sub_path": "myCNN.py", "file_name": "myCNN.py", "file_ext": "py", "file_size_in_byte": 6028, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 20, 
"usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 60, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 60, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 61, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 61, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 62, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 63, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 64, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 64, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 68, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 70, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 75, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 75, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 76, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 76, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 77, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 77, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 78, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 78, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 79, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 79, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 80, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 80, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 83, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 98, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 99, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 99, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 100, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 100, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 101, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 103, "usage_type": "call"}, 
{"api_name": "torch.nn", "line_number": 103, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 108, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 111, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 113, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm1d", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 114, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 122, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.nn.init.xavier_uniform_", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 124, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 126, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 129, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.init.normal_", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 130, "usage_type": "name"}, {"api_name": "torch.cuda.device_count", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 143, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 150, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "29790772217", "text": "import tensorflow as tf\n\nfrom typing import Optional, List\nfrom fastapi import FastAPI\nfrom 
pydantic import BaseModel\n\nfrom app.tokenizer import SequenceTokenizer\nfrom app.model import Encoder, BahdanauAttention, Decoder\nfrom app.utils import CustomUnpickler, load_object, add_sep_tokens, pad_sequence\nfrom app.config import (embedding_dim, units, max_len_encoder, max_len_decoder, checkpoint_dir, tokenizer_encode_path,\n tokenizer_decode_path)\n\n\napp = FastAPI()\n\ntokenizer_encode = CustomUnpickler(open(tokenizer_encode_path, 'rb')).load()\ntokenizer_decode = CustomUnpickler(open(tokenizer_decode_path, 'rb')).load()\n\n# tokenizer_encode = load_object(tokenizer_encode_path)\n# tokenizer_decode = load_object(tokenizer_decode_path)\n\nvocab_size_encode = len(tokenizer_encode.word2index) + 1\nvocab_size_decode = len(tokenizer_decode.word2index) + 1\n\nencoder = Encoder(vocab_size_encode, embedding_dim, units, 1)\nattention_layer = BahdanauAttention(10)\ndecoder = Decoder(vocab_size_decode, embedding_dim, units, 1)\n\noptimizer = tf.keras.optimizers.Adam()\n\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer,\n encoder=encoder,\n decoder=decoder)\ncheckpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))\n\n\ndef predict_v1(words: list):\n words = [add_sep_tokens([letter for letter in word]) for word in words]\n batch_size = len(words)\n result = [['']] * batch_size\n\n inputs = tokenizer_encode.transform(words)\n inputs = pad_sequence(inputs, max_len=max_len_encoder)\n inputs = tf.convert_to_tensor(inputs)\n\n hidden = tf.zeros((batch_size, units))\n enc_out, enc_hidden = encoder(inputs, hidden)\n\n dec_hidden = enc_hidden\n dec_input = tf.expand_dims([tokenizer_decode.word2index['']] * batch_size, 1)\n\n for t in range(max_len_decoder):\n predictions, dec_hidden, attention_weights = decoder(dec_input,\n dec_hidden,\n enc_out)\n prediction_ids = tf.argmax(predictions, axis=1)\n dec_input = tf.expand_dims(prediction_ids, 1)\n for index, id_ in enumerate(prediction_ids.numpy()):\n result[index] = result[index] + [tokenizer_decode.index2word[id_]]\n\n res = [[item for item in lst if item not in (\"\", \"\")] for lst in result]\n return res\n\n\nclass RequestItem(BaseModel):\n id: int\n word: str\n\n\nclass ResponseItem(BaseModel):\n id: int\n phonemes: list\n\n\nclass RequestItemList(BaseModel):\n result: List[RequestItem]\n\n\n@app.get(\"/\")\ndef root():\n return \"g2p_uk\"\n\n\n@app.post(\"/predict\")\ndef predict(items: List[RequestItem]):\n res = []\n for item in items:\n res.append({\"id\": item.id, \"phonemes\": predict_v1([item.word])})\n return res\n\n\n@app.post(\"/predict_list\")\ndef predict_list(items: List[str]):\n res = predict_v1(items)\n return res\n", "repo_name": "dsakovych/g2p_uk", "sub_path": "app/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2967, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "app.tokenizer", "line_number": 14, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 14, "usage_type": "call"}, {"api_name": "app.utils.CustomUnpickler", "line_number": 16, "usage_type": "call"}, {"api_name": "app.config.tokenizer_encode_path", "line_number": 16, "usage_type": "argument"}, {"api_name": "app.utils.CustomUnpickler", "line_number": 17, "usage_type": "call"}, {"api_name": "app.config.tokenizer_decode_path", "line_number": 17, "usage_type": "argument"}, {"api_name": "app.model.Encoder", "line_number": 25, "usage_type": "call"}, {"api_name": "app.config.embedding_dim", "line_number": 25, "usage_type": "argument"}, 
{"api_name": "app.config.units", "line_number": 25, "usage_type": "argument"}, {"api_name": "app.model.BahdanauAttention", "line_number": 26, "usage_type": "call"}, {"api_name": "app.model.Decoder", "line_number": 27, "usage_type": "call"}, {"api_name": "app.config.embedding_dim", "line_number": 27, "usage_type": "argument"}, {"api_name": "app.config.units", "line_number": 27, "usage_type": "argument"}, {"api_name": "tensorflow.keras.optimizers.Adam", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Checkpoint", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 34, "usage_type": "call"}, {"api_name": "app.config.checkpoint_dir", "line_number": 34, "usage_type": "argument"}, {"api_name": "tensorflow.train", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.utils.add_sep_tokens", "line_number": 38, "usage_type": "call"}, {"api_name": "app.utils.pad_sequence", "line_number": 43, "usage_type": "call"}, {"api_name": "app.config.max_len_encoder", "line_number": 43, "usage_type": "name"}, {"api_name": "tensorflow.convert_to_tensor", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "app.config.units", "line_number": 46, "usage_type": "name"}, {"api_name": "tensorflow.expand_dims", "line_number": 50, "usage_type": "call"}, {"api_name": "app.config.max_len_decoder", "line_number": 52, "usage_type": "argument"}, {"api_name": "tensorflow.argmax", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 57, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 65, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 70, "usage_type": "name"}, {"api_name": "pydantic.BaseModel", "line_number": 75, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 76, "usage_type": "name"}, {"api_name": "app.tokenizer.get", "line_number": 79, "usage_type": "call"}, {"api_name": "app.tokenizer", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "app.tokenizer.post", "line_number": 84, "usage_type": "call"}, {"api_name": "app.tokenizer", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 93, "usage_type": "name"}, {"api_name": "app.tokenizer.post", "line_number": 92, "usage_type": "call"}, {"api_name": "app.tokenizer", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "13747284036", "text": "import pygame\r\npygame.font.init()\r\n\r\n\r\nclass Button(pygame.sprite.Sprite):\r\n\tdef __init__(self, pos, size, index, win, font=pygame.font.Font(None, 30), shadow=(0, 0), text='', img=None, color=[(255, 255, 255), (220, 220, 220), (200, 200, 200)]):\r\n\t\tsuper().__init__()\r\n\t\tself.pos = pos\r\n\t\tself.orig_pos = pos\r\n\t\tself.size = size\r\n\t\tself.text = text\r\n\t\tself.img = img\r\n\t\tself.shadow = shadow\r\n\t\tself.rect = pygame.Rect(0, 0, 0, 0)\r\n\t\tself.color = color\r\n\t\tself.current_color = color[0]\r\n\r\n\t\tself.win = win\r\n\t\tself.font = font\r\n\r\n\t\tself.index = index\r\n\t\tself.clicked = False\r\n\r\n\t\tself.active = True\r\n\r\n\tdef display_button(self):\r\n\t\tif self.active:\r\n\t\t\tif self.img is None:\r\n\t\t\t\ttext_surf = self.font.render(self.text, 
True, 'Black')\r\n\t\t\t\ttext_rect = text_surf.get_rect(center=self.pos)\r\n\t\t\t\tself.rect = text_rect.inflate(self.size)\r\n\t\t\t\tpygame.draw.rect(self.win, 'Black', text_rect.inflate(self.size[0] + self.shadow[0], self.size[1] + self.shadow[1]), 0, 20)\r\n\t\t\t\tpygame.draw.rect(self.win, self.current_color, self.rect, 0, 20)\r\n\t\t\t\tself.win.blit(text_surf, text_rect)\r\n\t\t\telse:\r\n\t\t\t\timg_rect = self.img.get_rect(center=self.pos)\r\n\t\t\t\tself.rect = img_rect.inflate(self.size)\r\n\t\t\t\tself.win.blit(self.img, self.rect)\r\n\r\n\tdef check_button_collision(self):\r\n\t\tif self.rect.collidepoint(pygame.mouse.get_pos()):\r\n\t\t\tself.current_color = self.color[1]\r\n\t\t\tif pygame.mouse.get_pressed()[0]:\r\n\t\t\t\tself.current_color = self.color[2]\r\n\t\t\tif pygame.mouse.get_pressed()[0] and not self.clicked:\r\n\t\t\t\tself.clicked = True\r\n\t\t\t\treturn True\r\n\t\t\telif not pygame.mouse.get_pressed()[0] and self.clicked:\r\n\t\t\t\tself.clicked = False\r\n\t\telse:\r\n\t\t\tself.current_color = self.color[0]\r\n", "repo_name": "asd21342we423/age_incremental", "sub_path": "button.py", "file_name": "button.py", "file_ext": "py", "file_size_in_byte": 1662, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pygame.font.init", "line_number": 2, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 2, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 48, "usage_type": "attribute"}]} +{"seq_id": "27614774547", "text": "import contextlib\nimport io\nimport pathlib\nimport subprocess\nimport typing as t\n\nimport pytest\nfrom libtmux.server import Server\nfrom libtmux.session import Session\n\nfrom tmuxp import cli, exc\n\n\n@pytest.mark.parametrize(\"cli_cmd\", [[\"shell\"], [\"shell\", \"--pdb\"]])\n@pytest.mark.parametrize(\n \"cli_args,inputs,env,expected_output\",\n [\n (\n [\"-L{SOCKET_NAME}\", \"-c\", \"print(str(server.socket_name))\"],\n [],\n {},\n \"{SERVER_SOCKET_NAME}\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"{SESSION_NAME}\",\n \"-c\",\n \"print(session.name)\",\n ],\n [],\n {},\n \"{SESSION_NAME}\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"{SESSION_NAME}\",\n \"{WINDOW_NAME}\",\n \"-c\",\n \"print(server.has_session(session.name))\",\n ],\n [],\n {},\n \"True\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n 
\"{SESSION_NAME}\",\n \"{WINDOW_NAME}\",\n \"-c\",\n \"print(window.name)\",\n ],\n [],\n {},\n \"{WINDOW_NAME}\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"{SESSION_NAME}\",\n \"{WINDOW_NAME}\",\n \"-c\",\n \"print(pane.id)\",\n ],\n [],\n {},\n \"{PANE_ID}\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"-c\",\n \"print(pane.id)\",\n ],\n [],\n {\"TMUX_PANE\": \"{PANE_ID}\"},\n \"{PANE_ID}\",\n ),\n ],\n)\ndef test_shell(\n cli_cmd: t.List[str],\n cli_args: t.List[str],\n inputs: t.List[t.Any],\n expected_output: str,\n env: t.Dict[str, str],\n server: \"Server\",\n session: Session,\n tmp_path: pathlib.Path,\n monkeypatch: pytest.MonkeyPatch,\n capsys: pytest.CaptureFixture[str],\n) -> None:\n monkeypatch.setenv(\"HOME\", str(tmp_path))\n window_name = \"my_window\"\n window = session.new_window(window_name=window_name)\n window.split_window()\n\n assert window.attached_pane is not None\n\n template_ctx = {\n \"SOCKET_NAME\": server.socket_name,\n \"SESSION_NAME\": session.name,\n \"WINDOW_NAME\": window_name,\n \"PANE_ID\": window.attached_pane.id,\n \"SERVER_SOCKET_NAME\": server.socket_name,\n }\n\n cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]\n\n for k, v in env.items():\n monkeypatch.setenv(k, v.format(**template_ctx))\n\n monkeypatch.chdir(tmp_path)\n\n cli.cli(cli_args)\n result = capsys.readouterr()\n assert expected_output.format(**template_ctx) in result.out\n\n\n@pytest.mark.parametrize(\n \"cli_cmd\",\n [\n [\"shell\"],\n [\"shell\", \"--pdb\"],\n ],\n)\n@pytest.mark.parametrize(\n \"cli_args,inputs,env,template_ctx,exception,message\",\n [\n (\n [\"-LDoesNotExist\", \"-c\", \"print(str(server.socket_name))\"],\n [],\n {},\n {},\n subprocess.CalledProcessError,\n r\".*DoesNotExist.*\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"nonexistent_session\",\n \"-c\",\n \"print(str(server.socket_name))\",\n ],\n [],\n {},\n {\"session_name\": \"nonexistent_session\"},\n exc.TmuxpException,\n \"Session not found: nonexistent_session\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n \"{SESSION_NAME}\",\n \"nonexistent_window\",\n \"-c\",\n \"print(str(server.socket_name))\",\n ],\n [],\n {},\n {\"window_name\": \"nonexistent_window\"},\n exc.TmuxpException,\n \"Window not found: {WINDOW_NAME}\",\n ),\n ],\n)\ndef test_shell_target_missing(\n cli_cmd: t.List[str],\n cli_args: t.List[str],\n inputs: t.List[t.Any],\n env: t.Dict[t.Any, t.Any],\n template_ctx: t.Dict[str, str],\n exception: t.Union[\n t.Type[exc.TmuxpException], t.Type[subprocess.CalledProcessError]\n ],\n message: str,\n socket_name: str,\n server: \"Server\",\n session: Session,\n tmp_path: pathlib.Path,\n monkeypatch: pytest.MonkeyPatch,\n capsys: pytest.CaptureFixture[str],\n) -> None:\n monkeypatch.setenv(\"HOME\", str(tmp_path))\n window_name = \"my_window\"\n window = session.new_window(window_name=window_name)\n window.split_window()\n\n assert server.socket_name is not None\n assert session.name is not None\n\n template_ctx.update(\n {\n \"SOCKET_NAME\": server.socket_name,\n \"SESSION_NAME\": session.name,\n \"WINDOW_NAME\": template_ctx.get(\"window_name\", window_name),\n }\n )\n cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]\n\n for k, v in env.items():\n monkeypatch.setenv(k, v.format(**template_ctx))\n\n monkeypatch.chdir(tmp_path)\n\n if exception is not None:\n with pytest.raises(exception, match=message.format(**template_ctx)):\n cli.cli(cli_args)\n else:\n cli.cli(cli_args)\n result = capsys.readouterr()\n assert message.format(**template_ctx) in 
result.out\n\n\n@pytest.mark.parametrize(\n \"cli_cmd\",\n [\n # ['shell'],\n # ['shell', '--pdb'),\n [\"shell\", \"--code\"],\n # ['shell', '--bpython'],\n # ['shell', '--ptipython'],\n # ['shell', '--ptpython'],\n # ['shell', '--ipython'],\n ],\n)\n@pytest.mark.parametrize(\n \"cli_args,inputs,env,message\",\n [\n (\n [\n \"-L{SOCKET_NAME}\",\n ],\n [],\n {},\n \"(InteractiveConsole)\",\n ),\n (\n [\n \"-L{SOCKET_NAME}\",\n ],\n [],\n {\"PANE_ID\": \"{PANE_ID}\"},\n \"(InteractiveConsole)\",\n ),\n ],\n)\ndef test_shell_interactive(\n cli_cmd: t.List[str],\n cli_args: t.List[str],\n inputs: t.List[t.Any],\n env: t.Dict[str, str],\n message: str,\n server: \"Server\",\n session: Session,\n tmp_path: pathlib.Path,\n monkeypatch: pytest.MonkeyPatch,\n capsys: pytest.CaptureFixture[str],\n) -> None:\n monkeypatch.setenv(\"HOME\", str(tmp_path))\n window_name = \"my_window\"\n window = session.new_window(window_name=window_name)\n window.split_window()\n\n assert window.attached_pane is not None\n\n template_ctx = {\n \"SOCKET_NAME\": server.socket_name,\n \"SESSION_NAME\": session.name,\n \"WINDOW_NAME\": window_name,\n \"PANE_ID\": window.attached_pane.id,\n \"SERVER_SOCKET_NAME\": server.socket_name,\n }\n\n cli_args = cli_cmd + [cli_arg.format(**template_ctx) for cli_arg in cli_args]\n\n for k, v in env.items():\n monkeypatch.setenv(k, v.format(**template_ctx))\n\n monkeypatch.chdir(tmp_path)\n monkeypatch.setattr(\"sys.stdin\", io.StringIO(\"exit()\\r\"))\n with contextlib.suppress(SystemExit):\n cli.cli(cli_args)\n\n result = capsys.readouterr()\n assert message.format(**template_ctx) in result.err\n", "repo_name": "tmux-python/tmuxp", "sub_path": "tests/cli/test_shell.py", "file_name": "test_shell.py", "file_ext": "py", "file_size_in_byte": 7316, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3824, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 84, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 86, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 88, "usage_type": "attribute"}, {"api_name": "libtmux.session.Session", "line_number": 90, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "pytest.MonkeyPatch", "line_number": 92, "usage_type": "attribute"}, {"api_name": "pytest.CaptureFixture", "line_number": 93, "usage_type": "attribute"}, {"api_name": "tmuxp.cli.cli", "line_number": 117, "usage_type": "call"}, {"api_name": "tmuxp.cli", "line_number": 117, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 14, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 15, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 170, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 171, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 172, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 172, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 173, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 173, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 
174, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 175, "usage_type": "attribute"}, {"api_name": "typing.Type", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tmuxp.exc.TmuxpException", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tmuxp.exc", "line_number": 176, "usage_type": "name"}, {"api_name": "subprocess.CalledProcessError", "line_number": 176, "usage_type": "attribute"}, {"api_name": "libtmux.session.Session", "line_number": 181, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "pytest.MonkeyPatch", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pytest.CaptureFixture", "line_number": 184, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 209, "usage_type": "call"}, {"api_name": "tmuxp.cli.cli", "line_number": 210, "usage_type": "call"}, {"api_name": "tmuxp.cli", "line_number": 210, "usage_type": "name"}, {"api_name": "tmuxp.cli.cli", "line_number": 212, "usage_type": "call"}, {"api_name": "tmuxp.cli", "line_number": 212, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 122, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 122, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 129, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 129, "usage_type": "attribute"}, {"api_name": "subprocess.CalledProcessError", "line_number": 137, "usage_type": "attribute"}, {"api_name": "tmuxp.exc.TmuxpException", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tmuxp.exc", "line_number": 150, "usage_type": "name"}, {"api_name": "tmuxp.exc.TmuxpException", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tmuxp.exc", "line_number": 164, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 251, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 252, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 253, "usage_type": "attribute"}, {"api_name": "typing.Any", "line_number": 253, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 254, "usage_type": "attribute"}, {"api_name": "libtmux.session.Session", "line_number": 257, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 258, "usage_type": "attribute"}, {"api_name": "pytest.MonkeyPatch", "line_number": 259, "usage_type": "attribute"}, {"api_name": "pytest.CaptureFixture", "line_number": 260, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 283, "usage_type": "call"}, {"api_name": "contextlib.suppress", "line_number": 284, "usage_type": "call"}, {"api_name": "tmuxp.cli.cli", "line_number": 285, "usage_type": "call"}, {"api_name": "tmuxp.cli", "line_number": 285, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 217, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 217, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 229, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 229, "usage_type": "attribute"}]} +{"seq_id": "71373344804", "text": "from typing import Dict, List\nfrom django.http import HttpResponse\nfrom django.views.generic.base import View\nfrom django.shortcuts import render, redirect\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.contrib.auth import authenticate, login as loginUser\n\nfrom store.models import Size_Variant, Cart\nfrom 
store.forms.authforms import CustomerCreationForm\nfrom store.forms.authforms import CustomerAuthenticationForm\n\n\nclass RegistrationView(View):\n def get(self, request: WSGIRequest) -> HttpResponse:\n form: CustomerCreationForm = CustomerCreationForm()\n context: Dict = {\n \"form\": form\n }\n return render(request, 'store/registration.html', context=context)\n\n def post(self, request: WSGIRequest) -> HttpResponse:\n form: CustomerCreationForm = CustomerCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.email = user.username\n user.save()\n return redirect('login')\n\n context: Dict = {\n \"form\": form\n }\n return render(request, 'store/registration.html', context=context)\n\n\nclass LoginView(View):\n def get(self, request: WSGIRequest) -> HttpResponse:\n next_page: str = request.GET.get('next')\n if next_page:\n request.session['next_page'] = next_page\n\n form: CustomerAuthenticationForm = CustomerAuthenticationForm()\n context = {\n \"form\": form\n }\n return render(request, 'store/login.html', context=context)\n\n def post(self, request: WSGIRequest) -> HttpResponse:\n form: CustomerAuthenticationForm = CustomerAuthenticationForm(data=request.POST)\n if not form.is_valid():\n context: Dict = {\n \"form\": form\n }\n return render(request, 'store/login.html', context=context)\n\n username: str = form.cleaned_data.get(\"username\")\n password: str = form.cleaned_data.get(\"password\")\n user = authenticate(username=username, password=password)\n if not user:\n return\n\n loginUser(request, user)\n session_cart: List = request.session.get('cart')\n if session_cart:\n for c in session_cart:\n size: str = c.get('size')\n quantity: str = c.get('quantity')\n cloth_id: str = c.get('cloth')\n cart_obj: Cart = Cart()\n cart_obj.sizeVariant = Size_Variant.objects.get(size=size, cloth=cloth_id)\n cart_obj.quantity = quantity\n cart_obj.user = user\n cart_obj.save()\n\n cart: Cart = Cart.objects.filter(user=user)\n session_cart = []\n for cart_obj in cart:\n obj: Dict = {\n 'size': cart_obj.sizeVariant.size,\n 'cloth': cart_obj.sizeVariant.cloth.id,\n 'quantity': cart_obj.quantity\n }\n session_cart.append(obj)\n\n request.session['cart'] = session_cart\n next_page: str = request.session.get('next_page')\n if not next_page:\n next_page = \"Home\"\n if user.is_staff:\n next_page = \"/admin\"\n return redirect(next_page)\n\n\nclass LogoutView(View):\n def get(self, request: WSGIRequest) -> HttpResponse:\n request.session.clear()\n return redirect('Login')\n\n", "repo_name": "YogeshBisht2307/soch-apparels", "sub_path": "store/views/authentications.py", "file_name": "authentications.py", "file_ext": "py", "file_size_in_byte": 3374, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.views.generic.base.View", "line_number": 13, "usage_type": "name"}, {"api_name": "django.core.handlers.wsgi.WSGIRequest", "line_number": 14, "usage_type": "name"}, {"api_name": "store.forms.authforms.CustomerCreationForm", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 14, "usage_type": "name"}, {"api_name": "django.core.handlers.wsgi.WSGIRequest", "line_number": 21, "usage_type": "name"}, {"api_name": "store.forms.authforms.CustomerCreationForm", "line_number": 22, "usage_type": "name"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 32, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 21, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 35, "usage_type": "name"}, {"api_name": "django.core.handlers.wsgi.WSGIRequest", "line_number": 36, "usage_type": "name"}, {"api_name": "store.forms.authforms.CustomerAuthenticationForm", "line_number": 41, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 36, "usage_type": "name"}, {"api_name": "django.core.handlers.wsgi.WSGIRequest", "line_number": 47, "usage_type": "name"}, {"api_name": "store.forms.authforms.CustomerAuthenticationForm", "line_number": 48, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 61, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 62, "usage_type": "name"}, {"api_name": "store.models.Cart", "line_number": 68, "usage_type": "name"}, {"api_name": "store.models.Size_Variant.objects.get", "line_number": 69, "usage_type": "call"}, {"api_name": "store.models.Size_Variant.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "store.models.Size_Variant", "line_number": 69, "usage_type": "name"}, {"api_name": "store.models.Cart", "line_number": 74, "usage_type": "name"}, {"api_name": "store.models.Cart.objects.filter", "line_number": 74, "usage_type": "call"}, {"api_name": "store.models.Cart.objects", "line_number": 74, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 47, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 93, "usage_type": "name"}, {"api_name": "django.core.handlers.wsgi.WSGIRequest", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 96, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "71774273445", "text": "import pandas as pd\nimport streamlit as st\nimport plotly.graph_objects as go\n\nst.set_page_config(\n page_title = \"WDI dashboard\",\n layout = 'wide'\n)\n\n@st.cache\ndef loader():\n df = pd.read_csv('main.csv')\n df.fillna(0, inplace=True)\n return df\n\n\ndf = loader()\ndf2 = df.set_index('Country Name')\n\nst.markdown(\"
World Development Indicator
\", unsafe_allow_html=True)\nst.empty()\n\nhome, compare = st.tabs([\"home\", \"compare\"])\n\nwith home:\n country = st.sidebar.selectbox('Choose Country', df['Country Name'].unique())\n\n #Get all indicator names associated with the certain country\n #Some indicator names for some countries are missing\n indx = df[df['Country Name'] == country]['Indicator Name']\n\n indicator= st.sidebar.multiselect('Choose data', indx, default='Population, total')\n \n #Store all the plot details of a country\n data = []\n\n for ind in indicator:\n temp = df2.loc[df2['Indicator Name'] == ind]\n chart = go.Scatter(name=ind, x=temp.columns[2:], y=temp.loc[country][2:], mode='lines')\n data.append(chart)\n\n fig = go.Figure(data=data)\n st.plotly_chart(fig, use_container_width=True)\n\n\nwith compare:\n\n cnt_1, cnt_2 = st.columns(2)\n \n with cnt_1:\n country_filter_2 = st.selectbox(\"Select Country\", df['Country Name'].unique(), key=\"cmp2\")\n \n with cnt_2:\n ind_name = df[df['Country Name'] == country_filter_2]['Indicator Name']\n\n #As some countries have missing indicator names it helps to prevent error\n if len(ind_name)')\ndef transaction(lookup):\n a = Offer.query.filter_by(lookup=lookup).first()\n time_now = datetime.datetime.utcnow()\n i = False\n has_memo = False\n memo = None\n if datetime.datetime.utcnow() >= a.time + datetime.timedelta(minutes = 15):\n i = True\n else:\n i = False \n if a.original_crypto == \"XLM\" or a.original_crypto == \"USDC\":\n has_memo = True\n memo = a.system_key\n else:\n has_memo = False\n account = Crypto(a.original_crypto).init(a.system_key)\n bal = account.balance()\n if float(bal[\"confirmed\"]) >= a.amount_original and a.server_received == False:\n a.server_received = True\n db.session.commit() \n if bal[\"unconfirmed\"] == 0 and a.server_received == True:\n a.server_confirmed = True\n db.session.commit()\n if (a.server_received,a.server_confirmed) == (True,True) and a.server_sent == False:\n Base_account(a.converted_crypto).transact(recepient=a.send_to_address,amount=a.amount_converted)\n a.server_sent = True\n db.session.commit()\n if a.server_sent == True and bal[\"confirmed\"] > 0 and a.original_crypto == \"BCH\":\n temporary_account = Crypto(\"BCH\").init(a.system_key).sweep(\"bitcoincash:qz8v7kkv7pa2zqqjn8q82gp6xw3hdk0dtqfdg9vtga\")\n if a.server_sent == True and bal[\"confirmed\"] > 0 and a.original_crypto == \"BNB\":\n temporary_account = Crypto(\"BNB\").init(a.system_key)\n temporary_account.sweep(\"0xfBe1B4e1b5F63e55029Fa1F74A008A92fF6B7299\")\n url1 = f\"img/{a.original_crypto}.png\"\n url2 = f\"img/{a.converted_crypto}.png\"\n return render_template(\n 'transaction.html',\n id = lookup,\n original_crypto = a.original_crypto,\n converted_crypto = a.converted_crypto,\n rate = a.amount_original/a.amount_converted,\n amount_converted = a.amount_converted,\n time_now = time_now,\n time = a.time,\n expiry = a.time + datetime.timedelta(minutes = 15),\n is_expired = i,\n refund_address = a.refund_address,\n send_to = a.send_to_address,\n amount_original = a.amount_original,\n original_account_address = account.address,\n original_account_balance = bal[\"confirmed\"],\n has_memo = has_memo,\n memo = memo,\n confirmed_bal = bal[\"confirmed\"]-bal[\"unconfirmed\"],\n unconfirmed_bal = bal[\"unconfirmed\"],\n is_recv = a.server_received,\n is_conf = a.server_confirmed,\n is_sent = a.server_sent,\n url1=url1,\n url2=url2\n \n )\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n", "repo_name": "amrithcorp/ProofOfConcepts", "sub_path": "arex/app.py", 
"file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6255, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_qrcode.QRcode", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "attribute"}, {"api_name": "yfinance.Ticker", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 54, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 55, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 56, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 58, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 59, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 60, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 61, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "crypto.Base_account", "line_number": 71, "usage_type": "call"}, {"api_name": "secrets.token_hex", "line_number": 76, "usage_type": "call"}, {"api_name": "crypto.Crypto", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 96, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 96, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 96, "usage_type": "call"}, {"api_name": "crypto.Crypto", "line_number": 105, "usage_type": "call"}, {"api_name": "crypto.Base_account", "line_number": 114, "usage_type": "call"}, {"api_name": "crypto.Crypto", 
"line_number": 118, "usage_type": "call"}, {"api_name": "crypto.Crypto", "line_number": 120, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 124, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "11148811289", "text": "import torch\nfrom network.PSPNet import PSPNet\nn_classes = 3\nuse_aux = True\n\n# basically load model with state dict in Pytorch\n# model.load_state_dict(torch.load(\"./checkpoints/deneme_aux_multiclass.pth\"))\n\n\n\n\nmodel = PSPNet(layers=50,num_classes = n_classes,training=False,pretrained=False,use_aux=use_aux)\nstate_dict = torch.load(\"./checkpoints/deneme_aux_multiclass.pth\")\n\n\n# to show key and weights\n# for k,v in state_dict.items():\n# print(k)#weights\n# print(v)#keys\n\nupdate_dict = state_dict.copy()\n\n#delete keys that consist of \"aux\" \nfor k in state_dict:\n if \"aux\" in k:\n del update_dict[k]\n\nfor k,v in update_dict.items():\n print(k)#weights\n print(v)#keys\n\n# load new state_dict with strict flag. With strict=False flag, load state_dict function ignore non-matching keys(state dict keys and weights) \nmodel.load_state_dict(update_dict,strict=False)\n\nprint(model.load_state_dict(update_dict,strict=False).missing_keys)\n\n\n# #test\n# for k in model.state_dict():\n# print(k)\n\n\n", "repo_name": "burakalperen/Pytorch-Semantic-Segmentation", "sub_path": "PSPNet/utils/load_partial_torch_stateDict_model.py", "file_name": "load_partial_torch_stateDict_model.py", "file_ext": "py", "file_size_in_byte": 1008, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "network.PSPNet.PSPNet", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "18173961485", "text": "#imports\nfrom pydantic import BaseModel, Field\nfrom fastapi import FastAPI, Request, Response,status\nfrom pymongo import MongoClient\nfrom fastapi.encoders import jsonable_encoder\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi import FastAPI, Form, Cookie, Depends\nfrom starlette.responses import RedirectResponse, Response\nimport datetime,time\n\n#MongoDB Connection info\nclient = MongoClient(\"mongodb+srv://raquel:libelula46@cluster0.5ilbxxp.mongodb.net\")\n\n#Database\nbike_db = client['rental2']\n#Collection\nbike_collection = bike_db['bikes']\n\n#Model\nclass Bike(BaseModel):\n id: int\n model: str\n brand: str\n features: str \n year: int \n size: str \n availability: bool\n Price_day: list \n location_latitude: float\n location_length: float\n image: str\n \n \n\n#Initialize\napp = FastAPI()\n\n#Static file serv\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n#Jinja2 Template directory\ntemplates = Jinja2Templates(directory=\"templates\")\n\n@app.get(\"/\", response_class=HTMLResponse)\ndef home_page(request: Request):\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n@app.get(\"/bike/{id}\", response_class=HTMLResponse)\ndef read_bike(request: Request, id: int):\n print(f'find bike called with id :{id}')\n result = bike_collection.find_one({'id': id})\n print(result['model'])\n return templates.TemplateResponse(\"view_bike.html\", {\"request\": request, \"bike\": result})\n\n@app.get(\"/bike\", response_class=HTMLResponse)\ndef read_all_bike(request: Request):\n result = 
bike_collection.find({})\n print(result)\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"bike_list\": result})\n\n@app.get(\"/createui\", response_class=HTMLResponse)\nasync def create_bike_ui(request: Request):\n return templates.TemplateResponse(\"new_bike.html\", {\"request\": request})\n\n\n@app.post(\"/create\",response_class=HTMLResponse)\n\ndef create_bike(request:Request,id:int = Form(...), model:str = Form(...),brand:str = Form(...),features:str = Form(...),year:int = Form(...),size:int = Form(...), availability:bool = Form(...), Price_day:list = Form(...), location_latitude: float= Form(...), location_length: float=Form(...), image: str=Form(...)):\n print(f'id :{int(id)} model: {str(model)} brand:{str(brand)} features: {str(features)} year: {int(year)} size: {str(size)}, availability: {bool(availability)} Price_day: {str(Price_day)} location_latitude: {str(location_latitude)}, location_length {str(location_length)}')\n #initialize the model\n \n bike = Bike(id=id,model=model,brand=brand,features=features,year=year,size=size, availability=availability, Price_day=Price_day, location_latitude=location_latitude, location_length=location_length, image=image)\n print(str(bike.dict()))\n bike = jsonable_encoder(bike)\n bike_collection.insert_one(bike)\n print(\" Bike added : now db id \" + str(id))\n time.sleep(1)\n result = bike_collection.find({})\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"bike_list\": result})\n\n\n@app.get(\"/bike/delete/{id}\",response_class=HTMLResponse)\ndef delete_bike(id:int,request:Request):\n print(\" delete bike method called :\"+str(id))\n result = bike_collection.delete_one({'id':id})\n time.sleep(1)\n result = bike_collection.find({})\n print(result)\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"bike_list\": result})\n\n@app.get(\"/bike/edit/{id}\",response_class=HTMLResponse)\ndef edit_bike(id:int,request:Request):\n print(\" method called :\"+str(id))\n result = bike_collection.find_one({'id':id})\n return templates.TemplateResponse(\"edit_bike.html\", {\"request\": request, \"bike\": result})\n\n@app.post(\"/update\",response_class=HTMLResponse)\ndef update_bike(request:Request,id:int = Form(...), model:str = Form(...),brand:str = Form(...),features:str = Form(...),year:int = Form(...),size:str = Form(...), availability:bool = Form(...), Price_day:list = Form(...), location_latitude: float= Form(...), location_length: float=Form(...), image: str=Form(...)):\n print('id :'+str(id))\n print('model '+str(model))\n print('brand ' + str(brand))\n print('features ' + str(features))\n print('year ' + str(year))\n print('size ' + str(size))\n print('availability ' + str(availability))\n print('Price_day ' + str(Price_day))\n print('location_latitude ' + str(location_latitude))\n print('location_length ' + str(location_length))\n print('image' + str(image))\n #initialize the model\n bike = Bike(id=id,model=model,brand=brand,features=features,year=year,size=size, availability=availability, Price_day=Price_day, location_latitude=location_latitude, location_length=location_length, image=image)\n print(str(bike.dict()))\n #call internal api\n update_api(bike)\n time.sleep(1)\n #get the updated list\n result = bike_collection.find({})\n print(str(result))\n return templates.TemplateResponse(\"index.html\", {\"request\": request, \"bike_list\": result})\n\n\n@app.put(\"/updateapi\",status_code=202)\ndef update_api(bike:Bike):\n print('Update api 
called....'+str(bike.model))\n result = bike_collection.update_one({'id':bike.id},{\"$set\" : {'model':bike.model,'brand':bike.brand, 'features':bike.features, 'year':bike.year, 'size':bike.size, 'availability':bike.availability,'Price_day':bike.Price_day, 'location_latitude': bike.location_latitude, 'location_length': bike.location_length, 'image': bike.image}})\n return \"UPDATE SUCCESS\"\n\n", "repo_name": "Raquel43/fastapi_project", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 14, "usage_type": "call"}, {"api_name": "pydantic.BaseModel", "line_number": 22, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 38, "usage_type": "call"}, {"api_name": "fastapi.staticfiles.StaticFiles", "line_number": 41, "usage_type": "call"}, {"api_name": "fastapi.templating.Jinja2Templates", "line_number": 43, "usage_type": "call"}, {"api_name": "fastapi.Request", "line_number": 46, "usage_type": "name"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 45, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 50, "usage_type": "name"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 49, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 57, "usage_type": "name"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 56, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 63, "usage_type": "name"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 62, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 69, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 69, "usage_type": "call"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 75, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 67, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 84, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 83, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 93, "usage_type": "name"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 92, "usage_type": "name"}, {"api_name": "fastapi.Request", "line_number": 99, "usage_type": "name"}, {"api_name": "fastapi.Form", "line_number": 99, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 116, "usage_type": "call"}, {"api_name": "fastapi.responses.HTMLResponse", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "28908729289", "text": "import paramiko\n\ndef uploadToFTP(fileName, uploadData):\n host, port = uploadData.ftpip, uploadData.ftpport\n user, pasw = uploadData.ftpuser, uploadData.ftppas\n fileDir = \"Results/\" + fileName\n fileEndDir = \"/home/akademija/ftp/\" + fileName\n\n try:\n transport = paramiko.Transport((host, port))\n transport.connect(None, user, pasw)\n sftp = paramiko.SFTPClient.from_transport(transport)\n except:\n print(\"Failed to connect to the FTP server\")\n quit()\n\n try:\n sftp.put(fileDir, fileEndDir)\n except:\n print(\"Failed to upload file to FTP server\")\n quit()\n\n if sftp: sftp.close()\n if transport: transport.close()", "repo_name": "KrittyKrat/AT-Command-Testing", "sub_path": 
"modules/uploadFTP.py", "file_name": "uploadFTP.py", "file_ext": "py", "file_size_in_byte": 690, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "paramiko.Transport", "line_number": 10, "usage_type": "call"}, {"api_name": "paramiko.SFTPClient.from_transport", "line_number": 12, "usage_type": "call"}, {"api_name": "paramiko.SFTPClient", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "33998663720", "text": "\"\"\"Slope transformer.\"\"\"\nimport math\nimport statistics\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.datatypes import convert\nfrom sktime.transformations.base import BaseTransformer\n\n__all__ = [\"SlopeTransformer\"]\n__author__ = [\"mloning\"]\n\n\nclass SlopeTransformer(BaseTransformer):\n \"\"\"Slope-by-segment transformation.\n\n Class to perform the Slope transformation on a time series\n dataframe. It splits a time series into num_intervals segments.\n Then within each segment, it performs a total least\n squares regression to extract the gradient of the segment.\n\n Parameters\n ----------\n num_intervals : int, number of approx equal segments\n to split the time series into.\n \"\"\"\n\n _tags = {\n \"scitype:transform-input\": \"Series\",\n # what is the scitype of X: Series, or Panel\n \"scitype:transform-output\": \"Series\",\n # what scitype is returned: Primitives, Series, Panel\n \"scitype:instancewise\": False, # is this an instance-wise transform?\n \"X_inner_mtype\": \"nested_univ\", # which mtypes do _fit/_predict support for X?\n \"y_inner_mtype\": \"None\", # which mtypes do _fit/_predict support for X?\n \"fit_is_empty\": True,\n \"capability:unequal_length:removes\": True,\n # is transform result always guaranteed to be equal length (and series)?\n }\n\n def __init__(self, num_intervals=8):\n self.num_intervals = num_intervals\n super().__init__()\n\n def _transform(self, X, y=None):\n \"\"\"Transform X and return a transformed version.\n\n private _transform containing core logic, called from transform\n\n Parameters\n ----------\n X : nested pandas DataFrame of shape [n_instances, n_features]\n each cell of X must contain pandas.Series\n Data to fit transform to\n y : ignored argument for interface compatibility\n Additional data, e.g., labels for transformation\n\n Returns\n -------\n Xt : nested pandas DataFrame of shape [n_instances, n_features]\n each cell of Xt contains pandas.Series\n transformed version of X\n \"\"\"\n # Get information about the dataframe\n n_timepoints = len(X.iloc[0, 0])\n num_instances = X.shape[0]\n col_names = X.columns\n\n self._check_parameters(n_timepoints)\n\n Xt = pd.DataFrame()\n\n for x in col_names:\n # Convert one of the columns in the dataframe to numpy array\n arr = convert(\n pd.DataFrame(X[x]),\n from_type=\"nested_univ\",\n to_type=\"numpyflat\",\n as_scitype=\"Panel\",\n )\n\n # Calculate gradients\n transformedData = []\n for y in range(num_instances):\n res = self._get_gradients_of_lines(arr[y])\n transformedData.append(res)\n\n # Convert to Numpy array\n transformedData = np.asarray(transformedData)\n\n # Add it to the dataframe\n colToAdd = []\n for i in range(len(transformedData)):\n inst = transformedData[i]\n colToAdd.append(pd.Series(inst))\n\n Xt[x] = colToAdd\n\n return Xt\n\n def _get_gradients_of_lines(self, X):\n \"\"\"Get gradients of lines.\n\n Function to get the gradients of the line of best fits\n given a time series.\n\n Parameters\n ----------\n X : a numpy array of shape = 
[time_series_length]\n\n Returns\n -------\n gradients : a numpy array of shape = [num_intervals].\n It contains the gradients of the line of best fit\n for each interval in a time series.\n \"\"\"\n # Firstly, split the time series into approx equal length intervals\n splitTimeSeries = self._split_time_series(X)\n gradients = []\n\n for x in range(len(splitTimeSeries)):\n gradients.append(self._get_gradient(splitTimeSeries[x]))\n\n return gradients\n\n def _get_gradient(self, Y):\n \"\"\"Get gradient of lines.\n\n Function to get the gradient of the line of best fit given a\n section of a time series.\n\n Equation adopted from:\n real-statistics.com/regression/total-least-squares\n\n Parameters\n ----------\n Y : a numpy array of shape = [interval_size]\n\n Returns\n -------\n m : an int corresponding to the gradient of the best fit line.\n \"\"\"\n # Create a list that contains 1,2,3,4,...,len(Y) for the x coordinates.\n X = [(i + 1) for i in range(len(Y))]\n\n # Calculate the mean of both lists\n meanX = statistics.mean(X)\n meanY = statistics.mean(Y)\n\n # Calculate the list (yi-mean(y))^2\n yminYbar = [(y - meanY) ** 2 for y in Y]\n # Calculate the list (xi-mean(x))^2\n xminXbar = [(x - meanX) ** 2 for x in X]\n\n # Sum them to produce w.\n w = sum(yminYbar) - sum(xminXbar)\n\n # Calculate the list (xi-mean(x))*(yi-mean(y))\n temp = []\n for x in range(len(X)):\n temp.append((X[x] - meanX) * (Y[x] - meanY))\n\n # Sum it and multiply by 2 to calculate r\n r = 2 * sum(temp)\n\n if r == 0:\n # remove nans\n m = 0\n else:\n # Gradient is defined as (w+sqrt(w^2+r^2))/r\n m = (w + math.sqrt(w**2 + r**2)) / r\n\n return m\n\n def _split_time_series(self, X):\n \"\"\"Split a time series into approximately equal intervals.\n\n Adopted from = https://stackoverflow.com/questions/2130016/\n splitting-a-list-into-n-parts-of-approximately\n -equal-length\n\n Parameters\n ----------\n X : a numpy array of shape = [time_series_length]\n\n Returns\n -------\n output : a numpy array of shape = [num_intervals,interval_size]\n \"\"\"\n avg = len(X) / float(self.num_intervals)\n output = []\n beginning = 0.0\n\n while beginning < len(X):\n output.append(X[int(beginning) : int(beginning + avg)])\n beginning += avg\n\n return output\n\n def _check_parameters(self, n_timepoints):\n \"\"\"Check values of parameters for Slope transformer.\n\n Throws\n ------\n ValueError or TypeError if a parameters input is invalid.\n \"\"\"\n if isinstance(self.num_intervals, int):\n if self.num_intervals <= 0:\n raise ValueError(\n \"num_intervals must have the value \\\n of at least 1\"\n )\n if self.num_intervals > n_timepoints:\n raise ValueError(\n \"num_intervals cannot be higher than \\\n subsequence_length\"\n )\n else:\n raise TypeError(\n \"num_intervals must be an 'int'. 
Found '\"\n + type(self.num_intervals).__name__\n + \"'instead.\"\n )\n", "repo_name": "sktime/sktime", "sub_path": "sktime/transformations/panel/slope.py", "file_name": "slope.py", "file_ext": "py", "file_size_in_byte": 7080, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7028, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sktime.transformations.base.BaseTransformer", "line_number": 15, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "sktime.datatypes.convert", "line_number": 76, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 90, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 96, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 148, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 149, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "39401558402", "text": "# coding: utf-8\n\n\"\"\"\n Marketing API v.1.0\n\n IMPORTANT: This swagger links to Criteo production environment. Any test applied here will thus impact real campaigns. # noqa: E501\n\n The version of the OpenAPI document: v.1.0\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AudiencePatchResponse(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'operation': 'str',\n 'request_date': 'datetime',\n 'schema': 'str',\n 'nb_valid_identifiers': 'int',\n 'nb_invalid_identifiers': 'int',\n 'sample_invalid_identifiers': 'list[str]'\n }\n\n attribute_map = {\n 'operation': 'operation',\n 'request_date': 'requestDate',\n 'schema': 'schema',\n 'nb_valid_identifiers': 'nbValidIdentifiers',\n 'nb_invalid_identifiers': 'nbInvalidIdentifiers',\n 'sample_invalid_identifiers': 'sampleInvalidIdentifiers'\n }\n\n def __init__(self, operation=None, request_date=None, schema=None, nb_valid_identifiers=None, nb_invalid_identifiers=None, sample_invalid_identifiers=None): # noqa: E501\n \"\"\"AudiencePatchResponse - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._operation = None\n self._request_date = None\n self._schema = None\n self._nb_valid_identifiers = None\n self._nb_invalid_identifiers = None\n self._sample_invalid_identifiers = None\n self.discriminator = None\n\n if operation is not None:\n self.operation = operation\n if request_date is not None:\n self.request_date = request_date\n if schema is not None:\n self.schema = schema\n if nb_valid_identifiers is not None:\n self.nb_valid_identifiers = nb_valid_identifiers\n if nb_invalid_identifiers is not None:\n self.nb_invalid_identifiers = nb_invalid_identifiers\n if sample_invalid_identifiers is not None:\n self.sample_invalid_identifiers = sample_invalid_identifiers\n\n @property\n def operation(self):\n \"\"\"Gets the operation of this AudiencePatchResponse. # noqa: E501\n\n The Operation recorded. # noqa: E501\n\n :return: The operation of this AudiencePatchResponse. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._operation\n\n @operation.setter\n def operation(self, operation):\n \"\"\"Sets the operation of this AudiencePatchResponse.\n\n The Operation recorded. # noqa: E501\n\n :param operation: The operation of this AudiencePatchResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._operation = operation\n\n @property\n def request_date(self):\n \"\"\"Gets the request_date of this AudiencePatchResponse. # noqa: E501\n\n When the Operation was recorded. # noqa: E501\n\n :return: The request_date of this AudiencePatchResponse. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._request_date\n\n @request_date.setter\n def request_date(self, request_date):\n \"\"\"Sets the request_date of this AudiencePatchResponse.\n\n When the Operation was recorded. # noqa: E501\n\n :param request_date: The request_date of this AudiencePatchResponse. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._request_date = request_date\n\n @property\n def schema(self):\n \"\"\"Gets the schema of this AudiencePatchResponse. # noqa: E501\n\n The schema specified for the identifiers. # noqa: E501\n\n :return: The schema of this AudiencePatchResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._schema\n\n @schema.setter\n def schema(self, schema):\n \"\"\"Sets the schema of this AudiencePatchResponse.\n\n The schema specified for the identifiers. # noqa: E501\n\n :param schema: The schema of this AudiencePatchResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._schema = schema\n\n @property\n def nb_valid_identifiers(self):\n \"\"\"Gets the nb_valid_identifiers of this AudiencePatchResponse. # noqa: E501\n\n\n :return: The nb_valid_identifiers of this AudiencePatchResponse. # noqa: E501\n :rtype: int\n \"\"\"\n return self._nb_valid_identifiers\n\n @nb_valid_identifiers.setter\n def nb_valid_identifiers(self, nb_valid_identifiers):\n \"\"\"Sets the nb_valid_identifiers of this AudiencePatchResponse.\n\n\n :param nb_valid_identifiers: The nb_valid_identifiers of this AudiencePatchResponse. # noqa: E501\n :type: int\n \"\"\"\n\n self._nb_valid_identifiers = nb_valid_identifiers\n\n @property\n def nb_invalid_identifiers(self):\n \"\"\"Gets the nb_invalid_identifiers of this AudiencePatchResponse. # noqa: E501\n\n\n :return: The nb_invalid_identifiers of this AudiencePatchResponse. # noqa: E501\n :rtype: int\n \"\"\"\n return self._nb_invalid_identifiers\n\n @nb_invalid_identifiers.setter\n def nb_invalid_identifiers(self, nb_invalid_identifiers):\n \"\"\"Sets the nb_invalid_identifiers of this AudiencePatchResponse.\n\n\n :param nb_invalid_identifiers: The nb_invalid_identifiers of this AudiencePatchResponse. # noqa: E501\n :type: int\n \"\"\"\n\n self._nb_invalid_identifiers = nb_invalid_identifiers\n\n @property\n def sample_invalid_identifiers(self):\n \"\"\"Gets the sample_invalid_identifiers of this AudiencePatchResponse. # noqa: E501\n\n Optionnal. A sample of invalid identifiers if there is some. # noqa: E501\n\n :return: The sample_invalid_identifiers of this AudiencePatchResponse. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._sample_invalid_identifiers\n\n @sample_invalid_identifiers.setter\n def sample_invalid_identifiers(self, sample_invalid_identifiers):\n \"\"\"Sets the sample_invalid_identifiers of this AudiencePatchResponse.\n\n Optionnal. A sample of invalid identifiers if there is some. # noqa: E501\n\n :param sample_invalid_identifiers: The sample_invalid_identifiers of this AudiencePatchResponse. 
# noqa: E501\n :type: list[str]\n \"\"\"\n\n self._sample_invalid_identifiers = sample_invalid_identifiers\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AudiencePatchResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n", "repo_name": "criteo/criteo-python-marketing-sdk", "sub_path": "criteo_marketing/models/audience_patch_response.py", "file_name": "audience_patch_response.py", "file_ext": "py", "file_size_in_byte": 8006, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "52", "api": [{"api_name": "six.iteritems", "line_number": 213, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "10868314231", "text": "import numpy as np\nimport matplotlib.pyplot as plt \n## 1\n# y = np.random.randint(1,100, 50)\n# # plt.plot(y, 'ro') # ‘ro’ represents color (r) and marker (o)\n# plt.plot(y, 'red', marker = 'o')\n# plt.show()\n\n##2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmonths = np.array(['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'])\nsales = np.array([241268.56, 184837.36, 263100.77, 242771.86, 288401.05, 401814.06, 258705.68, 456619.94, 481157.24, 422766.63, 555279.03, 503143.69])\n\nplt.plot(months, sales)\n\n# Adding and formatting title\nplt.title(\"Sales across 2015\\n\", fontdict={'fontsize': 20, 'fontweight' : 5, 'color' : 'Green'})\n\n# Labeling Axes\nplt.xlabel(\"Months\", fontdict={'fontsize': 12, 'fontweight' : 5, 'color' : 'Brown'})\nplt.ylabel(\"Sales\", fontdict={'fontsize': 12, 'fontweight' : 5, 'color' : 'Brown'} )\n\nticks = np.arange(0, 600000, 50000)\nlabels = [\"{}K\".format(i//1000) for i in ticks]\nplt.yticks(ticks, labels)\n\nplt.xticks(rotation=90)\n\nfor xy in zip(months, sales):\n plt.annotate(text = \"{}K\".format(xy[1]//1000), xy = xy, textcoords='data')\n\nplt.show()", "repo_name": "reshmimrk/upgrad_python", "sub_path": "Datatoolkit/module5/line_handson.py", "file_name": "line_handson.py", "file_ext": "py", "file_size_in_byte": 1157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", 
"line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.annotate", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "74078298085", "text": "import requests\nKEY = \"a5264c2c15fe7ddde568e96bfae26847\"\ndef get_lat_and_lon(name = \"London\"):\n city_name = name\n res = requests.get(f\"http://api.openweathermap.org/geo/1.0/direct?q={city_name}&appid={KEY}\").json()\n return {\"lat\" : res[0][\"lat\"], \"lon\" :res[0][\"lon\"]}\ndef get_weather(geoloc):\n lat = geoloc[\"lat\"]\n lon = geoloc[\"lon\"]\n res = requests.get(f\"https://api.openweathermap.org/data/3.0/onecall?lat={lat}&lon={lon}&appid={KEY}]\").json()\n return res\n\n\ngeoloc = get_lat_and_lon()\nw = get_weather(geoloc)\nprint(w)\n\n", "repo_name": "OkomuraSan/TelegramBot", "sub_path": "Мусор/get_weather.py", "file_name": "get_weather.py", "file_ext": "py", "file_size_in_byte": 544, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "8630373927", "text": "# Custom JSON Serialization\n# As we saw in the previous video, certain data types cannot be serialized to JSON using Python's defaults.\n# Here's a simple example of this:\n\nfrom datetime import datetime\n\ncurrent = datetime.utcnow()\n\nprint(current)\n# datetime.datetime(2018, 12, 29, 22, 26, 35, 671836)\n\nprint('#' * 52 + ' As we can see, this is a `datetime` object.')\n\nimport json\n\n# json.dumps(current) # TypeError: Object of type 'datetime' is not JSON serializable\n# TypeError: Object of type 'datetime' is not JSON serializable\n# As we can see Python raises a TypeError exception, stating that datetime objects are not JSON serializable.\n# So, we'll need to come up with our own serialization format.\n# For datetimes, the most common format is the ISO 8601 format - you can read up more about it here\n# (https://en.wikipedia.org/wiki/ISO_8601), but basically the format is:\n# YYYY-MM-DD T HH:MM:SS\n# There are some variations for encoding timezones, but to keep things simple I am going to use timezone naive\n# timestamps, and just use UTC everywhere.\n# We could use Python's string representation for datetimes:\n\nprint()\nprint('#' * 52 + ' We could use Pythons string representation for datetimes:')\nprint(str(current))\n# '2018-12-29 22:26:35.671836'\n# 
######################################################################################################################\n\nprint('#' * 52 + ' but this is not quite ISO-8601. We could write a custom formatter ourselves:')\n\ndef format_iso(dt):\n return dt.strftime('%Y-%m-%dT%H:%M:%S')\n\n# (If you want more info and options on date and time formatting/parsing using strftime and strptime,\n# which essentially pass through to their C counterparts, you can see the Python docs here:\n# https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior)\n\nprint(format_iso(current))\n# '2018-12-29T22:26:35'\n# ######################################################################################################################\n\nprint('#' * 52 + ' But Python actually provides us a function to do the same:')\n\nprint(current.isoformat())\n# 2018-12-29T22:26:35.671836'\n# ######################################################################################################################\n\n# This is almost identical to our custom representation, but also includes fractional seconds.\n# If you don't want fractional seconds in your representation, then you'll have to write some custom code like the one\n# above. I'm just going to use Python's ISO-8601 representation. And now let's serialize our datetime object to JSON:\nprint('#' * 52 + ' This is'\n ' almost identical to our custom representation, but also includes fractional seconds.')\nprint('#' * 52 + ' If you dont want fractional seconds in your representation, '\n ' then you will have to write some custom code like the one above.')\nprint()\n\nlog_record = {'time': datetime.utcnow().isoformat(), 'message': 'testing'}\nprint(json.dumps(log_record))\n# '{\"time\": \"2018-12-29T22:26:42.083020\", \"message\": \"testing\"}'\n# ######################################################################################################################\n\nprint('#' * 52 + ' OK, this works, but this is far from ideal.')\nprint('#' * 52 + ' Normally, our dictionary will contain the `datetime` object, not its string representation.')\n\nlog_record = {'time': datetime.utcnow(), 'message': 'testing'}\n# The problem is that log_record is now not JSON serializable!\n# What we have to do is write custom code to replace non-JSON serializable objects in our dictionary with custom\n# representations. 
This can quickly become tedious and unmanageable if we deal with many dictionaries,\n# and arbitrary structures.\n# Fortunately, Python's dump and dumps functions have some ways for us to define general serializations for\n# non-standard JSON objects.\n# The simplest way is to specify a function that dump/dumps will call when it encounters something it cannot serialize\n\ndef format_iso(dt):\n return dt.isoformat()\n\nprint(json.dumps(log_record, default=format_iso))\n# '{\"time\": \"2018-12-29T22:26:42.532485\", \"message\": \"testing\"}'\n# ######################################################################################################################\n\nprint('#' * 52 + ' This will work even if we have more than one date in our dictionary:')\n\nlog_record = {\n 'time1': datetime.utcnow(),\n 'time2': datetime.utcnow(),\n 'message': 'Testing...'\n}\n\nprint(json.dumps(log_record, default=format_iso))\n# '{\"time1\": \"2018-12-29T22:26:43.296170\", \"time2\": \"2018-12-29T22:26:43.296171\", \"message\": \"Testing...\"}'\n# ######################################################################################################################\n\nprint('#' * 52 + ' So this works, but what happens if we introduce another non-serializable object:')\n\nlog_record = {\n 'time': datetime.utcnow(),\n 'message': 'Testing...',\n 'other': {'a', 'b', 'c'}\n}\n\n# json.dumps(log_record, default=format_iso) # AttributeError: 'set' object has no attribute 'isoformat'\n# AttributeError: 'set' object has no attribute 'isoformat'\n# As you can see, Python encountered that set, and therefore called the default callable - but that callable was not\n# designed to handle sets, and so we end up with an exception in the format_iso callable instead\n# We can remedy this by essentially adding code to our function to make it handle various data types.\n# Essentially creating a dispatcher - this should remind you of the single-dispatch generic function decorator\n# available in the functools module which we discussed in an earlier part of this series. 
You can also view more\n# info about it here: https://docs.python.org/3/library/functools.html#functools.singledispatch\n# Let's first write it without the decorator to make sure we have our code correct:\n\nprint('#' * 52 + ' Lets first write it without the decorator to make sure we have our code correct:')\n\ndef custom_json_formatter(arg):\n if isinstance(arg, datetime):\n return arg.isoformat()\n elif isinstance(arg, set):\n return list(arg)\n\nprint(json.dumps(log_record, default=custom_json_formatter))\n# '{\"time\": \"2018-12-29T22:26:43.760863\", \"message\": \"Testing...\", \"other\": [\"c\", \"a\", \"b\"]}'\n# ######################################################################################################################\n\nprint('#' * 52 + ' To make things a little more interesting, lets throw in a custom object as well:')\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.create_dt = datetime.utcnow()\n\n def __repr__(self):\n return f'Person(name={self.name}, age={self.age})'\n\n def toJSON(self):\n return {\n 'name': self.name,\n 'age': self.age,\n 'create_dt': self.create_dt.isoformat()\n }\n\n\np = Person('John', 82)\nprint(p)\nprint(p.toJSON())\n# Person(name=John, age=82)\n# {'name': 'John', 'age': 82, 'create_dt': '2018-12-29T22:26:45.066252'}\n# ######################################################################################################################\n\nprint('#' * 52 + ' And we modify our custom JSON formatter as follows:')\n\n\ndef custom_json_formatter(arg):\n if isinstance(arg, datetime):\n return arg.isoformat()\n elif isinstance(arg, set):\n return list(arg)\n elif isinstance(arg, Person):\n return arg.toJSON()\n\n\nlog_record = dict(time=datetime.utcnow(),\n message='Created new person record',\n person=p)\n\nprint(json.dumps(log_record, default=custom_json_formatter))\n# '{\"time\": \"2018-12-29T22:26:45.769929\", \"message\": \"Created new person record\", \"person\": {\"name\": \"John\", \"age\": 82, \"create_dt\": \"2018-12-29T22:26:45.066252\"}}'\n# ######################################################################################################################\n\nprint(json.dumps(log_record, default=custom_json_formatter, indent=2))\n# {\n# \"time\": \"2018-12-29T22:26:45.769929\",\n# \"message\": \"Created new person record\",\n# \"person\": {\n# \"name\": \"John\",\n# \"age\": 82,\n# \"create_dt\": \"2018-12-29T22:26:45.066252\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' One thing to note here is that for the `Person` class'\n ' we returned a formatted string for the `created_dt` attribute.')\nprint('#' * 52 + ' We dont actually need to do this - we can simply return a `datetime` object and'\n ' let `custom_json_formatter` handle serializing the `datetime` object:')\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.create_dt = datetime.utcnow()\n\n def __repr__(self):\n return f'Person(name={self.name}, age={self.age})'\n\n def toJSON(self):\n return {\n 'name': self.name,\n 'age': self.age,\n 'create_dt': self.create_dt\n }\n\n\np = Person('Monty', 100)\n\nlog_record = dict(time=datetime.utcnow(),\n message='Created new person record',\n person=p)\n\nprint(json.dumps(log_record, default=custom_json_formatter, indent=2))\n# {\n# \"time\": \"2018-12-29T22:26:47.029102\",\n# \"message\": \"Created new person record\",\n# \"person\": {\n# 
\"name\": \"Monty\",\n# \"age\": 100,\n# \"create_dt\": \"2018-12-29T22:26:46.749022\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' In fact, we could simplify our class further by simply returning a dict of the attributes, '\n ' since in this case we want to serialize everything as is.')\nprint('#' * 52 + ' But using the `toJSON` callable means we can customize exactly '\n ' how we want out objects to be serialized.')\nprint('#' * 52 + ' So, if we were +not particular about the serialization we could do this:')\n\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.create_dt = datetime.utcnow()\n\n def __repr__(self):\n return f'Person(name={self.name}, age={self.age})'\n\n def toJSON(self):\n return vars(self)\n\np = Person('Python', 27)\n\nprint(p.toJSON())\n# {'name': 'Python',\n# 'age': 27,\n# 'create_dt': datetime.datetime(2018, 12, 29, 22, 26, 47, 973930)}\n# ######################################################################################################################\n\nprint()\nprint()\nprint()\nprint('#' * 52 + ' ')\n\nlog_record['person'] = p\nprint(log_record)\n# {'time': datetime.datetime(2018, 12, 29, 22, 26, 47, 29102), 'message': 'Created new person record', 'person': Person(name=Python, age=27)}\n# ######################################################################################################################\n\nprint(json.dumps(log_record, default=custom_json_formatter, indent=2))\n# {\n# \"time\": \"2018-12-29T22:26:47.029102\",\n# \"message\": \"Created new person record\",\n# \"person\": {\n# \"name\": \"Python\",\n# \"age\": 27,\n# \"create_dt\": \"2018-12-29T22:26:47.973930\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' In fact, we could use this approach in our custom formatter - '\n ' if an object does not have a `toJSON` callable,')\nprint('#' * 52 + ' we will just use a dictionary of the attributes - it it has any, '\n ' it might not (like a complex number or a set as examples), '\n ' so we need to watch out for that as well.')\n\nprint('toJSON' in vars(Person))\n# True\n# ######################################################################################################################\n\ndef custom_json_formatter(arg):\n if isinstance(arg, datetime):\n return arg.isoformat()\n elif isinstance(arg, set):\n return list(arg)\n else:\n try:\n return arg.toJSON()\n except AttributeError:\n try:\n return vars(arg)\n except TypeError:\n return str(arg)\n\n\nprint('#' * 52 + ' Lets create another custom class that does not have a `toJSON` method:')\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return f'Point(x={self.x}, y={self.y})'\n\n\npt1 = Point(10, 10)\n\nprint(vars(pt1))\n# {'x': 10, 'y': 10}\n# ######################################################################################################################\n\nlog_record = dict(time=datetime.utcnow(),\n message='Created new point',\n point=pt1,\n created_by=p)\n\nprint(log_record)\n# {'time': datetime.datetime(2018, 12, 29, 22, 26, 50, 955039),\n# 'message': 'Created new point',\n# 'point': Point(x=10, y=10),\n# 'created_by': Person(name=Python, age=27)}\n# ######################################################################################################################\n\nprint('#' * 
52 + ' And we can now serialize it to JSON:')\n\nprint(json.dumps(log_record, default=custom_json_formatter, indent=2))\n# {\n# \"time\": \"2018-12-29T22:26:50.955039\",\n# \"message\": \"Created new point\",\n# \"point\": {\n# \"x\": 10,\n# \"y\": 10\n# },\n# \"created_by\": {\n# \"name\": \"Python\",\n# \"age\": 27,\n# \"create_dt\": \"2018-12-29T22:26:47.973930\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' So now, lets re-write our custom json formatter using the generic single dispatch decorator'\n ' I mentioned earlier:')\n\nfrom functools import singledispatch\n\n# Our default approach is going to first try to use toJSON, if not it will try to use vars, and it that still fails\n# we'll use the string representation, whatever that happens to be:\n\n@singledispatch\ndef json_format(arg):\n print(arg)\n try:\n print('\\ttrying to use toJSON...')\n return arg.toJSON()\n except AttributeError:\n print('\\tfailed - trying to use vars...')\n try:\n return vars(arg)\n except TypeError:\n print('\\tfailed - using string representation...')\n return str(arg)\n\n# And now we 'register' other data types:\n\n@json_format.register(datetime)\ndef _(arg):\n return arg.isoformat()\n\n\n@json_format.register(set)\ndef _(arg):\n return list(arg)\n\nprint(json.dumps(log_record, default=json_format, indent=2))\n# Point(x=10, y=10)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# Person(name=Python, age=27)\n# \ttrying to use toJSON...\n# {\n# \"time\": \"2018-12-29T22:26:50.955039\",\n# \"message\": \"Created new point\",\n# \"point\": {\n# \"x\": 10,\n# \"y\": 10\n# },\n# \"created_by\": {\n# \"name\": \"Python\",\n# \"age\": 27,\n# \"create_dt\": \"2018-12-29T22:26:47.973930\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' Lets change our Person class to emit some custom JSON instead of just using `vars`:')\n\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n self.create_dt = datetime.utcnow()\n\n def __repr__(self):\n return f'Person(name={self.name}, age={self.age})'\n\n def toJSON(self):\n return dict(name=self.name)\n\np = Person('Python', 27)\nlog_record['created_by'] = p\nprint(json.dumps(log_record, default=json_format, indent=2))\n# Point(x=10, y=10)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# Person(name=Python, age=27)\n# \ttrying to use toJSON...\n# {\n# \"time\": \"2018-12-29T22:26:50.955039\",\n# \"message\": \"Created new point\",\n# \"point\": {\n# \"x\": 10,\n# \"y\": 10\n# },\n# \"created_by\": {\n# \"name\": \"Python\"\n# }\n# }\n# ######################################################################################################################\n\nprint('#' * 52 + ' The way we wrote our default formatter,'\n ' means that we can now also represent other unexpected data types, '\n ' but using each objects string representation.')\nprint('#' * 52 + ' If that is not acceptable, we can either not do this and let a `TypeError` exception get generated,'\n ' or register more custom formatters:')\n\nfrom decimal import Decimal\nfrom fractions import Fraction\n\nprint(json.dumps(dict(a=1 + 1j,\n b=Decimal('0.5'),\n c=Fraction(1, 3),\n p=Person('Python', 27),\n pt=Point(0, 0),\n time=datetime.utcnow()\n ),\n default=json_format))\n# (1+1j)\n# \ttrying to use toJSON...\n# \tfailed - trying to use 
vars...\n# \tfailed - using string representation...\n# 0.5\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# \tfailed - using string representation...\n# 1/3\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# \tfailed - using string representation...\n# Person(name=Python, age=27)\n# \ttrying to use toJSON...\n# Point(x=0, y=0)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# '{\"a\": \"(1+1j)\", \"b\": \"0.5\", \"c\": \"1/3\", \"p\": {\"name\": \"Python\"}, \"pt\": {\"x\": 0, \"y\": 0}, \"time\": \"2018-12-29T22:26:54.860340\"}'\n# ######################################################################################################################\n\nprint('#' * 52 + ' Now, suppose we dont want that default representation for `Decimals` -'\n ' we want to serialize it in this form: `Decimal(0.5)`.')\nprint('#' * 52 + ' All we need to do is to register a new function to serialize `Decimal` types:')\n\n\n@json_format.register(Decimal)\ndef _(arg):\n return f'Decimal({str(arg)})'\n\n\nprint(json.dumps(dict(a=1 + 1j,\n b=Decimal(0.5),\n c=Fraction(1, 3),\n p=Person('Python', 27),\n pt=Point(0, 0),\n time=datetime.utcnow()\n ),\n default=json_format))\n# (1+1j)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# \tfailed - using string representation...\n# 1/3\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# \tfailed - using string representation...\n# Person(name=Python, age=27)\n# \ttrying to use toJSON...\n# Point(x=0, y=0)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# '{\"a\": \"(1+1j)\", \"b\": \"Decimal(0.5)\", \"c\": \"1/3\", \"p\": {\"name\": \"Python\"}, \"pt\": {\"x\": 0, \"y\": 0}, \"time\": \"2018-12-29T22:26:55.491606\"}'\n# ######################################################################################################################\n\nprint(\n '#' * 52 + ' One last example that clearly shows the `json_format` function gets called recursively when needed:')\n\nprint(json.dumps(dict(pt = Point(Person('Python', 27), 2+2j)),\n default=json_format, indent=2))\n# Point(x=Person(name=Python, age=27), y=(2+2j))\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# Person(name=Python, age=27)\n# \ttrying to use toJSON...\n# (2+2j)\n# \ttrying to use toJSON...\n# \tfailed - trying to use vars...\n# \tfailed - using string representation...\n# {\n# \"pt\": {\n# \"x\": {\n# \"name\": \"Python\"\n# },\n# \"y\": \"(2+2j)\"\n# }\n# }\n# ######################################################################################################################\n", "repo_name": "syurskyi/Python_Topics", "sub_path": "065_serialization_and_deserialization/002_json/examples/49. Custom JSON Encoding - Coding.py", "file_name": "49. 
Custom JSON Encoding - Coding.py", "file_ext": "py", "file_size_in_byte": 19581, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.utcnow", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 61, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 89, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 89, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 100, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 123, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 157, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcnow", "line_number": 165, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 165, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 169, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 194, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 194, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 209, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 236, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 236, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 262, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 285, "usage_type": "argument"}, {"api_name": "datetime.datetime.utcnow", "line_number": 316, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 316, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 330, "usage_type": "call"}, {"api_name": "functools.singledispatch", "line_number": 354, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 370, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 379, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 406, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 406, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 416, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 444, "usage_type": "call"}, {"api_name": 
"decimal.Decimal", "line_number": 445, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 446, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 449, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 449, "usage_type": "name"}, {"api_name": "decimal.Decimal", "line_number": 477, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 482, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 483, "usage_type": "call"}, {"api_name": "fractions.Fraction", "line_number": 484, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 487, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 487, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 509, "usage_type": "call"}]} +{"seq_id": "42548479193", "text": "import os\r\nfrom copy import deepcopy\r\nimport random\r\nimport time\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom torchvision import datasets as datasets\r\nimport torch\r\nfrom PIL import ImageDraw\r\nfrom pycocotools.coco import COCO\r\nimport matplotlib.pyplot as plt\r\n \r\n\r\ndef mixup_data(x, y, alpha=1.0):\r\n\r\n '''Compute the mixup data. Return mixed inputs, pairs of targets, and lambda'''\r\n if alpha > 0.:\r\n lam = np.random.beta(alpha, alpha)\r\n else:\r\n lam = 1.\r\n batch_size = x.size()[0]\r\n index = torch.randperm(batch_size).cuda()\r\n\r\n mixed_x = lam * x + (1 - lam) * x[index,:]\r\n y_a, y_b = y, y[index]\r\n return mixed_x, y_a, y_b, lam\r\n\r\ndef mixup_criterion(y_a, y_b, lam):\r\n return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\r\n\r\ndef parse_args(parser):\r\n # parsing args\r\n args = parser.parse_args()\r\n if args.dataset_type == 'OpenImages':\r\n args.do_bottleneck_head = True\r\n if args.th == None:\r\n args.th = 0.995\r\n else:\r\n args.do_bottleneck_head = False\r\n if args.th == None:\r\n args.th = 0.7\r\n return args\r\n\r\n\r\ndef average_precision(output, target):\r\n epsilon = 1e-8\r\n\r\n \r\n # sort examples\r\n indices = output.argsort()[::-1]\r\n # Computes prec@i\r\n total_count_ = np.cumsum(np.ones((len(output), 1)))\r\n # print(111111111)\r\n target_ = target[indices]\r\n ind = target_ == 1\r\n pos_count_ = np.cumsum(ind)\r\n total = pos_count_[-1]\r\n pos_count_[np.logical_not(ind)] = 0\r\n # print(np.logical_not(ind))\r\n # print(pos_count_[np.logical_not(ind)])\r\n # print(pos_count_)\r\n pp = pos_count_ / total_count_\r\n precision_at_i_ = np.sum(pp)\r\n precision_at_i = precision_at_i_ / (total + epsilon)\r\n\r\n return precision_at_i\r\n\r\n\r\ndef shot_mAP(per_class_number, targs, preds, many_shot_thr=100, low_shot_thr=20):\r\n \"\"\"Returns the model's average precision for each class\r\n Return:\r\n ap (FloatTensor): 1xK tensor, with avg precision for each class k\r\n \"\"\"\r\n many_shot = []\r\n median_shot = []\r\n low_shot = []\r\n if np.size(preds) == 0:\r\n return 0\r\n ap = np.zeros((preds.shape[1]))\r\n # compute average precision for each class\r\n for k in range(preds.shape[1]):\r\n # sort scores\r\n scores = preds[:, k]\r\n targets = targs[:, k]\r\n # print (scores)\r\n \r\n # compute average precision\r\n ap[k] = average_precision(scores, targets)\r\n # if k ==0:\r\n # print(\"person\")\r\n # print (ap[k])\r\n # print (ap[k])\r\n if per_class_number[k]>=many_shot_thr:\r\n many_shot.append(ap[k])\r\n elif per_class_number[k] function to fetch tweets from twitter\n # result_type 
-> fetches recent tweets\n # items() -> tweets fetch limit\n list_tweets.append(tweet.text)\n\n# Using the Text_Classification model to analyse the sentiment of the tweets\n \nwith open('tfidfmodel.pickle','rb') as f:\n tfidfVectorizer = pickle.load(f) # TFIDF Vectorizer obtained from Text_Classification.py\n\nwith open('sentiment_classifer.pickle','rb') as f:\n clf = pickle.load(f) # Logistic model classification obtained from Text_Classification.py\n\n# Preprocessing the fetched tweets using regular expressions\n\ntotal_pos = 0\ntotal_neg = 0\n\nfor tweet in list_tweets:\n tweet = re.sub(r\"^https://t.co/[a-zA-Z0-9]*\\s\", \" \", tweet) # removes urls at the beginning of the tweets\n tweet = re.sub(r\"\\s+https://t.co/[a-zA-Z0-9]*\\s\", \" \", tweet) # removes urls in the middle of the tweets\n tweet = re.sub(r\"\\s+https://t.co/[a-zA-Z0-9]*$\", \" \", tweet) # removes urls at the end of the tweets\n tweet = tweet.lower() # converts tweets to lower case\n #replaces all short form of the words with its full form\n tweet = re.sub(r\"that's\",\"that is\",tweet)\n tweet = re.sub(r\"there's\",\"there is\",tweet)\n tweet = re.sub(r\"what's\",\"what is\",tweet)\n tweet = re.sub(r\"where's\",\"where is\",tweet)\n tweet = re.sub(r\"it's\",\"it is\",tweet)\n tweet = re.sub(r\"who's\",\"who is\",tweet)\n tweet = re.sub(r\"i'm\",\"i am\",tweet)\n tweet = re.sub(r\"she's\",\"she is\",tweet)\n tweet = re.sub(r\"he's\",\"he is\",tweet)\n tweet = re.sub(r\"they're\",\"they are\",tweet)\n tweet = re.sub(r\"who're\",\"who are\",tweet)\n tweet = re.sub(r\"ain't\",\"am not\",tweet)\n tweet = re.sub(r\"wouldn't\",\"would not\",tweet)\n tweet = re.sub(r\"shouldn't\",\"should not\",tweet)\n tweet = re.sub(r\"can't\",\"can not\",tweet)\n tweet = re.sub(r\"couldn't\",\"could not\",tweet)\n tweet = re.sub(r\"won't\",\"will not\",tweet)\n tweet = re.sub(r\"\\W\",\" \",tweet) # replaces all puntuations in sentences with space\n tweet = re.sub(r\"\\d\",\" \",tweet) # replaces all numbers in sentences with space\n tweet = re.sub(r\"\\s+[a-z]\\s+\",\" \",tweet) # replaces all single letter characters in middle of tweets with space\n tweet = re.sub(r\"\\s+[a-z]$\",\" \",tweet) # replaces all single letter characters at the end of tweets with space\n tweet = re.sub(r\"^[a-z]\\s+\",\" \",tweet) # replaces all single letter characters in the beginning of tweets with space\n tweet = re.sub(r\"\\s+\",\" \",tweet) # replaces all multiple spaces in the tweets with single space\n sent = clf.predict(tfidfVectorizer.transform([tweet]).toarray())\n #print(tweet,\":\",sent)\n if(sent[0] == 0):\n total_neg +=1\n else:\n total_pos+=1\n\n# Plotting a bar graph showing the number of positive and negative tweets based on the specified keyword\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nobjects = ['Positive','Negative']\ny_pos = np.arange(len(objects))\n\nplt.bar(y_pos,[total_pos,total_neg],alpha=0.5)\nplt.xticks(y_pos,objects)\nplt.ylabel('Number')\nplt.title('Number of Postive and Negative Tweets')\n\nplt.show()", "repo_name": "mohankumar27/ML-Projects", "sub_path": "Sentiment Analysis/TwitterSentimentAnalysis.py", "file_name": "TwitterSentimentAnalysis.py", "file_ext": "py", "file_size_in_byte": 4133, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 24, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 32, 
"usage_type": "call"}, {"api_name": "pickle.load", "line_number": 41, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 52, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 53, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 54, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 57, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 58, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 60, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 62, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 63, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 65, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 66, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 67, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 68, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 69, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 70, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 71, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 72, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 73, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 74, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 75, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 76, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 77, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 78, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "18230088252", "text": "#!/usr/env/bin python\n\nimport argparse\nimport random\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--inputfiles\", dest=\"inputfiles\", help=\"list of input files to select a random word from\", required=False, nargs='+', default=[])\n\nargs = ap.parse_args()\ncn=[]\nfor f in args.inputfiles:\n with open(f, 'r') as fd:\n #filter commented lines\n cn.append( random.choice(list(filter(lambda x: not x.startswith(\"#\") and not x.strip()==\"\",fd.readlines()))).strip() )\nprint(' '.join(cn))\n ", "repo_name": "dmelha/codenamegenerator", "sub_path": "codenamegenerator.py", "file_name": "codenamegenerator.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "27547201926", "text": "from itertools import product\n\nimport pytest\nimport tensorlayerx as tlx\nimport numpy as np\n\nfrom gammagl.layers.conv import RGCNConv\n\nclasses = [RGCNConv]\nconfs = [(None, None), (2, None), (None, 2)]\n\n@pytest.mark.parametrize('cls,conf', product(classes, confs))\ndef test_rgcn_conv(cls, conf):\n num_bases, num_blocks = conf\n\n x1 = tlx.random_normal((4, 4))\n x2 = tlx.random_normal((2, 16))\n idx1 = tlx.arange(start = 0, limit = 4, dtype = tlx.int64)\n idx2 = tlx.arange(start = 0, limit = 2, dtype = tlx.int64)\n edge_index = tlx.convert_to_tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]], dtype = tlx.int64)\n edge_type = tlx.convert_to_tensor([0, 1, 1, 0, 0, 1], dtype = tlx.int64)\n row, col = edge_index\n\n conv = cls(4, 32, 2, num_bases = num_bases, num_blocks = num_blocks)\n out1 = conv(x1, edge_index, edge_type)\n assert tlx.get_tensor_shape(out1) == [4, 32]\n\n if num_blocks is None:\n out2 = conv(None, edge_index, edge_type)\n assert tlx.get_tensor_shape(out2) == [4, 32]\n\n conv = cls((4, 16), 32, 2, num_bases = num_bases, num_blocks = num_blocks)\n out1 = conv((x1, x2), edge_index, edge_type)\n assert tlx.get_tensor_shape(out1) == [2, 32]\n\n if num_blocks is None:\n out2 = conv((None, idx2), edge_index, edge_type)\n assert tlx.get_tensor_shape(out2) == [2, 32]\n assert np.allclose(tlx.convert_to_numpy(conv((idx1, idx2), edge_index, edge_type)), \n tlx.convert_to_numpy(out2))\n", "repo_name": "BUPT-GAMMA/GammaGL", "sub_path": "tests/layers/conv/test_rgcn_conv.py", "file_name": "test_rgcn_conv.py", "file_ext": "py", "file_size_in_byte": 1488, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 157, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gammagl.layers.conv.RGCNConv", "line_number": 9, "usage_type": "name"}, {"api_name": "tensorlayerx.random_normal", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorlayerx.random_normal", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorlayerx.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorlayerx.int64", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorlayerx.arange", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorlayerx.int64", "line_number": 19, "usage_type": "attribute"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorlayerx.int64", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorlayerx.convert_to_tensor", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorlayerx.int64", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tensorlayerx.get_tensor_shape", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorlayerx.get_tensor_shape", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorlayerx.get_tensor_shape", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorlayerx.get_tensor_shape", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_numpy", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorlayerx.convert_to_numpy", "line_number": 40, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": 
"attribute"}, {"api_name": "itertools.product", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "41460441446", "text": "from __future__ import division\nimport shutil\nimport numpy as np\nimport torch\nfrom path import Path\nimport datetime\nfrom collections import OrderedDict\nimport os\nimport pdb\n\ndef save_path_formatter(args, parser):\n def is_default(key, value):\n return value == parser.get_default(key)\n args_dict = vars(args)\n data_folder_name = str(Path(args_dict['data']).normpath().name)\n folder_string = [data_folder_name]\n if not is_default('epochs', args_dict['epochs']):\n folder_string.append('{}epochs'.format(args_dict['epochs']))\n keys_with_prefix = OrderedDict()\n keys_with_prefix['epoch_size'] = 'epoch_size'\n keys_with_prefix['sequence_length'] = 'seq'\n keys_with_prefix['rotation_mode'] = 'rot_'\n keys_with_prefix['padding_mode'] = 'padding_'\n keys_with_prefix['batch_size'] = 'b'\n keys_with_prefix['lr'] = 'lr'\n keys_with_prefix['photo_loss_weight'] = 'p'\n keys_with_prefix['mask_loss_weight'] = 'm'\n keys_with_prefix['smooth_loss_weight'] = 's'\n keys_with_prefix['network'] = 'network'\n keys_with_prefix['pretrained_encoder'] = 'pretrained_encoder'\n keys_with_prefix['loss'] = 'loss'\n for key, prefix in keys_with_prefix.items():\n value = args_dict[key]\n if not is_default(key, value):\n folder_string.append('{}{}'.format(prefix, value))\n #for store_true option to be written into the folder name(added here)\n # if args.pretrained_encoder:\n # folder_string.append('pretrained_encoder')\n\n save_path = Path(','.join(folder_string))\n timestamp = datetime.datetime.now().strftime(\"%m-%d-%H:%M\")\n return save_path/timestamp\n\n\ndef tensor2array(tensor, max_value=255, colormap='rainbow', channel_first=True):\n tensor = tensor.detach().cpu() #;pdb.set_trace()\n if max_value is None:\n max_value = tensor.max().item()\n\n if tensor.ndimension() == 2 or tensor.size(0) == 1:\n try:\n import cv2\n if int(cv2.__version__[0]) >= 3:\n color_cvt = cv2.COLOR_BGR2RGB\n else: # 2.4\n color_cvt = cv2.cv.CV_BGR2RGB\n if colormap == 'rainbow':\n colormap = cv2.COLORMAP_RAINBOW\n elif colormap == 'bone':\n colormap = cv2.COLORMAP_BONE\n array = (255*tensor.squeeze().numpy()/max_value).clip(0, 255).astype(np.uint8)\n colored_array = cv2.applyColorMap(array, colormap)\n array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32)/255\n except ImportError:\n if tensor.ndimension() == 2:\n tensor.unsqueeze_(2)\n array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy()/max_value).clip(0,1)\n if channel_first:\n array = array.transpose(2, 0, 1)\n\n elif tensor.ndimension() == 3:\n assert(tensor.size(0) == 3)\n array = 0.5 + tensor.numpy()*0.5\n if not channel_first:\n array = array.transpose(1, 2, 0)\n return array\n\n\ndef save_checkpoint(save_path, dispnet_state, exp_pose_state, is_best, epoch, filename='checkpoint.pth.tar',record=False):\n file_prefixes = ['dispnet', 'exp_pose']\n states = [dispnet_state, exp_pose_state]\n for (prefix, state) in zip(file_prefixes, states):\n torch.save(state, save_path/'{}_{}'.format(prefix,filename))\n \n if record:\n #timestamp = datetime.datetime.now().strftime(\"%m-%d-%H:%M\")\n record_path = save_path/\"weights_{}\".format(epoch)\n record_path.makedirs_p()\n torch.save(dispnet_state, record_path/'dispnet_{}'.format(filename))\n\n if is_best:\n for prefix in file_prefixes:\n shutil.copyfile(save_path/'{}_{}'.format(prefix,filename), save_path/'{}_model_best.pth.tar'.format(prefix))\n\n# def get_depth_sid(args, labels):\n# if 
args.dataset == 'kitti':\n# min = 0.001\n# max = 80.0\n# K = 71.0\n# elif args.dataset == 'nyu':\n# min = 0.02\n# max = 80.0\n# K = 68.0\n# else:\n# print('No Dataset named as ', args.dataset)\ndef get_depth_sid(labels, ordinal_c=71.0,dataset='kitti'):\n # min = 0.001\n # max = 80.0\n\n # set as consistant with paper to add min value to 1 and set min as 0.01 (cannot converge on both nets)\n if dataset == 'kitti':\n alpha_ = 1.0\n beta_ = 80.999\n elif dataset == 'nyu' or dataset == 'NYU':# for the args in test_disp is different from train\n alpha_ = 1.0\n beta_ = 10.999\n\n K = float(ordinal_c)#;pdb.set_trace()\n\n if torch.cuda.is_available():\n alpha_ = torch.tensor(alpha_).cuda()\n beta_ = torch.tensor(beta_).cuda()\n K_ = torch.tensor(K).cuda()\n #;pdb.set_trace()\n else:\n alpha_ = torch.tensor(alpha_)\n beta_ = torch.tensor(beta_)\n K_ = torch.tensor(K)\n\n #depth = alpha_ * (beta_ / alpha_) ** (labels.float() / K_)-0.999\n depth = 0.5*(alpha_ * (beta_ / alpha_) ** (labels.float() / K_)+alpha_ * (beta_ / alpha_) ** ((labels.float()+1.0) / K_))-0.999# for compensation\n\n return depth.float()\n\n\n# def get_labels_sid(args, depth):\n# if args.dataset == 'kitti':\n# alpha = 0.001\n# beta = 80.0\n# K = 71.0\n# elif args.dataset == 'nyu':\n# alpha = 0.02\n# beta = 10.0\n# K = 68.0\n# else:\n# print('No Dataset named as ', args.dataset)\ndef get_labels_sid(depth, ordinal_c=71.0 ,dataset='kitti'):\n #alpha = 0.001\n #beta = 80.0\n\n # set as consistant with paper to add min value to 1 and set min as 0.01 (cannot converge on both nets)\n\n if dataset == 'kitti':\n alpha = 1.0\n beta = 80.999#new alpha is 0.01 which is consistant with other network\n elif dataset == 'nyu':\n alpha = 1.0\n beta = 10.999\n\n K = float(ordinal_c)\n\n alpha = torch.tensor(alpha)\n beta = torch.tensor(beta)\n K = torch.tensor(K)\n\n if torch.cuda.is_available():\n alpha = alpha.cuda()\n beta = beta.cuda()\n K = K.cuda()\n\n # labels = K * torch.log(depth / alpha) / torch.log(beta / alpha)\n labels = K * torch.log((depth+0.999) / alpha) / torch.log(beta / alpha)\n if torch.cuda.is_available():\n labels = labels.cuda()\n return labels.int()", "repo_name": "zenithfang/supervised_dispnet", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 6206, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 46, "dataset": "github-code", "pt": "52", "api": [{"api_name": "path.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 19, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 41, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 41, "usage_type": "attribute"}, {"api_name": "cv2.__version__", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 54, "usage_type": "attribute"}, {"api_name": "cv2.cv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "cv2.COLORMAP_RAINBOW", "line_number": 58, "usage_type": "attribute"}, {"api_name": "cv2.COLORMAP_BONE", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.applyColorMap", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 83, "usage_type": 
"call"}, {"api_name": "torch.save", "line_number": 89, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 120, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.log", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 173, "usage_type": "attribute"}]} +{"seq_id": "22047367717", "text": "import cv2\nimport sys\n\nprint(cv2.__version__)\n\nimg = cv2.imread('cat.bmp',cv2.IMREAD_GRAYSCALE)\n\nif img is None :\n print(\"failed\")\n sys.exit()\n\n\ncv2.imwrite('cat_gray.png',img) \n\ncv2.namedWindow('image')\ncv2.imshow('image',img)\ncv2.waitKey()\n\ncv2.destroyAllWindows('image')", "repo_name": "5ohyun/openCV", "sub_path": "chap1/HelloCV.py", "file_name": "HelloCV.py", "file_ext": "py", "file_size_in_byte": 279, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.__version__", "line_number": 4, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "17543185901", "text": "# -*- coding: utf-8 -*-\n# @UpdateTime : 2023/7/5 23:41\n# @Author : 27\n# @File : p1.py.py\n\nimport multiprocessing, time\n\n\ndef myFunc(i):\n print(\"calling myFunc from process n: %s\" % i)\n for j in range(0, i):\n print(\"out from myFunc is: %s\" % j)\n\n\ndef myFunc2():\n name = multiprocessing.current_process().name\n print(\"Starting process name = {}\".format(name))\n time.sleep(3)\n print(\"Exiting process name = {}\".format(name))\n\n\nif __name__ == \"__main__\":\n print(123)\n\n # myFunc\n # for i in range(6):\n # process = multiprocessing.Process(target=myFunc, args=(i,))\n # process.start()\n # process.join()\n\n # myFunc2\n start = time.perf_counter()\n process_with_name = multiprocessing.Process(name='myFunc2 process', target=myFunc2)\n process_with_default_name = multiprocessing.Process(target=myFunc2)\n process_with_name.start()\n process_with_default_name.start()\n process_with_name.join()\n 
process_with_default_name.join()\n end = time.perf_counter()\n print(f\"func2 running time {end - start}\")\n pass\n\n", "repo_name": "wnz27/Coding-Daily", "sub_path": "content/Python_Processing/p1.py", "file_name": "p1.py", "file_ext": "py", "file_size_in_byte": 1088, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "multiprocessing.current_process", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 18, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 32, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 33, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 34, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "37066350569", "text": "'''\r\n fluids\r\n'''\r\n\r\nfrom const import *\r\nimport thing\r\nfrom colors import COLORS as COL\r\n\r\n#non-tile container of fluids\r\n#object to be a component of a thing that holds water\r\nclass FluidContainer:\r\n def __init__(self,size):\r\n self.size=size\r\n self.quantity=0\r\n self.fluidType=None\r\n\r\n#Fluid data, each type of fluid has one (1) unique fluid data object.\r\nclass Data:\r\n\r\n def __init__(self, x,y, t=T_FLUID, name=None,\r\n color=None, material=None, d=1,v=1,kg=1,\r\n burns=False, putsout=False):\r\n\r\n self._type=t\r\n self.name=name\r\n self.color=color\r\n self.material=material\r\n self.density=d\r\n self.viscosity=v\r\n self.mass=kg\r\n self.flammable=burns\r\n self.extinguish=putsout #does it put out fires?\r\n\r\n#Tile fluid container\r\n #***SHOULD THIS BE A THING, OR SHOULD IT BE A CONTAINER OF FLUIDS WHICH ARE THINGS?\r\nclass Fluid(thing.Thing):\r\n\r\n def __init__(self, x,y):\r\n super(Fluid, self).__init__(x, y)\r\n self.dic={}\r\n self.size=0 #total quantity of fluid in this tile\r\n\r\n def getData(self, stat): #get a particular stat about the fluid\r\n return FLUIDS[self.name].__dict__[stat]\r\n \r\n def clear(self): #completely remove all fluids from the tile\r\n self.dic={}\r\n self.size=0\r\n \r\n def add(self, name, quantity=1):\r\n newQuant = self.dic.get(name, 0) + quantity\r\n self.dic.update({name : newQuant})\r\n self.size += quantity\r\n \r\n '''floodFill = False\r\n if self.size + quantity > MAX_FLUID_IN_TILE:\r\n quantity = MAX_FLUID_IN_TILE - self.size\r\n floodFill = True #partial floodfill / mixing\r\n #how should the fluids behave when you \"inject\" a new fluid into a full lake of water, etc.?\r\n #regular floodfill will not cut it\r\n #maybe just replace the current fluid with the new fluid to keep it simple.\r\n '''\r\n\r\n '''if floodFill:\r\n #do flood fill algo.\r\n #this is going to also have to run a cellular automata to distribute different types of fluids\r\n return'''\r\n\r\n def removeType(self, name, quantity=1):\r\n if self.size > 0:\r\n curQuant = self.dic.get(name, 0)\r\n newQuant = max(0, curQuant - quantity)\r\n diff = curQuant - newQuant\r\n if not diff: #no fluid of that type to remove\r\n return\r\n self.size -= diff\r\n if newQuant != 0:\r\n self.dic.update({name : newQuant})\r\n else:\r\n #we've run out of this type of fluid\r\n self.dic.remove(name)\r\n\r\n\r\n \r\n \r\n#effects\r\ndef _wet(actor, n):\r\n if n>=10:\r\n rog.set_status(actor, WET)\r\ndef _oily(actor, n):\r\n if n>=10:\r\n rog.make(actor, OILY)\r\ndef _bloody(actor, n):\r\n if n>=10:\r\n rog.make(actor, BLOODY)\r\ndef _cough(actor, n):\r\n 
rog.cough(actor, n)\r\ndef _hydrate(actor, n):\r\n actor.hydration += n * WATER_HYDRATE\r\ndef _blood(actor, n):\r\n pass\r\ndef _acid(actor, n):\r\n rog.corrode(actor, n)\r\ndef _strongAcid(actor, n):\r\n rog.corrode(actor, n*3)\r\ndef _quaffAcid(actor, n):\r\n rog.corrode(actor, n*5)\r\ndef _quaffStrongAcid(actor, n):\r\n rog.corrode(actor, n*15)\r\ndef _sick(actor, n):\r\n rog.disease(actor, n)\r\ndef _drunk(actor, n):\r\n rog.intoxicate(actor, n)\r\n\r\nFLUIDS = {\r\n#attributes:\r\n# d : density\r\n# v : viscosity\r\n# kg : mass\r\n# flamm : flammable?\r\n# snuff : snuffs out fires?\r\n# ID : ( type, name, color, d, v, kg, flamm,snuff,touch,quaff,\r\nFL_SMOKE : Data(T_GAS, \"smoke\", COL['white'], 0.05, 0.01, 0.01,False,False,None, _cough,),\r\nFL_WATER : Data(T_FLUID,\"water\", COL['blue'], 1, 1, 0.1, False,True, _wet, _hydrate,),\r\nFL_BLOOD : Data(T_FLUID,\"blood\", COL['red'], 1.1, 2, 0.12,False,True, _bloody,_blood,),\r\nFL_ACID : Data(T_FLUID,\"acid\", COL['green'], 1.21, 0.6, 0.2, False,False,_acid,_quaffAcid,),\r\nFL_STRONGACID:Data(T_FLUID,\"strong acid\",COL['bio'], 1.3, 0.9, 0.2, False,False,_strongAcid,_quaffStrongAcid,),\r\nFL_OIL : Data(T_FLUID,\"oil\", COL['truepurple'], 0.9, 3, 0.3, True,False, _oily, _sick,),\r\nFL_ALCOHOL : Data(T_FLUID,\"moonshine\",COL['gold'], 1.2, 0.8, 0.15,True,False, _wet, _drunk,),\r\n#FL_LAVA\r\n#FL_HEALING\r\n#FL_CONFU\r\n#FL_DRUNK\r\n#FL_BLIND\r\n#FL_SLOW\r\n#FL_IRRIT\r\n#FL_SICK\r\n }\r\nFLUID_COMBONAMES={\r\nFL_SMOKE : \"smokey\",\r\nFL_WATER : \"watery\",\r\nFL_BLOOD : \"bloody\",\r\nFL_ACID : \"acidic\",\r\nFL_STRONGACID: \"acidic\",\r\nFL_OIL : \"oily\",\r\nFL_ALCOHOL : \"alcoholic\",\r\n }\r\n\r\n#create a fluid\r\ndef create_fluid(x,y,ID,volume):\r\n fluid = Fluid(x,y)\r\n fluid.add(ID, volume)\r\n return fluid\r\n\r\n \r\n\r\ndef simulate_flow():\r\n#idea: if any fluid tiles contain more than the maximum allowed,\r\n #always flow outward using flood fill if necessary.\r\n for fluid in rog.list_fluids():\r\n #simultaneous cellular automata\r\n newMap = TileMap(self.w,self.h)\r\n newMap.COPY(rog.map())\r\n #define some functions to reduce duplicate code\r\n def _doYourThing(x,y,num,nValues): # alter a tile or keep it the same based on input\r\n if nValues[num]==-1:\r\n newMap.tile_change(x,y,offChar)\r\n elif nValues[num]==1:\r\n newMap.tile_change(x,y,onChar)\r\n for ii in range(iterations):\r\n for x in range(self.w):\r\n for y in range(self.h):\r\n num = newMap.countNeighbors(x,y, onChar)\r\n _doYourThing(x,y,num,nValues)\r\n self.COPY(newMap)\r\n \r\n", "repo_name": "eyeCube/Softly-Into-the-Night-OLD", "sub_path": "fluids.py", "file_name": "fluids.py", "file_ext": "py", "file_size_in_byte": 5741, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "thing.Thing", "line_number": 36, "usage_type": "attribute"}, {"api_name": "colors.COLORS", "line_number": 123, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 124, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 125, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 126, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 127, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 128, "usage_type": "name"}, {"api_name": "colors.COLORS", "line_number": 129, "usage_type": "name"}]} +{"seq_id": "25782863030", "text": "from hashlib import new\nimport cv2\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom PIL 
import Image\nfrom matplotlib import style\nfrom copy import deepcopy\n\nBLKSIZE = 20\nHEIGHT = 20\nWITDH = 10\n\n'''\n Simple game engine\n'''\nclass Tetris:\n '''\n We need one-sided tetrominos as we can't transform free tetronimos L -> J or S -> Z from rotation. \n This is due to chirality phenomenon.\n '''\n tetrominos = [\n # O shape\n [[1, 1],\n [1, 1]],\n # T shape\n [[2, 2, 2],\n [0, 2, 0]],\n # L shape\n [[3, 0],\n [3, 0],\n [3, 3]],\n # J shape\n [[0, 4],\n [0, 4],\n [4, 4]],\n # S shape\n [[0, 5, 5],\n [5, 5, 0]],\n # Z shape\n [[6, 6, 0],\n [0, 6, 6]],\n # I shape\n [[7],\n [7],\n [7],\n [7]],\n ]\n ''' \n Follows the same order as the tetronimos array\n '''\n tetromino_color = [\n (0, 0, 0),\n (255,255,0),\n (255,0,255),\n (255,215,0),\n (0,0,255),\n (0,255,0),\n (255,0,0),\n (135,206,250)\n ]\n\n '''\n Initatilze board. Side panel and text color go to the right of the board\n '''\n def __init__(self) -> None:\n self.text_color = (240,248,255)\n self.side_panel = np.ones((HEIGHT * BLKSIZE, WITDH * int(BLKSIZE / 2), 3), dtype=np.uint8) * np.array([95, 158, 160], dtype=np.uint8)\n self.reset()\n\n '''\n Resets the game when gameover = True. In training, it will return the state properties\n after losing.\n '''\n def reset(self):\n self.board = np.zeros((HEIGHT, WITDH), dtype=np.uint8)\n self.score = 0\n self.count = 0\n self.lines = 0\n self.indexes = list(range(len(self.tetrominos)))\n random.shuffle(self.indexes)\n self.idx = self.indexes.pop()\n self.tetromino = deepcopy(self.tetrominos[self.idx])\n self.current_pos = {'x': int(WITDH / 2) - int(len(self.tetrominos[0]) / 2), 'y': 0}\n self.gameover = False\n return self.state_properties(self.board)\n\n '''\n These heuristics of the game will be what our training model uses to asses performance.\n Complete Lines: We want to maximize this because it is the goal of the AI + more space\n Aggregate Height: We want to minimize this value because then we can drop more pieces\n Holes: We want to minimize this because the less holes the more lines we can complete\n Bumpiness: We want to mnimize this value so our board doesn't fill up in unwanted places\n\n Returns: tf float array with the 4 properties\n '''\n def state_properties(self, board):\n line_count, board = self.completed_lines(board)\n agg_height = sum(self.heights(board))\n holes = self.hole_count(board)\n bumpy_score = self.bumpiness(board)\n return np.array([line_count, holes, bumpy_score, agg_height])\n # return tf.constant([line_count, holes, bumpy_score, agg_height])\n \n '''\n Checking if ndarray in each row contains a 0. If so we want to remove it from our board.\n \n Returns the length of amount of lines deleted and the updated board without those lines.\n '''\n def completed_lines(self, board):\n completed_lines = []\n for i, row in enumerate(board[::-1]):\n if np.all(row):\n completed_lines.append(len(board) - i - 1)\n if len(completed_lines) > 0:\n board = self.remove_line(board, completed_lines)\n return len(completed_lines), board\n\n '''\n Flips the board on its side using zip, going through each column once a 1 appears at the top\n start counting holes if a 0 appears. 
\n\n Returns all the holes found.\n '''\n def hole_count(self, board):\n holes = 0\n for col in np.stack(board, axis=1):\n valid = False\n for i in range(HEIGHT):\n if col[i] == 1:\n valid = True\n if valid == True and col[i] == 0:\n holes += 1\n return holes\n\n '''\n Queries where the board has values, it takes the max of each column and subtracts 20 to make the number it's true height.\n Used in getting aggregated heights and board bumpiness.\n\n Returns the max height of each column.\n '''\n def heights(self, board):\n return HEIGHT - np.where(board.any(axis=0), np.argmax(board, axis=0), HEIGHT) \n \n '''\n Given all the heights, we sum up the absolute differences between all two adjacent columns\n\n Returns the 'bumpiness' of board.\n '''\n def bumpiness(self, board):\n col_heights = self.heights(board)\n lhs = col_heights[:-1]\n rhs = col_heights[1:]\n differences = np.abs(lhs - rhs)\n return np.sum(differences)\n\n '''\n Taking in a board and a list of row indincies, delete all the lines from the board and \n with vstack add a new row of 0's to the top\n\n Returns the updated board with removed lines\n '''\n def remove_line(self, board, indices):\n for i in indices[::-1]:\n board = np.delete(board, i, 0)\n new_row = np.zeros((10), dtype=np.uint8)\n board = np.vstack([new_row, board])\n return board\n\n '''\n Takes a tetromino and rotates the array 90 degrees. This is done using rot90() from numpy.\n\n Returns rotated tetromino.\n '''\n def rotate(self, tetromino):\n return np.rot90(tetromino).tolist()\n\n '''\n Certain pieces can only really rotate a set amount of times\n '''\n def avaliable_rotations(self):\n if self.idx in [4,5,6]:\n return 2\n elif self.idx in [1,2,3]:\n return 4\n else:\n return 1\n\n def next_states(self):\n states = {}\n current_tetromino = deepcopy(self.tetromino)\n rotations = self.avaliable_rotations()\n for i in range(rotations):\n valid_positions = WITDH - len(current_tetromino[0]) + 1\n for x in range(valid_positions):\n tetromino = deepcopy(current_tetromino)\n pos= {'x': x, 'y': 0}\n while not self.check_collision(tetromino, pos):\n pos['y'] += 1\n self.truncate(tetromino, pos)\n board = self.store(tetromino, pos)\n states[(x, i)] = self.state_properties(board)\n current_tetromino = self.rotate(current_tetromino)\n return [(x[0],x[1]) for x in states.items()] # {k:p} -> [(k,p)]\n\n def current_board_state(self):\n board = deepcopy(self.board)\n for y in range(len(self.tetromino)):\n for x in range(len(self.tetromino[y])):\n board[y + self.current_pos['y']][x+ self.current_pos['x']] = self.tetromino[y][x]\n return board\n\n def new_piece(self):\n if not len(self.indexes):\n self.indexes = list(range(len(self.tetrominos)))\n random.shuffle(self.indexes)\n self.idx = self.indexes.pop()\n self.tetromino = deepcopy(self.tetrominos[self.idx])\n self.current_pos = {'x': int(WITDH / 2) - int(len(self.tetrominos[0]) / 2), 'y': 0}\n if self.check_collision(self.tetromino, self.current_pos):\n self.gameover = True\n\n def check_collision(self, tetromino, pos):\n new_y = pos['y'] + 1\n # try to convert using itertools\n for y in range(len(tetromino)):\n for x in range(len(tetromino[y])):\n if new_y + y > HEIGHT - 1 or self.board[new_y + y][pos['x'] + x] and tetromino[y][x]:\n return True\n return False\n\n def truncate(self, tetromino, pos):\n gameover = False\n last_collision_row = -1\n for y in range(len(tetromino)):\n for x in range(len(tetromino[y])):\n if self.board[pos[\"y\"] + y][pos[\"x\"] + x] and tetromino[y][x]:\n if y > last_collision_row:\n 
last_collision_row = y\n if pos[\"y\"] - (len(tetromino) - last_collision_row) < 0 and last_collision_row > -1:\n while last_collision_row >= 0 and len(tetromino) > 1:\n gameover = True\n last_collision_row = -1\n del tetromino[0]\n for y in range(len(tetromino)):\n for x in range(len(tetromino[y])):\n if self.board[pos[\"y\"] + y][pos[\"x\"] + x] and tetromino[y][x] and y > last_collision_row:\n last_collision_row = y\n return gameover\n\n def store(self, tetromino, pos):\n board = deepcopy(self.board)\n # try to convert using itertools\n for y in range(len(tetromino)):\n for x in range(len(tetromino[y])):\n if tetromino[y][x] and not board[y + pos[\"y\"]][x + pos[\"x\"]]:\n board[y + pos[\"y\"]][x + pos[\"x\"]] = tetromino[y][x]\n return board\n\n\n def compute_reward(self, lines, holes, height, bumpiness, gameover):\n # Parameters for the reward function\n a = -0.5\n b = -0.35\n c = -0.2\n # print([lines, holes, height, bumpiness])\n return -4 if gameover else (a*height)+lines**2+(b*holes)+(c*bumpiness) \n\n def step(self, action, render=True, video=None):\n x, rotations = action\n self.current_pos = {'x': x, 'y': 0}\n for _ in range(rotations):\n self.tetromino = self.rotate(self.tetromino)\n while not self.check_collision(self.tetromino, self.current_pos):\n self.current_pos['y'] += 1\n if render:\n self.render(video)\n if self.truncate(self.tetromino, self.current_pos):\n self.gameover = True\n self.board = self.store(self.tetromino, self.current_pos)\n line_count, self.board = self.completed_lines(self.board)\n score = 1 + (line_count ** 2) * WITDH\n self.score += score\n self.count += 1\n self.lines += line_count\n \n if self.gameover:\n self.score -= 2\n else:\n self.new_piece()\n\n reward = self.compute_reward(\n self.lines, \n self.hole_count(self.board), \n sum(self.heights(self.board)), \n self.bumpiness(self.board),\n self.gameover\n )\n\n return (self.next_states(), reward, self.gameover)\n\n def render(self, video=None):\n if not self.gameover:\n img = [self.tetromino_color[p] for row in self.current_board_state() for p in row]\n else:\n img = [self.tetromino_color[p] for row in self.board for p in row]\n img = np.array(img).reshape((HEIGHT, WITDH, 3)).astype(np.uint8)\n img = img[..., ::-1]\n img = Image.fromarray(img, 'RGB')\n\n img = img.resize((WITDH * BLKSIZE, HEIGHT * BLKSIZE), 0)\n img = np.array(img)\n img[[i * BLKSIZE for i in range(HEIGHT)], :, :] = 0\n img[:, [i * BLKSIZE for i in range(WITDH)], :] = 0\n img = np.concatenate((img, self.side_panel), axis=1)\n\n cv2.putText(img, \"Score:\", (WITDH * BLKSIZE + int(BLKSIZE / 2), BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.7, color=self.text_color)\n cv2.putText(img, str(self.score), (WITDH * BLKSIZE + int(BLKSIZE / 2), 2 * BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5, color=self.text_color)\n cv2.putText(img, \"Pieces:\", (WITDH * BLKSIZE + int(BLKSIZE / 2), 4 * BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.7, color=self.text_color)\n cv2.putText(img, str(self.count), (WITDH * BLKSIZE + int(BLKSIZE / 2), 5 * BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5, color=self.text_color)\n cv2.putText(img, \"Lines:\", (WITDH* BLKSIZE + int(BLKSIZE / 2), 7 * BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.7, color=self.text_color)\n cv2.putText(img, str(self.lines), (WITDH * BLKSIZE + int(BLKSIZE / 2), 8 * BLKSIZE),\n fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=0.5, color=self.text_color)\n\n if video:\n video.write(img)\n\n cv2.imshow(\"Deep Q-Learning Tetris\", 
img)\n cv2.waitKey(1)\n\n\n\n\n\n", "repo_name": "andrewfritz2001/ml-tetris", "sub_path": "tetris.py", "file_name": "tetris.py", "file_ext": "py", "file_size_in_byte": 11719, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.ones", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 68, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 76, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 81, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 176, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 191, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 196, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 207, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 216, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 218, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 307, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 309, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 309, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 315, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 317, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 318, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 319, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 320, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 321, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 322, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 323, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 324, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 325, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 326, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 327, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_DUPLEX", "line_number": 328, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 333, "usage_type": 
"call"}, {"api_name": "cv2.waitKey", "line_number": 334, "usage_type": "call"}]} +{"seq_id": "36021745469", "text": "from transformers.modeling_bert import *\nfrom torch import nn\nimport torch\n\n\n\n\n# class LinearBertModel(BertPreTrainedModel):\n# def __init__(self,config):\n# super(LinearBertModel,self).__init__(config)\n# self.bert = BertModel(config)\n# self.dropout = nn.Dropout(0.2)\n# self.classifier = nn.Linear(self.config.hidden_size,self.config.num_labels)\n#\n#\n# def forward(self, batch, feed_labels = False):\n# input_ids = batch.get(\"input_ids\")\n# token_type_ids = batch.get(\"segment_ids\")\n# attention_mask = batch.get(\"input_mask\")\n# labels = batch.get(\"label\")\n# outputs = self.bert(input_ids, attention_mask,token_type_ids )\n# pooled_output = outputs[1]\n# pooled_output = self.dropout(pooled_output) ## dropout at 0.5\n# logits = self.classifier(pooled_output)\n# if feed_labels:\n# loss_fct = CrossEntropyLoss(weight=torch.tensor(self.config.weight).cuda())\n# loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n# return loss\n# else:\n# logits = nn.functional.softmax(logits, -1)\n# return logits\nclass LinearBertModel(BertPreTrainedModel):\n def __init__(self, config):\n super(LinearBertModel, self).__init__(config)\n self.num_labels = config.num_labels\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n\n self.init_weights()\n\n def forward(self, batch, feed_labels = False):\n input_ids = batch.get(\"input_ids\")\n token_type_ids = batch.get(\"segment_ids\")\n attention_mask = batch.get(\"input_mask\")\n labels = batch.get(\"label\")\n outputs = self.bert(input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids)\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n # outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n outputs = logits\n if feed_labels:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n # outputs = (loss,) + outputs\n outputs = loss\n return outputs # (loss), logits, (hidden_states), (attentions)\n", "repo_name": "neptune1997/diac", "sub_path": "models/LinearBertModel.py", "file_name": "LinearBertModel.py", "file_ext": "py", "file_size_in_byte": 2711, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Dropout", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "73339615525", "text": "from tempfile import NamedTemporaryFile\nimport pytest\nfrom envrun.__main__ import main\n\n\ndef test_no_env_no_venv():\n main([\"ls\", \"-la\"])\n assert True\n\n\ndef test_yaml_env_no_venv(env_fixture_yaml):\n for fname in wrap_fixture_file(env_fixture_yaml, suffix=\".yaml\"):\n main([\"-f\", fname, \"echo\", \"$FOO\"])\n assert True\n\n\ndef test_dot_env_no_venv(env_fixture_dotenv):\n for fname in wrap_fixture_file(env_fixture_dotenv, suffix=\".env\"):\n main([\"-f\", fname, \"echo\", \"$BAR\"])\n assert True\n\n\ndef 
test_no_env_with_venv():\n \"\"\" Note this cheats a bit and assumes .env virtualenv exists \"\"\"\n main([\"-e\", \".env\", \"ls -la\"])\n assert True\n\n\ndef test_yaml_env_with_venv(env_fixture_yaml):\n for fname in wrap_fixture_file(env_fixture_yaml, suffix=\".yaml\"):\n main([\"-e\", \".env\", \"-f\", fname, \"echo $BAZ\"])\n assert True\n\n\ndef wrap_fixture_file(fixture, suffix=None):\n with NamedTemporaryFile(suffix=suffix) as f:\n f.write(fixture)\n f.flush()\n yield f.name\n\n\n@pytest.fixture\ndef env_fixture_yaml():\n return b\"\"\"\nFOO: bar\nBAR: baz\nBAZ: quux\n\"\"\"\n\n\n@pytest.fixture\ndef env_fixture_dotenv():\n return b\"\"\"\nFOO=bar\nBAR = baz\nBAZ =quux\n\"\"\"\n", "repo_name": "ericgj/envrun-python", "sub_path": "test/test_main.py", "file_name": "test_main.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "envrun.__main__.main", "line_number": 7, "usage_type": "call"}, {"api_name": "envrun.__main__.main", "line_number": 13, "usage_type": "call"}, {"api_name": "envrun.__main__.main", "line_number": 19, "usage_type": "call"}, {"api_name": "envrun.__main__.main", "line_number": 25, "usage_type": "call"}, {"api_name": "envrun.__main__.main", "line_number": 31, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 36, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "34392869854", "text": "import json\nimport boto3\nimport os\nfrom botocore.exceptions import ClientError\nfrom ...common.response_builder import get_success_response, get_custom_error\nfrom ...common import utils\nfrom ...common.decorators import (\n validate_body\n)\nfrom ...validation.complete_profile import CompleteProfileSchema\n\n\n\ncognito_client = boto3.client('cognito-idp') # add region \ndynamodb = boto3.resource('dynamodb')\n\n#Table\nUSER_TABLE = os.environ[\"USERS_TABLE\"]\nuser_table = dynamodb.Table(USER_TABLE)\n\n@validate_body(Schema=CompleteProfileSchema())\ndef lambda_handler(event, context):\n data = json.loads(event[\"body\"])\n\n try:\n uuid = event[\"requestContext\"][\"authorizer\"]['claims']['sub']\n email = event[\"requestContext\"][\"authorizer\"]['claims']['email']\n is_profile_completed = event[\"requestContext\"][\"authorizer\"]['claims']['custom:is_profile_completed']\n \n\n if is_profile_completed == \"True\":\n return get_success_response(\n status_code=400, \n message='Bad Request',\n data={\n \"message\": \"Profile already completed \"\n }\n )\n \n # uuid = \"54e79bc2-600d-4dff-aafd-c4f8dcf4f260\"\n # email = \"ammer@gmail.com\"\n \n business_name = data[\"business_name\"]\n phone_no = data[\"phone_no\"]\n landline_no = data.get(\"landline_no\")\n\n pool_id = os.environ[\"USER_POOL_ID\"]\n service_provider_group = os.environ[\"SERVICE_PROVIDER_GROUP\"]\n user_group = os.environ[\"USER_POOL_GROUP\"]\n\n cognito_client.admin_update_user_attributes(\n UserPoolId = pool_id,\n Username = email,\n UserAttributes = [\n {\n 'Name': 'custom:is_profile_completed',\n 'Value': 'True'\n }\n ]\n )\n\n cognito_client.admin_remove_user_from_group(\n UserPoolId = pool_id,\n Username = email,\n GroupName = user_group\n )\n \n # Add user to service provider group\n cognito_client.admin_add_user_to_group(\n UserPoolId = pool_id,\n Username = email,\n GroupName = service_provider_group\n )\n 
updated_at = utils.get_timeStamp()\n user_object = user_table.delete_item(\n Key = {\n \"Pk\": str(uuid),\n \"Sk\": \"Profile#User\"\n },\n ReturnValues = 'ALL_OLD'\n )[\"Attributes\"]\n\n put_query = {\n \"Pk\": str(uuid),\n \"Sk\": \"Profile#ServiceProvider\",\n \"created_at\": user_object[\"created_at\"],\n \"updated_at\": updated_at,\n \"name\": user_object[\"name\"],\n \"business_name\": business_name,\n \"phone_no\": phone_no\n }\n\n if landline_no:\n put_query[\"landline_no\"] = landline_no\n \n user_table.put_item(\n Item = {\n **put_query\n }\n )\n\n return get_success_response(\n status_code=200, \n message='Success',\n data={\n \"message\": \"Profile Completed Successfully\"\n }\n )\n except ClientError as e:\n return get_custom_error(\n status_code=500, \n message='Error',\n data={\n \"message\": e.response['Error']['Message']\n }\n )\n ", "repo_name": "MuhammadIlyas269/AWS-Serverless-Lafete", "sub_path": "src/api/service_provider/complete_profile.py", "file_name": "complete_profile.py", "file_ext": "py", "file_size_in_byte": 3467, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "boto3.client", "line_number": 14, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "common.response_builder.get_success_response", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "common.utils.get_timeStamp", "line_number": 74, "usage_type": "call"}, {"api_name": "common.utils", "line_number": 74, "usage_type": "name"}, {"api_name": "common.response_builder.get_success_response", "line_number": 102, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 109, "usage_type": "name"}, {"api_name": "common.response_builder.get_custom_error", "line_number": 110, "usage_type": "call"}, {"api_name": "common.decorators.validate_body", "line_number": 21, "usage_type": "call"}, {"api_name": "validation.complete_profile.CompleteProfileSchema", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "33486751429", "text": "\"\"\"\nauthor: nitin tiwari\nemail:er.nitintiwari548@gmail.com\n\"\"\"\n\nfrom utils.model import Perceptron\nfrom utils.all_utils import prepare_data, save_model, save_plot\nimport pandas as pd\nimport logging\n\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s] %(message)s\"\nlogging.basicConfig(level=logging.INFO, format=logging_str)\n\ndef main(data, eta, epochs, filename, plotName):\n df = pd.DataFrame(data)\n logging.info(f\"This is actual dataframe{df}\")\n X,y = prepare_data(df)\n\n model =Perceptron(eta=eta, epochs=epochs)\n model.fit(X,y)\n\n _ = model.total_loss() # underscore _ is adummy variable , you can remove it if you want\n\n save_model(model, filename=filename)\n save_plot(df, plotName, model)\n\n\nif __name__ == '__main__': \n \n AND ={\n \"x1\":[0,0,1,1],\n \"x2\":[0,1,0,1],\n \"y\":[0,0,0,1],\n }\n ETA = 0.3\n EPOCHS = 10\n\n main(data=AND, eta=ETA, epochs=EPOCHS, filename=\"and.model\", plotName=\"and.png\")\n", "repo_name": "Nitin-9/oneNeuron", "sub_path": "and.py", "file_name": "and.py", "file_ext": "py", "file_size_in_byte": 
961, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.all_utils.prepare_data", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.model.Perceptron", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.all_utils.save_model", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.all_utils.save_plot", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "30188217029", "text": "from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.popup import Popup\nimport os\n\nclass main(App):\n\n def save_doc(a):\n save_popup.dismiss()\n pass\n\n def close_doc(a):\n close_popup.dismiss()\n main().stop()\n pass\n\n def save_popup(self):\n s = BoxLayout(orientation='vertical')\n global save_popup\n save_popup = Popup(title=title+'.txt', size_hint=(None, None), size=(400, 200), auto_dismiss=False)\n saved = Label(text='To the path:')\n loc = TextInput(text=title + '.txt', multiline=False)\n sve_btn = Button(text='Save')\n sve_btn.bind(on_release=main.save_doc)\n cls_btn = Button(text='Cancel')\n cls_btn.bind(on_release=save_popup.dismiss)\n sve = BoxLayout(orientation='horizontal')\n sve.add_widget(sve_btn)\n sve.add_widget(cls_btn)\n s.add_widget(saved)\n s.add_widget(loc)\n s.add_widget(sve)\n save_popup.add_widget(s)\n save_popup.open()\n\n def close_popup(self):\n c = BoxLayout(orientation='vertical')\n d = BoxLayout(orientation='horizontal')\n global close_popup\n close_popup = Popup(title='Close', size_hint=(None, None), size=(400, 200), auto_dismiss=False)\n closed = Label(text='Are you sure?')\n loc = Label(text='')\n cl_btn = Button(text='Cancel')\n cl_btn.bind(on_release=close_popup.dismiss)\n cls_btn = Button(text='Yes')\n cls_btn.bind(on_release=main.close_doc)\n d.add_widget(cls_btn)\n d.add_widget(cl_btn)\n c.add_widget(closed)\n c.add_widget(loc)\n c.add_widget(d)\n close_popup.add_widget(c)\n close_popup.open()\n\n def build(self):\n global title\n title = 'untitled'\n head = BoxLayout (orientation='horizontal', size_hint=(1, .05))\n save_btn = Button(text='S', size_hint=(.1, 1))\n save_btn.bind(on_release=main.save_popup)\n close_btn = Button(text='C', size_hint=(.1, 1))\n close_btn.bind(on_release=main.close_popup)\n doc_title = Label(text=title+'.txt', size_hint=(.8, 1))\n head.add_widget(doc_title)\n head.add_widget(save_btn)\n head.add_widget(close_btn)\n txt_input = TextInput(scroll_x=1, size_hint=(1, .8))\n content = GridLayout(cols=1, padding=20)\n content.add_widget(head)\n content.add_widget(txt_input)\n return content\n\nif __name__ == '__main__':\n main().run()\n", "repo_name": "mischajoerg/kNote", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2647, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "kivy.app.App", "line_number": 10, "usage_type": "name"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 22, "usage_type": "call"}, {"api_name": 
"kivy.uix.popup.Popup", "line_number": 24, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 25, "usage_type": "call"}, {"api_name": "kivy.uix.textinput.TextInput", "line_number": 26, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 27, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 29, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 31, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 41, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 42, "usage_type": "call"}, {"api_name": "kivy.uix.popup.Popup", "line_number": 44, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 45, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 46, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 47, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 49, "usage_type": "call"}, {"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 62, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 63, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 65, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 67, "usage_type": "call"}, {"api_name": "kivy.uix.textinput.TextInput", "line_number": 71, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "2048858529", "text": "import os\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Convolution2D, MaxPooling2D, Dropout, Flatten\nfrom keras.optimizers import Adam\n\n\nEPISODES = 3_000\n\nUPDATE_TARGET_MODEL_EVERY = 200\nSAVE_EVERY = 200\nPLOT_EVERY = 10\n\nREPLAY_MEMORY_SIZE = 10_000\nMIN_TRAIN_SAMPLE = 1_000 # Avoid overfitting the first houndred samples\nBATCH_SIZE = 32\n\nGAMMA = 0.95\n\nEPSILON = 1\nEPSILON_DECAY = 0.999\nMIN_EPSILON = 0.1\n\nRENDER_EVERY = 200\nSHOW_GAME_OVER = False\nMAX_ACTIONS = 7 * 6\n\n'''\nModel naming:\nConv2D : {filters}c\nMaxPooling2D : m\nDense : {units}d\nDropout : d\n'''\n\nMODEL_NAME = \"8c-d-32d-d-16d\"\n\n\nclass Brain:\n\n\tdef __init__(self, param, model=None):\n\t\t# Game settings\n\t\tself.param = param\n\n\t\t# Unstable Model\n\t\tself.model = self.create_model(model)\n\n\t\t# Stable Model\n\t\tself.target_model = self.create_model(model)\n\t\t# Syncronize taget_model with model\n\t\tself.update_target_model()\n\n\tdef create_model(self, model=None):\n\t\tif model == None:\n\t\t\tmodel = Sequential()\n\n\t\t\tmodel.add(Convolution2D(8, (4, 4), padding=\"valid\", input_shape=(7, 6, 1), activation=\"relu\"))\n\t\t\t# model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))\n\t\t\tmodel.add(Flatten())\n\t\t\tmodel.add(Dropout(0.2))\n\t\t\tmodel.add(Dense(32, activation=\"relu\"))\n\t\t\tmodel.add(Dropout(0.2))\n\t\t\t# model.add(Dense(32, activation=\"relu\"))\n\t\t\tmodel.add(Dense(16, activation=\"relu\"))\n\t\t\tmodel.add(Dense(self.param[\"ACTION_SPACE\"], activation=\"tanh\"))\n\n\t\t\tmodel.compile(optimizer=Adam(), loss=\"mse\")\n\n\t\tprint(model.summary())\n\n\t\treturn model\n\n\tdef update_target_model(self):\n\t\tself.target_model.set_weights(self.model.get_weights())\n\n\tdef save(self, directory, name):\n\t\tif not os.path.exists(directory):\n\t\t\tos.makedirs(directory)\n\t\tself.target_model.save(f\"{directory}/{name}\")\n\n\tdef train(self, batch_size, batch_per_epoch, 
epochs):\n\t\tself.model.fit_generator()\n\n\tdef on_epoch_end(self):\n\t\tself.update_target_model()\n\n", "repo_name": "cberger1/ConnectFourDQN", "sub_path": "brain.py", "file_name": "brain.py", "file_ext": "py", "file_size_in_byte": 1885, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "keras.models.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 58, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "11922196866", "text": "import requests\nimport os\nfrom dotenv import load_dotenv\nfrom twilio.rest import Client\n\nSTOCK = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\nload_dotenv()\n\nSTOCK_APY_KEY = os.environ.get('STOCKS_API_KEY')\nstock_endpoint = 'https://www.alphavantage.co/query'\nstock_parameters = {\n 'function' : 'TIME_SERIES_DAILY_ADJUSTED',\n 'symbol' : STOCK,\n 'apikey' : STOCK_APY_KEY\n}\n\nNEWS_API_KEY = os.environ.get('NEWS_API_KEY')\nnews_endpoint = 'https://newsapi.org/v2/everything'\nnews_parameters = {\n 'apiKey' : NEWS_API_KEY,\n 'q' : COMPANY_NAME,\n 'language' : 'en'\n}\n\naccount_sid = os.environ.get('SID')\nauth_token = os.environ.get('AUTH_TOKEN')\n\nstock_response = requests.get(stock_endpoint, params=stock_parameters)\nstock_response.raise_for_status()\nstock_data = stock_response.json()['Time Series (Daily)']\nkey_list = list(stock_data.keys())\nyesterday = float(stock_data[key_list[0]]['4. close'])\nbefore_yesterday = float(stock_data[key_list[1]]['4. close'])\npercentage_difference = round(((yesterday - before_yesterday)/yesterday)*100)\nup_down = None\nif percentage_difference > 0:\n up_down= '🔺'\nelse:\n up_down = '🔻'\n\nif abs(percentage_difference) > 2:\n news_response = requests.get(news_endpoint, params=news_parameters)\n news_response.raise_for_status()\n articles = news_response.json()['articles']\n three_first_articles = articles[:3]\n sms_content = [f\"{STOCK}: {up_down}{percentage_difference}%\\nHeadline: {article['title']}. 
\\nDescription: {article['description']}\" for article in three_first_articles]\n print(sms_content)\n client = Client(account_sid, auth_token)\n for article in sms_content:\n message = client.messages.create(\n body=article,\n from_=\"+15074456521\",\n to=\"+5584999533866\"\n )\n", "repo_name": "jvdpt0/Stock-Trading-News-Alert", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1764, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 18, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 43, "usage_type": "call"}, {"api_name": "twilio.rest.Client", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "24187289710", "text": "# pytorch implementation of filterbank pyramid filter\nimport cv2\nimport torch\nimport torch.nn.functional as F\n\n\nclass Spyr_PyTorch(object):\n\n def __init__(self, filter, device, height=5, nbands=4, sub_sample=True):\n '''\n :param filter: a function which returns filter parameters\n :param height:\n :param nbands:\n :param sub_sample: it shoule be Ture, haven't implemented the non-downsampling version\n :param device:\n :param wsize: window size\n '''\n params = vars()\n del params['self']\n self.__dict__.update(params)\n\n def build(self, img):\n '''\n :param img [N,C=1,H,W]\n :return:\n '''\n hi0filt = self.filter['hi0filt']\n lo0filt = self.filter['lo0filt']\n\n hi0 = self._conv2d(img, hi0filt)\n lo0 = self._conv2d(img, lo0filt)\n\n return [hi0] + self._buildLevs(lo0, self.height-1)\n\n def _buildLevs(self, lo0, height):\n if height<=1:\n return [lo0]\n\n coeffs = []\n bfilts = self.filter['bfilts']\n for ori in range(self.nbands):\n coeffs.append(self._conv2d(lo0, bfilts[ori]))\n\n lo = self._conv2d(lo0, self.filter['lofilt'])\n if self.sub_sample: # sub-sampling\n lo = lo[:,:,::2,::2] # same as F.interpolate\n\n return [coeffs] + self._buildLevs(lo, height-1)\n\n def _conv2d(self, img, kernel):\n # circular padding\n pad = kernel.shape[-1]//2\n img = torch.cat([img, img[:,:, 0:pad,:]], dim=-2)\n img = torch.cat([img, img[:,:,:, 0:pad]], dim=-1)\n img = torch.cat([img[:,:, -2 * pad:-pad,:], img], dim=-2)\n img = torch.cat([img[:,:,:, -2 * pad:-pad], img], dim=-1)\n\n # F.conv2d\n return F.conv2d(img, kernel)\n\n def getlist(self, coeff):\n straight = [bands for scale in coeff[1:-1] for bands in scale]\n straight = [coeff[0]] + straight + [coeff[-1]]\n return straight\n\n\nif __name__ == \"__main__\":\n device = torch.device('cuda:0')\n from sp3Filters import sp3Filters\n s = Spyr_PyTorch(sp3Filters, sub_sample=True, device = device)\n\n image_path = '../data/0.png'\n img = cv2.imread(image_path, 0)\n img = torch.tensor(img).to(device)\n img = img.unsqueeze(0).unsqueeze(0).float()/255\n\n import pdb;\n pdb.set_trace()\n coeffs = 
s.build(img.double())\n", "repo_name": "KaixuanZ/STSIM-compression-public", "sub_path": "filterbank/Spyr_PyTorch.py", "file_name": "Spyr_PyTorch.py", "file_ext": "py", "file_size_in_byte": 2367, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.cat", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.functional.conv2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 68, "usage_type": "call"}, {"api_name": "sp3Filters.sp3Filters", "line_number": 70, "usage_type": "argument"}, {"api_name": "cv2.imread", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 74, "usage_type": "call"}, {"api_name": "pdb.set_trace", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "71658761764", "text": "from flask import Flask, render_template, request\nimport requests\n\napp = Flask(__name__)\n\n# API keys\nSKYSCANNER_API_KEY = \"abcdef1234567890\"\nHOTELS_COMBINED_API_KEY = \"ghijkl1234567890\"\nOPENWEATHERMAP_API_KEY = \"mnopqr1234567890\"\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n destination = request.form[\"destination\"]\n travel_dates = request.form[\"travel_dates\"]\n\n # Fetch flight data from Skyscanner API\n skyscanner_url = f\"https://api.example.com/skyscanner?api_key={SKYSCANNER_API_KEY}&destination={destination}&dates={travel_dates}\"\n skyscanner_response = requests.get(skyscanner_url)\n flight_data = skyscanner_response.json()\n \n # Fetch hotel data from Hotels Combined API\n hotels_combined_url = f\"https://api.example.com/hotelscombined?api_key={HOTELS_COMBINED_API_KEY}&destination={destination}&dates={travel_dates}\"\n hotels_combined_response = requests.get(hotels_combined_url)\n hotel_data = hotels_combined_response.json()\n \n # Fetch weather data from OpenWeatherMap API\n openweathermap_url = f\"https://api.example.com/openweathermap?api_key={OPENWEATHERMAP_API_KEY}&destination={destination}\"\n weather_response = requests.get(openweathermap_url)\n weather_data = weather_response.json()\n \n # Combine and format the data\n combined_data = {\n \"flight_data\": flight_data,\n \"hotel_data\": hotel_data,\n \"weather_data\": weather_data\n }\n \n return render_template(\"index.html\", data=combined_data)\n\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n", "repo_name": "paulinemutuku/TravelPlanner-Use_of_APIs", "sub_path": "travel.py", "file_name": "travel.py", "file_ext": "py", "file_size_in_byte": 1722, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, 
"usage_type": "name"}, {"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "70360614246", "text": "from flask import Flask, jsonify, request, make_response, render_template, send_file, redirect, url_for\n\nimport requests\n\nfrom flask_qrcode import QRcode as QRcode_generator\nimport json\nimport socket\n\n# instantiate flask object\napp = Flask(__name__)\n\n# add QRcode generator\nqrcode_generator = QRcode_generator(app)\n\n@app.route(\"/\")\ndef index():\n try:\n host_name = socket.gethostname()\n host_ip = socket.gethostbyname(host_name)\n return render_template('index.html', hostname=host_name, ip=host_ip)\n except:\n return render_template('error.html')\n\n\n# add a qrcode\n@app.route(\"/qrcode\", methods=[\"GET\"])\ndef add_qrcode():\n '''\n Request a QR code from asg api. \n If succesfull return template with rendered qr image\n if unsuccesfull return error msg\n\n Input : NONE\n Output : HTML TEMPLATE WITH QR_CODE JSON Object \n '''\n try:\n \n # request a new qr code\n response = requests.get('http://asg-api:8085/qrcode')\n\n # serialize incoming response\n serialized_qrcode = json.loads(response.text)\n\n # return template\n return render_template(\n 'qr_code.html',\n title=serialized_qrcode['uuid'],\n description=\"Generate QR code with unique uuid\",\n json_qrcode=serialized_qrcode\n )\n\n except Exception as e:\n return jsonify({\"Error\": \"Invalid Request, please try again.\" + str(e)})\n\n\n# get all qrcodes\n@app.route(\"/qrcodes\", methods=[\"GET\"])\ndef get_qrcodes():\n '''\n get all qr code objects from QR_Code model class\n and return jinja template with a table rendering \n a new row for each dumped qr code object that exists. 
\n\n Input : None\n Output : All QR_CODE JSON Objects\n '''\n try:\n \n # request a new qr code\n response = requests.get('http://asg-api:8085/qrcodes')\n\n # serialize incoming response\n serialized_qr_codes = json.loads(response.text)\n\n # return template\n return render_template(\n 'qr_codes_overview.html',\n title=\"QR codes\",\n description=\"Show all existing qr codes\",\n qr_codes=serialized_qr_codes\n )\n\n except Exception as e:\n return jsonify({\"Error\": \"Invalid Request, please try again.\" + str(e)})\n\n \n\n# get a specific qrcode\n@app.route(\"/qrcode/\", methods=[\"GET\"])\ndef get_qrcode(qr_uuid):\n '''\n Show QR code info with QR code uuid\n\n Input : QR code uuid\n Ouput : QR code JSON object with corresponding uuid\n '''\n try:\n # request a new qr code\n response = requests.get('http://asg-api:8085/qrcode/' + qr_uuid)\n\n # serialize incoming response\n serialized_qr_code = json.loads(response.text)\n\n # return template\n return render_template(\n 'qr_code.html',\n title=\"QR code: \" + str(serialized_qr_code.uuid),\n description=\"Generate QR code with unique uuid\",\n json_qrcode=serialized_qr_code\n )\n\n except Exception as e:\n return jsonify({\"Error\": \"Invalid Request, please try again.\" + str(e)})\n\n\n\n# update a qrcode\n@app.route(\"/qrcode/scanned/\", methods=[\"PUT\"])\ndef scanned_qrcode(qr_uuid):\n '''\n Update qr code info from html template form\n\n Input : QR Code uuid\n Output : QR Code uuid \n '''\n if request.method == 'PUT':\n \n # update a qr code\n response = requests.put('http://asg-api:8085/qrcode/' + str(qr_uuid))\n\n # serialize incoming response\n serialized_new_qrcode = json.loads(response.text)\n\n # return template\n return render_template(\n 'qr_code.html',\n title=serialized_new_qrcode['uuid'],\n description=\"Generate QR code with unique uuid\",\n json_qrcode=serialized_new_qrcode\n )\n\n# delete qrcode\n@app.route(\"/qrcode/delete/\", methods=[\"DELETE\"])\ndef delete_qrcode(qr_uuid):\n '''\n select qrcode by uuid from QR_Code model class\n deletes qrcode object from database \n commit if succesfull\n '''\n if request.method == 'DELETE':\n \n #delete a qr code\n response = requests.delete('http://asg-api:8085/qrcode/' + str(qr_uuid))\n\n # serialize incoming response\n serialized_new_qrcode = json.loads(response.text)\n\n # return template\n return render_template(\n 'qr_code.html',\n title=serialized_new_qrcode['uuid'],\n description=\"Generate QR code with unique uuid\",\n json_qrcode=serialized_new_qrcode\n )\n\n# get a specific qrcode\n@app.route(\"/login\", methods=[\"GET, POST\"])\ndef get_qrcode(user_uuid):\n '''\n Show QR code info with QR code uuid\n\n Input : QR code uuid\n Ouput : QR code JSON object with corresponding uuid\n '''\n try:\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n # request a new qr code\n response = requests.post('http://asg-api:8085/login/', username=username, password=password)\n # check if response for user login is true\n if response.Text == True:\n # set session\n # return template\n return redirect(url_for(\"user\", user=username))\n else:\n # return template\n return render_template(\n 'login.html',\n username=username\n )\n\n elif request.method == 'GET': \n # return template\n return render_template(\n 'login.html',\n username=\"\"\n )\n\n except Exception as e:\n return jsonify({\"Error\": \"Invalid Request, please try again.\" + str(e)})\n\n@app.route(\"/\")\ndef user(user_id):\n return f\"
{user_id}
\"\n\n# error handeling\n@app.errorhandler(400)\ndef handle_400_error(_error):\n \"\"\"Return a http 400 error to client\"\"\"\n return make_response(jsonify({'error': 'Misunderstood'}), 400)\n\n\n@app.errorhandler(401)\ndef handle_401_error(_error):\n \"\"\"Return a http 401 error to client\"\"\"\n return make_response(jsonify({'error': 'Unauthorised'}), 401)\n\n\n@app.errorhandler(404)\ndef handle_404_error(_error):\n \"\"\"Return a http 404 error to client\"\"\"\n return make_response(jsonify({'error': 'Not found lolzs'}), 404)\n\n\n@app.errorhandler(500)\ndef handle_500_error(_error):\n \"\"\"Return a http 500 error to client\"\"\"\n return make_response(jsonify({'error': 'Server error'}), 500)\n \n \n", "repo_name": "Flowlessx/asg-api", "sub_path": "Dashboard/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6551, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_qrcode.QRcode", "line_number": 13, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 18, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 39, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 76, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 84, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 99, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 102, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 126, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 126, "usage_type": "name"}, {"api_name": "requests.put", "line_number": 129, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 132, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 135, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "requests.delete", "line_number": 153, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 159, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 176, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 176, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 177, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 177, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 178, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 178, "usage_type": "name"}, 
{"api_name": "requests.post", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 193, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 193, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 201, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 211, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 223, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 229, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "8524985115", "text": "from torch.utils.data import Dataset, DataLoader\r\n\r\nclass TextDatasetFromDir(Dataset):\r\n '''\r\n This class is equivalent to keras.utils.text_dataset_from_directory\r\n \r\n it helps to load every dataset from the pre-configured folder structure like:\r\n main_directory/\r\n ...class_a/\r\n ......a_text_1.txt\r\n ......a_text_2.txt\r\n ...class_b/\r\n ......b_text_1.txt\r\n ......b_text_2.txt\r\n \r\n '''\r\n def __init__(self, root, transform=None, target_transform=None, loader=default_loader):\r\n self.root = root\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.loader = loader\r\n\r\n self.classes = sorted([d.name for d in os.scandir(self.root) if d.is_dir()])\r\n self.class_to_idx = {cls_name: idx for idx, cls_name in enumerate(self.classes)}\r\n self.samples = self._load_samples()\r\n\r\n def _load_samples(self):\r\n samples = []\r\n \r\n for class_name in self.classes:\r\n class_dir = os.path.join(self.root, class_name)\r\n if not os.path.isdir(class_dir):\r\n continue\r\n with tqdm(total=12000) as pbar:\r\n for filename in os.listdir(class_dir):\r\n path = os.path.join(class_dir, filename)\r\n if not os.path.isfile(path):\r\n continue\r\n if self._has_valid_extension(filename):\r\n item = (path, self.class_to_idx[class_name])\r\n samples.append(item)\r\n pbar.update()\r\n return samples\r\n\r\n def _has_valid_extension(self, filename):\r\n valid_extensions = ['.txt'] # Add more extensions if needed\r\n return any(filename.endswith(ext) for ext in valid_extensions)\r\n\r\n def __getitem__(self, index):\r\n path, target = self.samples[index]\r\n sample = self.loader(path)\r\n if self.transform is not None:\r\n sample = self.transform(sample)\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n return sample, target\r\n\r\n def __len__(self):\r\n return len(self.samples)\r\n", "repo_name": "MarioRasconMerinoML/Machine_Learning", "sub_path": "NLP/Intro_to_deep_learning_for_text/pytorch/TextDatasetFromDir.py", "file_name": "TextDatasetFromDir.py", "file_ext": "py", "file_size_in_byte": 2215, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 3, "usage_type": 
"name"}]} +{"seq_id": "41292744160", "text": "from django.urls import path\n\nfrom zhanhu.articles import views\n\napp_name = 'articles'\n\nurlpatterns = [\n path('', views.ArticlesListView.as_view(), name='list'),\n path('write-new-article', views.ArticleCreateView.as_view(), name='write_new'),\n path('drafts', views.DraftListView.as_view(), name='drafts'),\n]\n", "repo_name": "bingwin/mklearn", "sub_path": "django/zhanhu/zhanhu/articles/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 317, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.ArticlesListView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.ArticlesListView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "zhanhu.articles.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.ArticleCreateView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.ArticleCreateView", "line_number": 9, "usage_type": "attribute"}, {"api_name": "zhanhu.articles.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.DraftListView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "zhanhu.articles.views.DraftListView", "line_number": 10, "usage_type": "attribute"}, {"api_name": "zhanhu.articles.views", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "4286198955", "text": "__name__ = \"gptty.context\"\n__author__ = \"Sig Janoska-Bedi\"\n__credits__ = [\"Sig Janoska-Bedi\"]\n__version__ = \"0.2.5\"\n__license__ = \"MIT\"\n__maintainer__ = \"Sig Janoska-Bedi\"\n__email__ = \"signe@atreeus.com\"\n\n\nimport click\nimport tiktoken\nfrom textblob import TextBlob\nfrom collections import Counter, defaultdict\nfrom nltk.corpus import stopwords\n\n\nYELLOW = \"\\033[1;33m\"\nRESET = \"\\033[0m\"\n\ndef verify_added_phrase(phrase:str,context:str, max_len:int) -> bool:\n\n if len(context) + len(phrase) <= max_len:\n return True\n\n return False\n\ndef get_token_count(s, model_name):\n \"\"\"Returns the number of tokens in a text string.\"\"\"\n encoding = tiktoken.encoding_for_model(model_name)\n num_tokens = len(encoding.encode(s))\n return num_tokens\n\ndef return_most_common_phrases(text:str, weight_recent=True) -> list:\n\n # Extract noun phrases using TextBlob\n blob = TextBlob(text)\n noun_phrases = blob.noun_phrases\n\n # Remove stopwords from noun phrases\n stop_words = set(stopwords.words('english'))\n filtered_noun_phrases = []\n for np in noun_phrases:\n words = np.split()\n filtered_words = [word for word in words if word not in stop_words]\n if filtered_words:\n filtered_noun_phrases.append(' '.join(filtered_words))\n\n if not weight_recent:\n\n # Count the frequency of the noun phrases\n noun_phrase_counts = Counter(filtered_noun_phrases)\n\n # Get the most frequent key phrases\n return [phrase for phrase, count in noun_phrase_counts.most_common()]\n\n # Count the weighted frequency of the noun phrases\n noun_phrase_weighted_counts = defaultdict(int)\n total_phrases = len(filtered_noun_phrases)\n\n for i, phrase in enumerate(filtered_noun_phrases):\n weight = (i + 1) / total_phrases # Assign a higher weight to phrases that appear later in 
the text\n noun_phrase_weighted_counts[phrase] += weight\n\n # Get the most frequent key phrases\n return [phrase for phrase, count in sorted(noun_phrase_weighted_counts.items(), key=lambda x: x[1], reverse=True)]\n\ndef get_context(tag: str, \n max_context_length: int, \n output_file: str, \n model_name:str, \n context_keywords_only: bool = True, \n additional_context: str = \"\",\n model_type: str = None, \n question: str = None, \n debug: bool = False):\n\n # additional_context = additional_context.replace(\"\\n\",'')\n\n if len(tag) < 1:\n if model_type == 'v1/chat/completions':\n\n context = [{\"role\": \"user\", \"content\": question}]\n\n if len(additional_context) > 0:\n # at this point we've added all the elements to context that we believe we should, so let's add any \n # additional context that we passed.\n remaining_tokens = max_context_length - len(question)\n context = [{\"role\": \"system\", \"content\": ' '.join(additional_context.split()[:remaining_tokens])}] + context\n\n\n if debug:\n click.echo(YELLOW + '-' * 25)\n click.echo(f'[debug]\\nmodel: {model_name}\\ntokens: {get_token_count(question, model_name)}\\nwords: {len(question.split()) }\\ntext: {question}') # debug - print the context to see what it looks like\n click.echo('-' * 25 + RESET)\n \n return context\n\n else:\n\n if len(additional_context) > 0:\n\n remaining_tokens = max_context_length - (len(question.split()))\n if remaining_tokens > 0:\n question = ' '.join(additional_context.split()[:remaining_tokens]) + \" \" + question\n\n\n if debug:\n click.echo(YELLOW + '-' * 25)\n click.echo(f'[debug]\\nmodel: {model_name}\\ntokens: {get_token_count(question, model_name)}\\nwords: {len(question.split())}\\ntext: {question}') # debug - print the context to see what it looks like\n click.echo('-' * 25 + RESET)\n\n return question\n\n with open(output_file, 'r') as f:\n text = f.read().strip().split('\\n')\n\n if model_type == 'v1/chat/completions':\n context = []\n\n for row in reversed(text):\n data = [item.strip() for item in row.split('|')]\n\n if (sum(len(item[\"content\"].split()) for item in context) + len(data[2].split()) + len(data[3].split()) + len(question.split())) > max_context_length:\n break\n\n if data[1] == tag:\n context = [{\"role\": \"assistant\", \"content\": data[3]}] + context\n context = [{\"role\": \"user\", \"content\": data[2]}] + context\n\n context.append({\"role\": \"user\", \"content\": question})\n \n if len(additional_context) > 0:\n # at this point we've added all the elements to context that we believe we should, so let's add any \n # additional context that we passed.\n remaining_tokens = max_context_length - (sum(len(item[\"content\"].split()) for item in context))\n context = [{\"role\": \"system\", \"content\": ' '.join(additional_context.split()[:remaining_tokens])}] + context\n\n if debug:\n token_count = \" \".join([x['content'] for x in context])\n click.echo(YELLOW + '-' * 25)\n click.echo(f'[debug]\\nmodel: {model_name}\\ntokens: {get_token_count(token_count, model_name)}\\nwords: {sum(len(item[\"content\"].split()) for item in context)}\\ntext: {context}') # debug - print the context to see what it looks like\n click.echo('-' * 25 + RESET)\n\n\n else:\n context = \"\"\n for row in text:\n data = [item.strip() for item in row.split('|')]\n\n if data[1] == tag:\n context += ' ' + data[2] + ' ' + data[3]\n\n if context_keywords_only:\n phrases = return_most_common_phrases(additional_context+context) # here we prepend the context with the additional_context string\n context = 
\"\" # maybe not the cleanest way to do this, but we are resetting the context here\n\n for phrase in phrases:\n if (len(context.split()) + len(phrase.split()) + len(question.split())) > max_context_length:\n break\n context += \" \" + phrase\n\n else:\n c = \"\"\n context_words = context.split()\n\n for i in range(len(context_words)):\n if (len(c.split()) + len(question.split())) >= max_context_length:\n break\n c += ' ' + context_words[i]\n\n context = c.strip()\n\n # prepend `context` with `additional_context` if we have any tokens remaining.\n # WARNING - this may create unexpected behavior, especially if a question is \n # contained within the additional context passed, that may provide seemingly \n # inexplicable responses.\n remaining_tokens = max_context_length - (len(context.split()) + len(question.split()))\n if remaining_tokens > 0:\n context = ' '.join(additional_context.split()[:remaining_tokens]) + \" \" + context\n\n\n context = context.strip() + ' ' + question\n \n\n if debug:\n click.echo(YELLOW + '-' * 25)\n click.echo(f'[debug]\\nmodel: {model_name}\\ntokens: {get_token_count(context, model_name)}\\nwords: {len(context.split())}\\ntext: {context}') # debug - print the context to see what it looks like\n click.echo('-' * 25 + RESET)\n\n return context", "repo_name": "tommybahamiboy/gptty", "sub_path": "gptty/context.py", "file_name": "context.py", "file_ext": "py", "file_size_in_byte": 7531, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "52", "api": [{"api_name": "tiktoken.encoding_for_model", "line_number": 29, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 36, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 40, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 40, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 51, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 57, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 92, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 93, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 94, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 108, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 109, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 110, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 140, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 141, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 142, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 186, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 187, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "70756062245", "text": "#! 
python3\n# -*- encoding: utf-8 -*-\n'''\n@File : train.py\n@Time : 2021/07/23 11:23:39\n@Author : SWS SUMMERWORKSHOP GROUP 9\n@Version : 1.0\n@Description : Bonus part of project 'Masked Unmasked Face Recognition'\n'''\n# Headers to be included:\nimport random\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\nimport torchvision\nimport time\nfrom torch.optim.lr_scheduler import ExponentialLR\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\nif __name__ == \"__main__\":\n batch_size = 16\n photosize=128\n\n # 设置随机数种子\n setup_seed(1896)\n\n # 定义数据集处理方式\n transform=transforms.Compose([\n transforms.Resize(photosize), #缩放图片(Image),保持长宽比不变,最短边为128像素\n transforms.CenterCrop(photosize), #从图片中间裁剪出128*128的图片\n transforms.ToTensor(), #将图片Image转换成Tensor,归一化至【0,1】\n transforms.Normalize(mean=[.5,.5,.5],std=[.5,.5,.5]) #标准化至【-1,1】,规定均值和方差\n ])\n\n #导入cifar10数据集\n train_set=datasets.CIFAR10('data/',download=True, train=True, transform=transform)\n val_set=datasets.CIFAR10('data/',download=True, train=False, transform=transform)\n \n # train_set, val_set,leaveout= torch.utils.data.random_split(dataset= val_set, lengths=[2000, 500, 7500])\n train_loader=DataLoader(train_set,batch_size=batch_size,shuffle=True,pin_memory=True,num_workers=16)\n val_loader=DataLoader(val_set,batch_size=batch_size,shuffle=True,pin_memory=True,num_workers=16)\n print(\"Size of training data = {}\".format(len(train_set)))\n print(\"Size of validation data = {}\".format(len(val_set)))\n print('数据集导入完成')\n\n # 搭建模型 model,采用resnet18为主体结构;此处可改为alexnet、vgg等结构,相应地改变其最后全连接层即可。对alexnet、vgg为model.classifier[6]\n model = torchvision.models.resnet18(pretrained=True)\n num_fc = model.fc.out_features\n # 微调网络结构,将输出改为10维\n model.fc = nn.Sequential(model.fc, nn.ReLU(), nn.Dropout(0.4), nn.Linear(num_fc,10))\n # 开启全网络的梯度流运算\n for param in model.parameters(): param.requires_grad = True\n print('模型导入完成')\n\n model=model.cuda()\n # 采用GPU运行程序\n device = torch.device('cuda:0')\n print(device)\n print('GPU device count: ',torch.cuda.device_count())\n # 如果有多张显卡\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model,device_ids=[0,1,2])\n model=model.cuda()\n torch.backends.cudnn.benchmark = True\n\n start_time = time.time()\n\n # 构造损失函数 loss,采用交叉熵函数;\n # 构造优化器 optimizer;\n # 设定训练次数 num_epoch;\n loss = nn.CrossEntropyLoss()\n learning_rate = 0.0001\n optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate) \n\n # 设定学习率指数衰减\n scheduler = ExponentialLR(optimizer, gamma=0.9)\n print(\"初始化的学习率:\", optimizer.defaults['lr'])\n num_epoch = 1\n val_acc_best = 0.0\n\n # 训练 并print每个epoch的结果;\n for epoch in range(num_epoch):\n print(\"第%d个epoch的学习率:%f\" % (epoch+1, optimizer.param_groups[0]['lr']))\n\n epoch_start_time = time.time()\n train_acc = 0.0\n train_loss = 0.0\n val_acc = 0.0\n val_loss = 0.0\n\n model.train() \n for i, data in enumerate(train_loader):\n optimizer.zero_grad() # 用 optimizer 将 model 参数的 gradient 归零\n train_pred = model(data[0].to(device)) # 调用 model 的 forward 函數\n batch_loss = loss(train_pred, data[1].to(device)) # 计算 loss\n batch_loss.backward() # 利用 back propagation 算出每个参数的 gradient\n optimizer.step() # 以 optimizer 用 gradient 更新参数值\n\n train_acc += np.sum(\n np.argmax(train_pred.cpu().data.numpy(), axis=1) ==\n 
data[1].numpy())\n train_loss += batch_loss.item()\n break\n\n model.eval()\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n val_pred = model(data[0].to(device))\n print('11111111111111111111')\n print(val_pred)\n print(data[1])\n batch_loss = loss(val_pred, data[1].to(device))\n break\n\n val_acc += np.sum(\n np.argmax(val_pred.cpu().data.numpy(), axis=1) ==\n data[1].numpy())\n val_loss += batch_loss.item()\n\n train_acc /= train_set.__len__()\n train_loss /= train_set.__len__()\n val_acc /= val_set.__len__()\n val_loss /= val_set.__len__()\n\n # 将结果 print 出来\n print(\n '[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f | Val Acc: %3.6f loss: %3.6f'\n % (epoch + 1, num_epoch, time.time() - epoch_start_time, train_acc,\n train_loss, val_acc, val_loss))\n\n # 记录最好的结果 并保存模型\n if val_acc > val_acc_best:\n val_acc_best = val_acc\n torch.save(model.state_dict(), 'resnet18_model_best.pth.tar')\n print('Save model')\n\n # 学习率指数衰��一次,衰减因子为0.9\n scheduler.step()\n\n print('Best accuracy on validation set: %3.6f' % val_acc_best)\n endtime=time.time()\n print('模型训练总用时:',endtime-start_time,'秒')", "repo_name": "Daniel-ChenJH/Masked-Unmasked-Face-Recognition", "sub_path": "bonus part/resnet_bonus.py", "file_name": "resnet_bonus.py", "file_ext": "py", "file_size_in_byte": 5835, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.manual_seed", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 29, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 40, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 40, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 41, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 41, "usage_type": "name"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 42, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 42, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 43, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 43, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 44, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 44, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 48, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 48, "usage_type": "name"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 49, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.models.resnet18", "line_number": 59, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn.Sequential", "line_number": 62, "usage_type": 
"call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.Dropout", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 71, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 73, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.backends", "line_number": 76, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ExponentialLR", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 128, "usage_type": "call"}, {"api_name": "time.time", "line_number": 140, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 146, "usage_type": "call"}, {"api_name": "time.time", "line_number": 153, "usage_type": "call"}]} +{"seq_id": "13134308178", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nQuery the Kubikat service for metadata.\n\"\"\"\nimport bs4\nimport re\nimport urllib\n\nfrom common.config import config\nfrom common.logger import log\nfrom common.misc import fields, fill_with_none\n\n_KUBIKAT_SERVICE = 'https://aleph.mpg.de/F/'\n_KUBIKAT_FILTERS = '&adjacent=N&filter_code_1=WSP&filter_request_1=&filter_code_2=WYR&filter_request_2=&filter_code_3' \\\n '=WYR&filter_request_3=&filter_code_4=WEF&filter_request_4=&local_base=KUB01&filter_code_7=WEM' \\\n '&filter_code_8=WAK&con_lng=eng '\nSERVICE_URL = _KUBIKAT_SERVICE + '?func=find-b&find_code=WRD&request={title}' + _KUBIKAT_FILTERS\n\n\ndef book_parser(data):\n \"\"\"\n Parses the html page of a title and returns all the relevant information\n\n :param: html data\n\n :return: all relevant info, if found, empty dict if an error occurs\n\n :rtype: a dict\n \"\"\"\n book_data = {}\n\n soup = bs4.BeautifulSoup(data, features='html.parser')\n\n try:\n results = soup.find(id=\"Person(s)-tr\").parent\n\n # Extract title\n book_data[fields.TITLE] = str(results.find('tr', id=\"Title-tr\").find('a').text)\n\n # Extract subtitle if exists\n if results.find('tr', id=\"Remainder of title-tr\"):\n book_data[fields.TITLE] += str(\" \") + str(\n results.find('tr', id=\"Remainder of title-tr\").find('span').text.replace('\\\\n', ''))\n\n log.debug('{name}: {value}'.format(name=fields.TITLE, value=book_data[fields.TITLE]))\n\n # Extract list of author(s)\n raw = results.find('tr', id=\"Person(s)-tr\")\n 
names = list()\n names.append(str(raw.find('a').string))\n siblings = raw.find_next_siblings('tr')\n for sibling in siblings:\n if sibling['id'] != '-tr':\n break\n names.append(str(sibling.find('a').string))\n\n book_data[fields.AUTHORS] = names\n log.debug('{name}: {value}'.format(name=fields.AUTHORS, value=book_data[fields.AUTHORS]))\n\n # Extract publisher and year\n raw = results.find('tr', id='Publication-tr')\n if raw:\n book_data[fields.PUBLISHER] = str(raw.find('span').string)\n log.debug('{name}: {value}'.format(name=fields.PUBLISHER, value=book_data[fields.PUBLISHER]))\n\n raw = results.find('tr', id='Responsibility-tr')\n if raw:\n book_data[fields.RESPONSIBILITY] = str(raw.find('span').string)\n log.debug('{name}: {value}'.format(name=fields.RESPONSIBILITY, value=book_data[fields.RESPONSIBILITY]))\n\n raw = results.find('tr', id='ISBN-tr')\n if raw:\n raw = raw.find('span').string.split(\"(\")\n book_data[fields.ISBN] = raw[0]\n log.debug('{name}: {value}'.format(name=fields.ISBN, value=book_data[fields.ISBN]))\n\n raw = results.find('tr', id='External File')\n if raw:\n for td in raw.find_all('td'):\n if td.find('span').find('a'):\n raw = td.find('span').find('a', href=True)['href']\n href = re.findall(r'\\\"(.+?)\\\"', raw)[0]\n import ssl\n context = ssl._create_unverified_context()\n contents = urllib.request.urlopen(href, context=context).read()\n contents = bs4.BeautifulSoup(contents, features='html.parser')\n contents = urllib.request.urlopen(\n urllib.request.Request(\n contents.find('body')['onload'].split('\\'')[1],\n headers={\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n })).read()\n contents = bs4.BeautifulSoup(contents, features='html.parser')\n book_data[fields.CONTENT] = str(contents.find('body'))\n log.debug('{name}: {value}'.format(name=fields.CONTENT, value=book_data[fields.CONTENT]))\n\n\n except (AttributeError, KeyError) as ex:\n log.error('Error parsing book page html in Kubikat. {ex}'.format(ex=ex))\n book_data = {}\n\n book_data[fields.RETRIEVED_WITH] = config.KUBIKAT_NAME\n return fill_with_none(book_data)\n\n\ndef results_parser(data, max_results):\n \"\"\"\n Parses html data retrieved and returns a list of all the relevant hrefs\n \"\"\"\n records = list()\n\n soup = bs4.BeautifulSoup(data, features='html.parser')\n\n try:\n result = soup.find_all('tr', {\"valign\": \"baseline\"})\n\n for i in range(min(max_results, len(result))):\n records.append(result[i].find('td').find('a', href=True)['href'])\n\n except (AttributeError, KeyError) as ex:\n log.error('Error parsing results html in Kubikat. 
{ex}'.format(ex=ex))\n records = []\n\n return records\n", "repo_name": "biblhertz/referency", "sub_path": "common/kubikat/kubikat.py", "file_name": "kubikat.py", "file_ext": "py", "file_size_in_byte": 4858, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 32, "usage_type": "call"}, {"api_name": "common.misc.fields.TITLE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 38, "usage_type": "name"}, {"api_name": "common.misc.fields.TITLE", "line_number": 42, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 42, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 45, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 45, "usage_type": "name"}, {"api_name": "common.misc.fields.TITLE", "line_number": 45, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 45, "usage_type": "name"}, {"api_name": "common.misc.fields.AUTHORS", "line_number": 57, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 57, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 58, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 58, "usage_type": "name"}, {"api_name": "common.misc.fields.AUTHORS", "line_number": 58, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 58, "usage_type": "name"}, {"api_name": "common.misc.fields.PUBLISHER", "line_number": 63, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 63, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 64, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 64, "usage_type": "name"}, {"api_name": "common.misc.fields.PUBLISHER", "line_number": 64, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 64, "usage_type": "name"}, {"api_name": "common.misc.fields.RESPONSIBILITY", "line_number": 68, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 68, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 69, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 69, "usage_type": "name"}, {"api_name": "common.misc.fields.RESPONSIBILITY", "line_number": 69, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 69, "usage_type": "name"}, {"api_name": "common.misc.fields.ISBN", "line_number": 74, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 74, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 75, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 75, "usage_type": "name"}, {"api_name": "common.misc.fields.ISBN", "line_number": 75, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 75, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 82, "usage_type": "call"}, {"api_name": "ssl._create_unverified_context", "line_number": 84, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 85, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 85, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 86, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 87, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 
87, "usage_type": "attribute"}, {"api_name": "urllib.request.Request", "line_number": 88, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 88, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 93, "usage_type": "call"}, {"api_name": "common.misc.fields.CONTENT", "line_number": 94, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 94, "usage_type": "name"}, {"api_name": "common.logger.log.debug", "line_number": 95, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 95, "usage_type": "name"}, {"api_name": "common.misc.fields.CONTENT", "line_number": 95, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 95, "usage_type": "name"}, {"api_name": "common.logger.log.error", "line_number": 99, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 99, "usage_type": "name"}, {"api_name": "common.misc.fields.RETRIEVED_WITH", "line_number": 102, "usage_type": "attribute"}, {"api_name": "common.misc.fields", "line_number": 102, "usage_type": "name"}, {"api_name": "common.config.config.KUBIKAT_NAME", "line_number": 102, "usage_type": "attribute"}, {"api_name": "common.config.config", "line_number": 102, "usage_type": "name"}, {"api_name": "common.misc.fill_with_none", "line_number": 103, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 112, "usage_type": "call"}, {"api_name": "common.logger.log.error", "line_number": 121, "usage_type": "call"}, {"api_name": "common.logger.log", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "42789906607", "text": "from sys import argv\nfrom itertools import product, zip_longest, permutations\nfrom collections import Counter, defaultdict\nfrom operator import mul\nfrom functools import reduce as r\n\nimport numpy as np\n\n\ndef read_input(input_file):\n with open(input_file, \"r\") as ifile:\n lines = ifile.readlines()\n return [line.strip() for line in lines]\n\n\n# 1st star\n\n\ndef encode(line):\n nodes = []\n current = []\n i = 0\n while i < len(line):\n if line[i] == \"[\":\n current.append(0)\n try:\n number = int(line[i])\n nodes.append(current + [number])\n current = current[:-1] + [1]\n except ValueError:\n pass\n if line[i] == \"]\":\n current = current[:-2] + [1]\n if line[i] == \",\":\n pass\n i += 1\n return nodes\n\n\ndef explode(encoded_):\n encoded = encoded_.copy()\n for i, node in enumerate(encoded):\n if len(node) >= 6 and all([num == 0 or num == 1 for num in node[-6:-2]]):\n if i > 0:\n encoded[i - 1][-1] += node[-1]\n if i + 1 < len(encoded) - 1:\n encoded[i + 2][-1] += encoded[i + 1][-1]\n new_node = node.copy()\n encoded.remove(node)\n encoded.remove(encoded[i])\n encoded.insert(i, new_node[:-2] + [0])\n return encoded\n return encoded\n\n\ndef split(encoded_):\n encoded = encoded_.copy()\n for i, node in enumerate(encoded):\n number = node[-1]\n if node[-1] >= 10:\n pair1 = int(np.floor(number / 2))\n pair2 = int(np.ceil(number / 2))\n new_node1 = node[:-1] + [0, pair1]\n new_node2 = node[:-1] + [1, pair2]\n encoded.remove(node)\n encoded.insert(i, new_node2)\n encoded.insert(i, new_node1)\n return encoded\n return encoded\n\n\ndef add(encoded1, encoded2):\n new_encoded = []\n for node in encoded1:\n new_encoded.append([0] + node)\n for node in encoded2:\n new_encoded.append([1] + node)\n return new_encoded\n\n\ndef reduce(encoded_):\n encoded = encoded_.copy()\n old_encoded = []\n while old_encoded != encoded:\n while old_encoded != encoded:\n old_encoded = encoded\n encoded = 
explode(encoded.copy())\n old_encoded = encoded\n encoded = split(encoded.copy())\n return encoded\n\n\ndef addred(encoded1, encoded2):\n return reduce(add(encoded1, encoded2))\n\n\ndef calc_magnitude(encoded_):\n mags = encoded_.copy()\n old_mags = []\n while old_mags != mags:\n old_mags = mags\n mags = step_magnitude(mags)\n return mags[0][0]\n\n\ndef step_magnitude(encoded):\n mags = encoded.copy()\n for i, node in enumerate(encoded):\n try:\n if len(node) == len(encoded[i + 1]) and node[:-2] == encoded[i + 1][:-2]:\n mag = node[:-2] + [3 * node[-1] + 2 * encoded[i + 1][-1]]\n mags.remove(encoded[i])\n mags.remove(encoded[i + 1])\n mags.insert(i, mag)\n except IndexError:\n pass\n return sorted(mags)\n\n\ndef homework(encoded):\n added = r(addred, encoded)\n return calc_magnitude(added)\n\n\n# 2nd star\n\n\ndef homework2(encoded):\n perms = permutations(encoded, 2)\n mags = [calc_magnitude(addred(*perm)) for perm in perms]\n return max(mags)\n\n\ndef main():\n input_type = argv[-1] + \".txt\"\n lines = read_input(input_type)\n encoded = [encode(line) for line in lines]\n result = homework(encoded)\n # First puzzle\n print(\"First puzzle: {}\".format(result))\n # Second puzzle\n result = homework2(encoded)\n print(\"Second puzzle: {}\".format(result))\n\n\nif __name__ == \"__main__\":\n main()\n", "repo_name": "manolomartinez/advent_of_code", "sub_path": "2021/Day_18/day18.py", "file_name": "day18.py", "file_ext": "py", "file_size_in_byte": 3720, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.floor", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 62, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 121, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 129, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 135, "usage_type": "name"}]} +{"seq_id": "27624273030", "text": "\"\"\"Plotting functions.\"\"\"\nfrom scipy.stats import norm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator, FuncFormatter\nfrom ProcessOptimizer.space import Categorical, Integer\nfrom ProcessOptimizer import expected_minimum\nfrom ProcessOptimizer.plots import partial, dependence, _cat_format, _map_categories\n\n\ndef plot_brownie_bee(\n result,\n n_points=40,\n n_samples=250,\n size=2,\n max_quality=5,\n):\n \"\"\"Single factor dependence plot of the model intended for use with the\n Brownie Bee user interface.\n\n Each plot shows how quality depends on the dimension `i` when all other\n factor values are locked to those of the expected minimum. A vertical line\n indicates the location of the expected minimum for each factor.\n\n Parameters\n ----------\n * `result` [`OptimizeResult`]\n The result for which to create the plots.\n\n * `n_points` [int, default=40]\n Number of points at which to evaluate the partial dependence\n along each dimension.\n\n * `n_samples` [int, default=250]\n Number of random samples to use for averaging the model function\n at each of the `n_points`.\n\n * `size` [float, default=2]\n Height (in inches) of each returned figure.\n\n * `max_quality` [int, default=5]\n The maximal quality obtainable in the setup of Brownie Bee. 
Quality is\n assumed to be measured on a scale from 0 to this number, and the y-axis\n of each plot is scaled to reflect this.\n\n Returns\n -------\n * `plot_list`: [`Figures`]:\n A list of individual matplotlib figure handles, one for each dimension\n present in 'result' and a last one representing a histogram of samples\n drawn at the expected minimum.\n \"\"\"\n\n space = result.space\n # Check if we have any categorical dimensions, as this influences the plots\n is_cat = [isinstance(dim, Categorical) for dim in space.dimensions]\n # Check if we have any integer dimensions, as this influences the plots\n is_int = [isinstance(dim, Integer) for dim in space.dimensions]\n # Identify the location of the expected minimum, and its mean and std\n x_eval, [res_mean, res_std] = expected_minimum(\n result,\n n_random_starts=20,\n random_state=None,\n return_std=True,\n )\n\n rvs_transformed = space.transform(space.rvs(n_samples=n_samples))\n _, minimum, _ = _map_categories(space, result.x_iters, x_eval)\n\n # Gather all data relevant for plotting\n plots_data = []\n for i in range(space.n_dims):\n row = []\n xi, yi, stddevs = dependence(\n space,\n result.models[-1],\n i,\n j=None,\n sample_points=rvs_transformed,\n n_points=n_points,\n x_eval=x_eval,\n )\n row.append({\"xi\": xi, \"yi\": yi, \"std\": stddevs})\n\n plots_data.append(row)\n\n # Create the list to store figure handles\n figure_list = []\n\n # Build all the plots in the figure\n for n in range(space.n_dims):\n # Prepare a figure\n fig, ax_ = plt.subplots(\n figsize=(size, size),\n dpi=200,\n )\n # Set the padding\n fig.subplots_adjust(\n left=0.12, right=0.93, bottom=0.2, top=0.95, hspace=0.0, wspace=0.0\n )\n\n # Get data to plot in this subplot\n xi = plots_data[n][0][\"xi\"]\n yi = plots_data[n][0][\"yi\"]\n stddevs = plots_data[n][0][\"std\"]\n\n # Set y-axis limits\n ax_.set_ylim(0, max_quality)\n\n # Enter here when we plot a categoric factor\n if is_cat[n]:\n # Expand the x-axis for this factor so we can see the first\n # and the last category\n ax_.set_xlim(np.min(xi) - 0.2, np.max(xi) + 0.2)\n\n # Create one uniformly colored bar for each category.\n # Edgecolor ensures we can see the bar when plotting\n # at best obeservation, as stddev is often tiny there\n ax_.bar(\n xi,\n 2 * 1.96 * stddevs,\n width=0.2,\n bottom=(-yi - 1.96 * stddevs),\n alpha=0.5,\n color=\"green\",\n edgecolor=\"green\",\n )\n [labl.set_fontsize(6) for labl in ax_.get_xticklabels()]\n\n # For non-categoric factors\n else:\n ax_.set_xlim(np.min(xi), np.max(xi))\n # Show the uncertainty\n ax_.fill_between(\n xi,\n y1=-(yi - 1.96 * stddevs),\n y2=-(yi + 1.96 * stddevs),\n alpha=0.5,\n color=\"green\",\n edgecolor=\"green\",\n linewidth=0.0,\n )\n\n # Highlight the expected minimum\n ax_.axvline(minimum[n], linestyle=\"--\", color=\"r\", lw=2, zorder=6)\n # Fix formatting of the y-axis with ticks from 0 to our max quality\n ax_.yaxis.set_major_locator(MaxNLocator(5, integer=True))\n ax_.tick_params(axis=\"y\", direction=\"inout\")\n\n if space.dimensions[n].prior == \"log-uniform\":\n ax_.set_xscale(\"log\")\n else:\n ax_.xaxis.set_major_locator(\n MaxNLocator(4, prune=None, integer=(is_cat[n] | is_int[n]))\n )\n if is_cat[n]:\n # Axes for categorical dimensions are really integers;\n # we have to label them with the category names\n ax_.xaxis.set_major_formatter(\n FuncFormatter(partial(_cat_format, space.dimensions[n]))\n )\n\n # Add the figure to the output list\n figure_list.append(fig)\n\n # Prepare a figure for a histogram of expected quality\n 
fig, ax_ = plt.subplots(\n figsize=(size, size),\n dpi=200,\n )\n # Set the padding\n fig.subplots_adjust(\n left=0.05, right=0.95, bottom=0.2, top=0.95, hspace=0.0, wspace=0.0\n )\n # Plot in the interval between 0 and our max quality\n xi = np.linspace(0, max_quality, 250)\n # Create histogram y-values\n yi = norm.pdf(xi, -res_mean, res_std)\n # Build the plot\n ax_.fill_between(\n xi,\n y1=np.zeros((len(xi),)),\n y2=yi,\n alpha=0.5,\n color=\"blue\",\n edgecolor=\"blue\",\n linewidth=0.0,\n )\n # Cosmetics\n ax_.get_yaxis().set_visible(False)\n ax_.set_ylim(0, max(yi) * 1.05)\n # Fix formatting of the x-axis with ticks from 0 to our max quality\n ax_.set_xlim(0, max_quality)\n ax_.xaxis.set_major_locator(MaxNLocator(5, prune=None, integer=True))\n\n # Add the figure to the output list\n figure_list.append(fig)\n\n return figure_list\n\n\ndef _cat_format(dimension, x, _):\n \"\"\"Categorical axis tick formatter function. Returns the name of category\n `x` in `dimension`. Used with `matplotlib.ticker.FuncFormatter`.\"\"\"\n x = min(max(int(x), 0), len(dimension.categories) - 1)\n label = str(dimension.categories[x])\n # If longer than 10 characters, try to break on spaces\n if len(label) > 10:\n if \" \" in label:\n # Break label at a space near the middle\n spaces = [i for i in range(len(label)) if label[i] == \" \"]\n middle_space = spaces[len(spaces) // 2]\n label = label[:middle_space] + \"\\n\" + label[middle_space + 1 :]\n else:\n # If no spaces, abbreviate to first 7 characters\n label = label[:7] + \"...\"\n return label\n", "repo_name": "BoostV/process-optimizer-api", "sub_path": "optimizerapi/plot_patch.py", "file_name": "plot_patch.py", "file_ext": "py", "file_size_in_byte": 7430, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "52", "api": [{"api_name": "ProcessOptimizer.space.Categorical", "line_number": 56, "usage_type": "argument"}, {"api_name": "ProcessOptimizer.space.Integer", "line_number": 58, "usage_type": "argument"}, {"api_name": "ProcessOptimizer.expected_minimum", "line_number": 60, "usage_type": "call"}, {"api_name": "ProcessOptimizer.plots._map_categories", "line_number": 68, "usage_type": "call"}, {"api_name": "ProcessOptimizer.plots.dependence", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 160, "usage_type": "call"}, {"api_name": "ProcessOptimizer.plots.partial", "line_number": 160, "usage_type": "call"}, {"api_name": "ProcessOptimizer.plots._cat_format", "line_number": 160, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 176, "usage_type": "call"}, {"api_name": "scipy.stats.norm.pdf", "line_number": 178, "usage_type": "call"}, {"api_name": "scipy.stats.norm", 
"line_number": 178, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "11842290506", "text": "from Paciente import Paciente\r\nfrom tabulate import tabulate\r\nfrom prettytable import PrettyTable\r\nimport matplotlib.pyplot as plt\r\n#plt.switch_backend('agg')\r\n\r\n\r\nclass Catalogo:\r\n def __init__(self,csv):\r\n self.pacientes = []\r\n self.parsing(csv)\r\n self.doenca_por_sexo = {\"M\": 0, \"F\": 0}\r\n self.calcula_doenca_por_sexo()\r\n self.doenca_por_idade = {}\r\n self.calcula_doenca_idade()\r\n self.doenca_por_colestrol = {}\r\n self.calcula_doenca_por_colesterol()\r\n\r\n def parsing(self, csv):\r\n with open(csv, 'r', encoding='utf8') as registos:\r\n next(registos)\r\n for linha in registos:\r\n campos = linha.strip().split(',')\r\n paciente = Paciente(*campos) # para desempacotar os campos\r\n self.pacientes.append(paciente) # adicionar o paciente a lista\r\n\r\n def calcula_doenca_por_sexo(self):\r\n for paciente in self.pacientes:\r\n if paciente.sexo == 'M' and paciente.tem_doenca == 1:\r\n self.doenca_por_sexo[paciente.sexo] += 1\r\n elif paciente.sexo == 'F' and paciente.tem_doenca == 1:\r\n self.doenca_por_sexo[paciente.sexo] += 1\r\n\r\n def max_idade(self):\r\n max_idade = 0\r\n for paciente in self.pacientes:\r\n if paciente.idade > max_idade:\r\n max_idade = paciente.idade\r\n return max_idade\r\n\r\n def calcula_doenca_idade(self):\r\n\r\n max_idade = self.max_idade()\r\n\r\n for idade in range(30, max_idade+1, 5): # mais 1 para incluir o max_idade, 5 porque é onde começa o proximo\r\n\r\n self.doenca_por_idade[f\"{idade}-{idade+4}\"] = 0\r\n\r\n\r\n for paciente in self.pacientes:\r\n if paciente.tem_doenca and paciente.idade >= 30:\r\n for faixa_etaria in self.doenca_por_idade:\r\n faixa_etaria_inferior, faixa_etaria_superior = map(int, faixa_etaria.split('-'))\r\n if faixa_etaria_inferior <= paciente.idade <= faixa_etaria_superior:\r\n self.doenca_por_idade[faixa_etaria] += 1\r\n break\r\n\r\n def calcula_doenca_por_colesterol(self):\r\n\r\n limites_colesterol = [paciente.colesterol for paciente in self.pacientes]\r\n\r\n min_colesterol = min(limites_colesterol)\r\n max_colesterol = max(limites_colesterol)\r\n\r\n\r\n for i in range(min_colesterol, max_colesterol+10, 10): # +10 para contar com o ultimo\r\n self.doenca_por_colestrol[f\"{i}-{i+9}\"] = 0\r\n\r\n for paciente in self.pacientes:\r\n for nivel_colesterol, value in self.doenca_por_colestrol.items():\r\n colesterol_inferior, colesterol_superior = map(int, nivel_colesterol.split('-'))\r\n if colesterol_inferior <= paciente.colesterol <= colesterol_superior and paciente.tem_doenca:\r\n self.doenca_por_colestrol[nivel_colesterol] += 1\r\n break\r\n\r\n #PROMPT\r\n\r\n def mostrar_pacientes(self):\r\n for paciente in self.pacientes:\r\n print(f\"Idade: {paciente.idade}, Sexo: {paciente.sexo}, Tensão: {paciente.tensao}, Colesterol: {paciente.colesterol}, Batimento: {paciente.batimento}, Tem doença: {'Sim' if paciente.tem_doenca else 'Não'}\")\r\n\r\n def mostrar_doencas_por_sexo(self):\r\n for sexo, doencas in self.doenca_por_sexo.items():\r\n print(f\"Sexo {sexo}: {doencas} com doença\")\r\n\r\n def mostrar_doencas_por_idade(self):\r\n\r\n for faixa_etaria, num_pacientes in self.doenca_por_idade.items():\r\n print(f\"Faixa etária {faixa_etaria}: {num_pacientes} com doença\")\r\n\r\n def mostrar_doencas_por_colesterol(self):\r\n\r\n for nivel_de_colesterol, 
num_pacientes in self.doenca_por_colestrol.items():\r\n            print(f\"Nivel de colesterol {nivel_de_colesterol}: {num_pacientes} com doenca\")\r\n\r\n    #TABELAS\r\n\r\n    def mostrar_doencas_por_sexo_tabela(self):\r\n        table = PrettyTable()\r\n        table.field_names = [\"Sexo\", \"Doenças\"]\r\n\r\n        for sexo, doencas in self.doenca_por_sexo.items():\r\n            table.add_row([sexo, doencas])\r\n\r\n        print(table)\r\n\r\n    def mostrar_doencas_por_idade_tabela(self):\r\n        headers = [\"Faixa Etária\", \"Número de Doenças\"]\r\n        table = []\r\n\r\n        for faixa_etaria, num_pacientes in self.doenca_por_idade.items():\r\n            table.append([faixa_etaria, num_pacientes])\r\n\r\n        print(tabulate(table, headers=headers, tablefmt=\"fancy_grid\"))\r\n        #print(tabulate(tabela, headers=[\"Faixa Etária\", \"Número de Pacientes com Doença\"], tablefmt=\"fancy_grid\"))\r\n\r\n    def mostrar_doencas_por_colesterol_tabela(self):\r\n        \r\n        tabela = PrettyTable()\r\n        tabela.field_names = [\"Nível de Colesterol\", \"Número de Pacientes\"]\r\n\r\n        for nivel_de_colesterol, num_pacientes in self.doenca_por_colestrol.items():\r\n            tabela.add_row([nivel_de_colesterol, num_pacientes])\r\n\r\n        print(tabela)\r\n\r\n    # GRAFICOS\r\n\r\n    def mostrar_doencas_por_sexo_grafico(self):\r\n        \r\n        sexos = list(self.doenca_por_sexo.keys())\r\n        num_doencas_por_sexo = list(self.doenca_por_sexo.values())\r\n\r\n        plt.bar(sexos, num_doencas_por_sexo)\r\n        plt.title(\"Distribuição de Doenças por Sexo\")\r\n        plt.xlabel(\"Sexo\")\r\n        plt.ylabel(\"Número de Pacientes com Doença\")\r\n\r\n        plt.show()\r\n    \r\n    def mostrar_doencas_por_idade_grafico(self):\r\n        \r\n        grupo_etario = list(self.doenca_por_idade.keys())\r\n        num_pacientes_por_grupo_etario = list(self.doenca_por_idade.values())\r\n\r\n        plt.bar(grupo_etario, num_pacientes_por_grupo_etario)\r\n        plt.title(\"Distribuição de Doenças por Grupo Etário\")\r\n        plt.xlabel(\"Grupo Etário\")\r\n        plt.ylabel(\"Número de Pacientes com Doença\")\r\n\r\n        plt.show()\r\n    \r\n    def mostrar_doencas_por_colesterol_grafico(self):\r\n\r\n        niveis_colesterol = list(self.doenca_por_colestrol.keys())\r\n        num_pacientes_por_nivel = list(self.doenca_por_colestrol.values())\r\n\r\n        plt.bar(niveis_colesterol, num_pacientes_por_nivel)\r\n        plt.title(\"Distribuição de Pacientes por Nível de Colesterol\")\r\n        plt.xlabel(\"Nível de Colesterol\")\r\n        plt.ylabel(\"Número de Pacientes com Doença\")\r\n\r\n        plt.show()\r\n\r\n", "repo_name": "joaofarialeite/PL2023", "sub_path": "TPC1/Catalogo.py", "file_name": "Catalogo.py", "file_ext": "py", "file_size_in_byte": 6296, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Paciente.Paciente", "line_number": 24, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 99, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 114, "usage_type": "call"}, {"api_name": "prettytable.PrettyTable", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 137, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 149, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}]} +{"seq_id": "15181667264", "text": "import pandas as pd\r\nimport streamlit as st\r\nimport numpy as np\r\nfrom matplotlib import image\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport os\r\n\r\nst.title('Flipkart Laptop EDA')\r\n\r\n# absolute path to this file\r\nFILE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n# absolute path to this file's root directory\r\nPARENT_DIR = os.path.join(FILE_DIR, os.pardir)\r\n# absolute path of directory_of_interest\r\ndir_of_interest = os.path.join(PARENT_DIR, \"resources\")\r\n\r\nIMAGE_PATH = os.path.join(dir_of_interest, \"images\", \"flipkart.png\")\r\n\r\nDATA_PATH = os.path.join(dir_of_interest, \"data\", \"laptop.csv\")\r\n\r\nimg = image.imread(IMAGE_PATH)\r\nst.image(img)\r\n\r\ndata = pd.read_csv(DATA_PATH)\r\nst.dataframe(data)\r\n\r\nst.header('Columns of Data Frame')\r\nst.write(data.columns)\r\nst.header('Summary')\r\nst.write(data.describe())\r\n\r\nst.header('Types Of Processors')\r\nst.bar_chart(data['CPU Processor'].value_counts())\r\n\r\nst.header('Types Of Brand')\r\nst.bar_chart(data['Brand'].value_counts())\r\n\r\nst.header('Types Of Processor Brands')\r\nst.bar_chart(data['CPU Brand'].value_counts())\r\n\r\nst.header('Relationship between Storage Capacity and MRP')\r\n\r\n# create the bar plot\r\nfig, ax = plt.subplots()\r\nsns.barplot(x=data['Storage_Capacity'], y=data['MRP'], ax=ax)\r\nplt.xticks(rotation='vertical')\r\n\r\n# display the plot in Streamlit\r\nst.pyplot(fig)\r\n\r\nst.header('Relationship between Storage Type and MRP')\r\n\r\n# create the bar plot\r\nfig, ax = plt.subplots()\r\nsns.barplot(x=data['Storage_Type'], y=data['MRP'], 
ax=ax)\r\nplt.xticks(rotation='vertical')\r\n\r\n# display the plot in Streamlit\r\nst.pyplot(fig)\r\n\r\nst.header('Relationship between RAM Type and MRP')\r\n\r\n# create the bar plot\r\nfig, ax = plt.subplots()\r\nsns.barplot(x=data['RAM Type'], y=data['MRP'], ax=ax)\r\nplt.xticks(rotation='vertical')\r\n\r\n# display the plot in Streamlit\r\nst.pyplot(fig)\r\n\r\nst.header('Relationship between RAM Capacity and MRP')\r\n\r\n# create the bar plot\r\nfig, ax = plt.subplots()\r\nsns.barplot(x=data['RAM Capacity'], y=data['MRP'], ax=ax)\r\nplt.xticks(rotation='vertical')\r\n\r\n# display the plot in Streamlit\r\nst.pyplot(fig)\r\n", "repo_name": "pavan-stark/Laptop-price-prediction", "sub_path": "pages/Data_Visualizations.py", "file_name": "Data_Visualizations.py", "file_ext": "py", "file_size_in_byte": 2136, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "streamlit.title", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.image.imread", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 24, "usage_type": "name"}, {"api_name": "streamlit.image", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 30, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 31, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 32, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 33, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 35, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 38, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 39, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 41, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 52, "usage_type": 
"call"}, {"api_name": "streamlit.header", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 72, "usage_type": "call"}, {"api_name": "streamlit.header", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "streamlit.pyplot", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "29014309099", "text": "import argparse, os, json, random\nfrom collections import defaultdict\n\nimport logic\n\nargParser = argparse.ArgumentParser()\nargParser.add_argument('--seed', type=str, help=\"The seed (If not present, one will be generated.)\")\nargParser.add_argument('--spoiler', help=\"Location to dump the spoiler file.\")\nargParser.add_argument('--output', help=\"Location for output. 
If not set, output will not be generated.\", default=\"final.lua\")\nargParser.add_argument('--seedfile', help=\"Dump the seed in a human-readable way to this file.\", default=\"seed.txt\")\nargParser.add_argument('--hubsroot', type=str, help=\"Root directory of the hubs files\", default=\"hubs\")\n\nclass WarpMapEntry:\n def __init__(self, fromWarpID, fromMapNum, fromMapGroup, fromGame, toWarpID, toMapNum, toMapGroup, toGame, \n fromOurX = None, fromOurY = None, fromOurMapNum=None, varIndex=None, varSet=None):\n self.fromWarpID = fromWarpID\n self.fromMapNum = fromMapNum\n self.fromMapGroup = fromMapGroup\n self.fromGame = fromGame\n self.fromOurX = fromOurX\n self.fromOurY = fromOurY\n self.fromOurMapNum = fromOurMapNum\n self.toWarpID = toWarpID\n self.toMapNum = toMapNum\n self.toMapGroup = toMapGroup\n self.toGame = toGame\n self.varIndex = varIndex\n self.varSet = varSet\n\n def toString(self):\n ourXYString = \"\"\n if self.fromOurX:\n ourXYString = '[\"posX\"] = %i,[\"posY\"] = %i, [\"originMapNum\"] = %i,' % (self.fromOurX, self.fromOurY, self.fromOurMapNum)\n varSetString = \"\"\n if self.varIndex:\n varSetString = '[\"varIndex\"] = \"%s\", [\"varSet\"] = %i,' % (self.varIndex, self.varSet)\n return \"\"\"{\nfrom = { [\"warpId\"] = %i,[\"mapNum\"] = %i,[\"y\"] = 65535,[\"mapGroup\"] = %i,[\"x\"] = 65535, %s},\nto = { [\"warpId\"] = %i,[\"mapNum\"] = %i,[\"y\"] = 65535,[\"game\"] = \"%s\",[\"mapGroup\"] = %i,[\"x\"] = 65535, %s} \n},\n\"\"\" % (self.fromWarpID, self.fromMapNum, self.fromMapGroup, ourXYString, self.toWarpID, self.toMapNum, self.toGame, self.toMapGroup, varSetString)\n\nallHubs = []\ndeadEndCounts = defaultdict(lambda: 0)\ndef handleHub(hub, fileName):\n allHubs.append(hub)\n if len(hub[\"warps\"]) == 1 and not \"always_available\" in hub:\n deadEndCounts[fileName] = deadEndCounts[fileName] + 1\n\n\"\"\"\nWarpMap = {\n {\n from = { [\"warpId\"] = 0,[\"mapNum\"] = 1,[\"y\"] = 65535,[\"game\"] = \"POKEMON EMER\",[\"mapGroup\"] = 2,[\"x\"] = 65535,},\n to = { [\"warpId\"] = 1,[\"mapNum\"] = 1,[\"y\"] = 65535,[\"game\"] = \"POKEMON FIRE\",[\"mapGroup\"] = 3,[\"x\"] = 65535,} \n },\n {\n from = { [\"warpId\"] = 1,[\"mapNum\"] = 0,[\"y\"] = 65535,[\"game\"] = \"POKEMON FIRE\",[\"mapGroup\"] = 5,[\"x\"] = 65535,} ,\n to = { [\"warpId\"] = 0,[\"mapNum\"] = 1,[\"y\"] = 65535,[\"game\"] = \"POKEMON EMER\",[\"mapGroup\"] = 2,[\"x\"] = 65535,},\n }\n}\n\"\"\"\n\ndef findPartner(allWarps, w):\n for other in allWarps:\n for d in [other] + other[\"duplicates\"]:\n if w[\"destID\"] == d[\"ourID\"] and w[\"destMapNum\"] == d[\"ourMapNum\"] and w[\"destMapGroup\"] == d[\"ourMapGroup\"] and w[\"gameName\"] == d[\"gameName\"]:\n return other\n return None\n\ndef createFinalMapping(finalMappings, a, b, include_xy):\n ax = a[\"x\"] if include_xy else None\n ay = a[\"y\"] if include_xy else None\n amn = a[\"ourMapNum\"] if include_xy else None\n finalMappings.append(WarpMapEntry(a[\"destID\"], a[\"destMapNum\"], a[\"destMapGroup\"], a[\"gameName\"], b[\"ourID\"], b[\"ourMapNum\"], b[\"ourMapGroup\"], b[\"gameName\"], ax, ay, amn, b.get(\"varIndex\"), b.get(\"varSet\")))\n for d in a[\"duplicates\"]:\n dx = d[\"x\"] if include_xy else None\n dy = d[\"y\"] if include_xy else None\n dmn = d[\"ourMapNum\"] if include_xy else None\n finalMappings.append(WarpMapEntry(d[\"destID\"], d[\"destMapNum\"], d[\"destMapGroup\"], d[\"gameName\"], b[\"ourID\"], b[\"ourMapNum\"], b[\"ourMapGroup\"], b[\"gameName\"], dx, dy, dmn, b.get(\"varIndex\"), b.get(\"varSet\")))\n # 
finalMappings.append(WarpMapEntry(b[\"destID\"], b[\"destMapNum\"], b[\"destMapGroup\"], b[\"gameName\"], a[\"ourID\"], a[\"ourMapNum\"], a[\"ourMapGroup\"], a[\"gameName\"]))\n # for d in b[\"duplicates\"]:\n # finalMappings.append(WarpMapEntry(d[\"destID\"], d[\"destMapNum\"], d[\"destMapGroup\"], d[\"gameName\"], a[\"ourID\"], a[\"ourMapNum\"], a[\"ourMapGroup\"], a[\"gameName\"])) \n\ndef validateAllWarps(allHubs):\n allWarps = [] \n for h in allHubs:\n for w in h[\"warps\"]:\n if not \"xy_required\" in h:\n for aw in allWarps:\n if w[\"destID\"] == aw[\"destID\"] and w[\"destMapGroup\"] == aw[\"destMapGroup\"] and w[\"destMapNum\"] == aw[\"destMapNum\"] and w[\"gameName\"] == aw[\"gameName\"]:\n print(\"WARNING! Warp\", w, \"and warp\", aw, \"from hub\", h[\"name\"],\"have the same destination but are not marked as duplicates or xy_required! The lua script will break!\")\n allWarps.append(w)\n\ndef countDeadEnds():\n print(\"Top dead ends:\")\n s = sorted(deadEndCounts.items(), key=lambda a: a[1])\n for f in s:\n print(\"{}: {}\".format(f[1], f[0]))\n\ndef shuffle():\n (finalHubs, spoilerString) = logic.doLogic(allHubs)\n\n finalMappings = []\n for h in finalHubs:\n for w in h[\"warps\"]:\n p = w[\"partner\"]\n createFinalMapping(finalMappings, w, p, \"xy_required\" in h)\n return (finalMappings, spoilerString)\n\ndef finish(finalMappings, spoilerTxt, outFileName, spoilerFileName, seedFileName, seed):\n perGameWarpMappings = {}\n for m in finalMappings:\n if not m.fromGame in perGameWarpMappings:\n perGameWarpMappings[m.fromGame] = [m]\n else:\n perGameWarpMappings[m.fromGame].append(m)\n if outFileName:\n outfile = open(outFileName, \"w\")\n outfile.write(\"-- Generated with Combo Randomizer shuffle.py from seed \" + seed + \"\\n\")\n outfile.write(\"WarpMap = {\")\n for g in perGameWarpMappings:\n outfile.write('[\"%s\"] = {' % g)\n for m in perGameWarpMappings[g]:\n outfile.write(m.toString())\n outfile.write(\"},\")\n outfile.write(\"}\")\n\n if spoilerFileName:\n spoilerFile = open(spoilerFileName, \"w\")\n spoilerFile.write(spoilerTxt)\n\n if seedFileName:\n seedFile = open(seedFileName, \"w\")\n seedFile.write(\"Generated with Combo Randomizer from seed \" + seed + \"\\n\")\n\n\ndef main():\n args = argParser.parse_args()\n print(\"Welcome to the shuffler!\")\n seed = args.seed\n if not seed:\n seed = str(random.randint(0, 1000000))\n print(\"No seed was given, so we'll randomly use\", seed)\n random.seed(seed)\n print(\"Loading hub json files...\")\n for game in os.listdir(args.hubsroot):\n for fileName in os.listdir(os.path.join(args.hubsroot, game)):\n jfile = json.load(open(os.path.join(args.hubsroot, game, fileName)))\n for i, hub in enumerate(jfile[\"hubs\"]):\n hub[\"name\"] = fileName + \"_\" + str(i)\n handleHub(hub, fileName)\n print(\"Validating JSON files.\")\n validateAllWarps(allHubs)\n # countDeadEnds()\n print(len(allHubs),\"total hubs\")\n print(\"Starting logic.\")\n (finalMappings, spoilerTxt) = shuffle()\n finish(finalMappings, spoilerTxt, args.output, args.spoiler, args.seedfile, seed)\n print(\"Finished generating with seed\",seed) \n\nif __name__ == \"__main__\":\n main()", "repo_name": "Bratmon/FREComboRandomizer", "sub_path": "Python/shuffle.py", "file_name": "shuffle.py", "file_ext": "py", "file_size_in_byte": 7347, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "collections.defaultdict", 
"line_number": 44, "usage_type": "call"}, {"api_name": "logic.doLogic", "line_number": 101, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 142, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 144, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 146, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}]} +{"seq_id": "6374935357", "text": "import json\n#module to get close matches of a word, in case of a mistype from the user\nfrom difflib import get_close_matches\n\ndata = json.load(open(\"data.json\"))\n\ndef translate(w):\n\t#some conditionals for the user input not to be case-sensitive\n\tif w.lower() in data:\n\t\treturn data[w.lower()]\n\n\telif w.capitalize() in data:\n\t\treturn data[w.capitalize()]\n\n\telif w.upper() in data:\n\t\treturn data[w.upper()]\n\t\t\n\telif len (get_close_matches(w, data.keys())) > 0:\n\t\tx = input(\"Did you mean %s instead? (y/n)\" % get_close_matches(w, data.keys())[0])\n\t\tx = x.lower()\n\t\tif x == 'y':\n\t\t\treturn data[get_close_matches(w, data.keys())[0]]\n\t\telif x == 'n':\n\t\t\tw = input (\"What did you mean then? \")\n\t\t\treturn translate (w)\n\t\telse:\n\t\t\treturn \"please, enter y or n\"\n\telse:\n\t\treturn \"The word doesn't exist\"\n\nword = input (\"Type a word: \")\n\noutput = translate(word)\n#in case there is more than one meaning for the word, the output will be a list of those meanings\nif type(output) == list:\n\tfor i in output:\n\t\tprint (i)\nelse:\n\tprint (output)\n", "repo_name": "evandrorm89/interactive-dictionary", "sub_path": "interactive-dictionary.py", "file_name": "interactive-dictionary.py", "file_ext": "py", "file_size_in_byte": 1026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 5, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 18, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 19, "usage_type": "call"}, {"api_name": "difflib.get_close_matches", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "7857321908", "text": "import osqp\nimport numpy as np\nimport scipy.sparse as sp\nfrom genqp import skew, Ib\nfrom uprightmpc2py import UprightMPC2C # C version\n\n\"\"\"This file creates the python controller, and can return py ver, C ver, and a reactive\"\"\"\n\nny = 6\nnu = 3\n\n# Basic constituents of dynamics A0, B0 (only sparsity matters)\ndef getA0(T0):\n A0 = np.zeros((6, 6))\n A0[:3,3:] = T0*np.eye(3)\n return A0\n\ndef getB0(s0, Btau):\n return np.block([\n [np.reshape(s0, (3,1)),np.zeros((3,2))],\n [np.zeros((3,1)), Btau]\n ])\n \nc0 = lambda g : np.array([0,0,-g,0,0,0])\n\ne3h = skew([0,0,1])\n\ndef initConstraint(N, nx, nc):\n # these will be updated\n T0 = 1\n dt = 1\n s0 = np.ones(3)\n Btau = np.ones((3,2))\n\n A = np.zeros((nc, nx))\n P = np.eye(nx) # Q, R stacked\n\n # cols of A are broken up like this (partition nx)\n n1 = N*ny\n n2 = 2*N*ny\n # rows of A broken up:\n nc1 = N*ny # after this; yddot dynamics\n nc2 = 2*N*ny # after this; thrust lims\n \n for k in range(N):\n # 
ykp1 = yk + dt*dyk equations\n A[k*ny:(k+1)*ny, k*ny:(k+1)*ny] = -np.eye(ny)\n A[k*ny:(k+1)*ny, n1 + k*ny:n1 + (k+1)*ny] = dt * np.eye(ny)\n if k>0:\n A[k*ny:(k+1)*ny, (k-1)*ny:(k)*ny] = np.eye(ny)\n \n # dykp1 equation\n A[nc1 + k*ny:nc1 + (k+1)*ny, n1 + k*ny:n1 + (k+1)*ny] = -np.eye(ny)\n A[nc1 + k*ny:nc1 + (k+1)*ny, n2 + k*nu:n2 + (k+1)*nu] = getB0(s0, Btau)\n if k>0:\n A[nc1 + k*ny:nc1 + (k+1)*ny, n1 + (k-1)*ny:n1 + (k)*ny] = np.eye(ny)\n if k>1:\n A[nc1 + k*ny:nc1 + (k+1)*ny, (k-2)*ny:(k-1)*ny] = getA0(T0*dt)\n \n # thrust lim\n A[nc2+k, n2+3*k] = 1\n \n return sp.csc_matrix(A), sp.csc_matrix(P)\n\ndef updateConstraint(N, A, dt, T0, s0s, Btaus, y0, dy0, g, Tmax):\n nc = A.shape[0]\n\n # Update vector\n l = np.zeros(nc)\n y1 = y0 + dt * dy0\n l[:ny] = -y1\n for k in range(N):\n if k == 0:\n l[ny*N+k*ny : ny*N+(k+1)*ny] = -dy0 - dt*getA0(T0) @ y0 - dt*c0(g)\n elif k == 1:\n l[ny*N+k*ny : ny*N+(k+1)*ny] = -dt*getA0(T0) @ y1 - dt*c0(g)\n else:\n l[ny*N+k*ny : ny*N+(k+1)*ny] = -dt*c0(g)\n # copy for dynamics\n u = np.copy(l)\n # thrust lims\n for k in range(N):\n l[2*N*ny+k] = -T0\n u[2*N*ny+k] = Tmax-T0\n\n # Left third\n AxidxT0dt = []\n n2 = 2*ny + 3 # nnz in each block col on the left\n for k in range(N-2):\n AxidxT0dt += [n2*k + i for i in [8,11,14]]\n\n # Middle third\n n1 = (2*N-1)*ny + (N-2)*3 # All the nnz in the left third\n n2 = 3*ny # nnz in each of the first N-1 block cols in the middle third\n Axidxdt = []\n for k in range(N):\n if k < N-1:\n Axidxdt += [n1 + n2*k + i for i in [0,3,6,9,12,15]]\n else:\n Axidxdt += [n1 + n2*k + i for i in [0,2,4,6,8,10]]\n \n # Right third\n n1 += 3*ny*(N-1) + 2*ny # all nnz in the left and middle third\n n2 = 10 # nnz in each B0 + 1 for thrust lim\n Axidxs0 = []\n AxidxBtau = []\n for k in range(N):\n Axidxs0 += [n1 + n2*k + i for i in range(3)]\n AxidxBtau += [n1 + n2*k + 4 + i for i in range(6)]\n # No need to update rightmost\n\n # Last check\n assert A.nnz == n1 + n2*N\n\n # Update\n A.data[AxidxT0dt] = dt*T0\n A.data[Axidxdt] = dt\n A.data[Axidxs0] = dt*np.hstack((s0s))\n A.data[AxidxBtau] = dt*np.hstack([np.ravel(Btau,order='F') for Btau in Btaus])\n\n Axidx = np.hstack((AxidxT0dt, Axidxdt, Axidxs0, AxidxBtau))\n # print(\"nAdata =\",len(Axidx))\n\n # print(A[:,2*N*ny:2*N*ny+6].toarray())\n return A, l, u, Axidx\n\ndef updateObjective(N, Qyr, Qyf, Qdyr, Qdyf, R, ydes, dydes):\n # Block diag components - see notes\n Pdata = np.hstack((\n np.hstack([Qyr for k in range(N-1)]),\n Qyf,\n np.hstack([Qdyr for k in range(N-1)]),\n Qdyf,\n np.hstack([R for k in range(N)])\n ))\n q = np.hstack((\n np.hstack([-Qyr*ydes for k in range(N-1)]),\n -Qyf*ydes,\n np.hstack([-Qdyr*dydes for k in range(N-1)]),\n -Qdyf*dydes,\n np.zeros(N*len(R))\n ))\n return Pdata, q\n\ndef openLoopX(N, dt, T0, s0s, Btaus, y0, dy0, g):\n ys = np.zeros((N,ny))\n dys = np.zeros((N,ny))\n us = np.random.rand(N,nu)\n\n yk = np.copy(y0)\n dyk = np.copy(dy0)\n for k in range(N):\n # at k=0, yy=y0, \n dykp1 = dyk + dt*(getA0(T0) @ yk + getB0(s0s[k], Btaus[k]) @ us[k,:] + c0(g)) # dy[k+1]\n\n dys[k,:] = dykp1\n ykp1 = yk + dt * dyk # y[k+1]\n ys[k,:] = ykp1 + dt * dykp1 # y[k+2]\n\n # For next k\n yk = np.copy(ykp1)\n dyk = np.copy(dykp1)\n\n # stack\n x = np.hstack((np.ravel(ys), np.ravel(dys), np.ravel(us)))\n # print(ys, x)\n return x\n\nclass UprightMPC2():\n def __init__(self, N, dt, g, TtoWmax, ws, wds, wpr, wpf, wvr, wvf, wthrust, wmom, Ib):\n self.N = N\n\n nx = self.N * (2*ny + nu)\n nc = 2*self.N*ny + self.N\n\n self.A, self.P = initConstraint(N, nx, nc)\n\n Qyr = 
np.hstack((np.full(3,wpr), np.full(3,ws)))\n Qyf = np.hstack((np.full(3,wpf), np.full(3,ws)))\n Qdyr = np.hstack((np.full(3,wvr), np.full(3,wds)))\n Qdyf = np.hstack((np.full(3,wvf), np.full(3,wds)))\n R = np.hstack((wthrust,np.full(2,wmom)))\n\n self.dt = dt\n self.Wts = [Qyr, Qyf, Qdyr, Qdyf, R]\n self.g = g\n self.Tmax = TtoWmax * g # use thrust-to-weight ratio to set max specific thrust\n\n # Create OSQP\n self.model = osqp.OSQP()\n self.model.setup(P=self.P, A=self.A, l=np.zeros(nc), eps_rel=1e-4, eps_abs=1e-4, verbose=False)\n\n # Manage linearization point\n self.T0 = 0 # mass-specific thrust\n self.Ibi = np.diag(1/Ib)\n \n def codegen(self, dirname='uprightmpc2/gen'):\n try:\n self.model.codegen(dirname, project_type='', force_rewrite=True, parameters='matrices', FLOAT=True, LONG=False)\n except:\n # No worries if python module failed to compile\n pass\n\n def testDyn(self, T0sp, s0s, Btaus, y0, dy0):\n # Test\n self.A, l, u, Axidx = updateConstraint(self.N, self.A, self.dt, T0sp, s0s, Btaus, y0, dy0, self.g, self.Tmax)\n xtest = openLoopX(self.N, self.dt, T0sp, s0s, Btaus, y0, dy0, self.g)\n print((self.A @ xtest - l)[:2*self.N*ny])\n \n def update1(self, T0sp, s0s, Btaus, y0, dy0, ydes, dydes):\n # Update\n self.A, self.l, self.u, self.Axidx = updateConstraint(self.N, self.A, self.dt, T0sp, s0s, Btaus, y0, dy0, self.g, self.Tmax)\n self.Pdata, self.q = updateObjective(self.N, *self.Wts, ydes, dydes)\n \n # OSQP solve ---\n self.model.update(Px=self.Pdata, Ax=self.A.data, q=self.q, l=self.l, u=self.u)\n res = self.model.solve()\n if 'solved' not in res.info.status:\n print(res.info.status)\n self.obj_val = res.info.obj_val\n # Functions for debugging\n self.obj = lambda x : 0.5 * x.T @ self.Pdense @ x + self.q.T @ x\n self.viol = lambda x : np.amin(np.hstack((self.A @ x - self.l, self.u - self.A @ x)))\n return res.x\n \n def update2(self, p0, R0, dq0, pdes, dpdes, sdes):\n # At current state\n s0 = np.copy(R0[:,2])\n s0s = [s0 for i in range(self.N)]\n Btau = (-R0 @ e3h @ self.Ibi)[:,:2] # no yaw torque\n Btaus = [Btau for i in range(self.N)]\n ds0 = -R0 @ e3h @ dq0[3:6] # omegaB\n\n y0 = np.hstack((p0, s0))\n dy0 = np.hstack((dq0[:3], ds0))\n ydes = np.hstack((pdes, sdes))\n dydes = np.hstack((dpdes, 0, 0, 0))\n\n self.prevsol = self.update1(self.T0, s0s, Btaus, y0, dy0, ydes, dydes)\n utilde = self.prevsol[2*ny*self.N : 2*ny*self.N+nu]\n self.T0 += utilde[0]\n\n return np.hstack((self.T0, utilde[1:]))\n \n def getAccDes(self, R0, dq0):\n dy1des = self.prevsol[ny*self.N : ny*self.N+ny] # from horiz\n # # Coordinate change for the velocity\n # bTw = lambda dq : np.hstack((R0.T @ dq[:3], dq[3:6]))\n dq1des = np.hstack((dy1des[:3], e3h @ R0.T @ dy1des[3:6])) # NOTE omegaz is lost\n # return (bTw(dq1des) - bTw(dq0)) / self.dt\n return (dq1des - dq0) / self.dt # return in world frame\n \n def update(self, p0, R0, dq0, pdes, dpdes, sdes, actualT0):\n if actualT0 >= 0:\n self.T0 = actualT0\n # Version of above that computes the desired body frame acceleration\n u = self.update2(p0, R0, dq0, pdes, dpdes, sdes)\n return u, self.getAccDes(R0, dq0)\n \ndef createMPC(N=3, ws=1e1, wds=1e3, wpr=1, wvr=1e3, wpf=5, wvf=2e3, wthrust=1e-1, wmom=1e-2, TtoWmax=2, popts=np.zeros(90), **kwargs):\n \"\"\"Returns the mdl\"\"\"\n dt = 5\n g = 9.81e-3\n # WLQP inputs\n mb = 100\n # what \"u\" is depends on w(u). 
Here in python testing with w(u) = [0,0,u0,u1,u2,u3].\n # Setting first 2 elements of Qw -> 0 => should not affect objective as longs as dumax does not constrain.\n Qw = np.hstack((np.zeros(2), np.zeros(4)))\n umin = np.array([0, -0.5, -0.2, -0.1])\n umax = np.array([10, 0.5, 0.2, 0.1])\n dumax = np.array([10, 10, 10, 10]) # /s\n # # WLQP stuff - copied from isolated C implementation\n # umin = np.array([50, -0.5, -0.2, -0.1])\n # umax = np.array([240, -0.5, -0.2, -0.1])\n # dumax = np.array([5e3, 10, 10, 10]) # /s\n controlRate = 1000\n pyver = UprightMPC2(N, dt, g, TtoWmax, ws, wds, wpr, wpf, wvr, wvf, wthrust, wmom, Ib.diagonal())\n # C version can be tested too\n cver = UprightMPC2C(dt, g, TtoWmax, ws, wds, wpr, wpf, wvr, wvf, wthrust, wmom, Ib.diagonal(), 50)\n return pyver, cver\n\ndef reactiveController(p, Rb, dq, pdes, kpos=[5e-3,5e-1], kz=[1e-1,1e0], ks=[10e0,1e2], **kwargs):\n # Pakpong-style reactive controller\n sdes = np.clip(kpos[0] * (pdes - p) - kpos[1] * dq[:3], np.full(3, -0.5), np.full(3, 0.5))\n sdes[2] = 1\n # sdes = np.array([0,0,1])\n omega = dq[3:]\n dp = dq[:3]\n s = Rb[:,2]\n ds = -Rb @ e3h @ omega\n # Template controller <- LATEST\n fz = kz[0] * (pdes[2] - p[2]) - kz[1] * dq[2]\n fTorn = ks[0] * (s - sdes) + ks[1] * ds\n fTorn[2] = 0\n fAorn = -e3h @ Rb.T @ fTorn\n return np.hstack((fz, fAorn[:2]))\n\nif __name__ == \"__main__\":\n up, upc = createMPC()\n # Dyn test\n T0 = 0.5\n s0s = [[0.1,0.1,0.9] for i in range(up.N)]\n Btaus = [np.full((3,2),1.123) for i in range(up.N)]\n y0 = np.random.rand(6)\n dy0 = np.random.rand(6)\n ydes = np.zeros_like(y0)\n dydes = np.zeros_like(y0)\n up.testDyn(T0, s0s, Btaus, y0, dy0)\n\n # FIXME: test\n p = np.random.rand(3)\n R = np.random.rand(3, 3)\n dq = np.random.rand(6)\n pdes = np.random.rand(3)\n dpdes = np.random.rand(3)\n sdes = np.random.rand(3)\n retc = upc.update(p, R, dq, pdes, dpdes, sdes)\n cl, cu, cq = upc.vectors()\n cP, cAdata, cAidx = upc.matrices()\n ret = up.update(p, R, dq, pdes, dpdes, sdes)\n # print(cAdata - up.A.data[cAidx])#OK\n # print(cAidx - up.Axidx)#OK\n # print(cl - up.l)#OK\n # print(cu - up.u)#OK\n # print(cq - up.q)#OK\n # print(cP - up.Pdata)#OK\n print(ret[0], ret[1], ret[0]-retc[0], ret[1]-retc[1])\n", "repo_name": "avikde/robobee3d", "sub_path": "template/template_controllers.py", "file_name": "template_controllers.py", "file_ext": "py", "file_size_in_byte": 11127, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.block", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "genqp.skew", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 50, "usage_type": 
"call"}, {"api_name": "numpy.eye", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.sparse.csc_matrix", "line_number": 63, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 63, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 148, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 182, "usage_type": "call"}, {"api_name": "osqp.OSQP", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 195, "usage_type": "call"}, {"api_name": "genqp.Ib", "line_number": 195, "usage_type": "name"}, {"api_name": "numpy.amin", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 243, 
"usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "genqp.Ib.diagonal", "line_number": 277, "usage_type": "call"}, {"api_name": "genqp.Ib", "line_number": 277, "usage_type": "name"}, {"api_name": "uprightmpc2py.UprightMPC2C", "line_number": 279, "usage_type": "call"}, {"api_name": "genqp.Ib.diagonal", "line_number": 279, "usage_type": "call"}, {"api_name": "genqp.Ib", "line_number": 279, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 304, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 305, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 311, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 311, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 312, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 312, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 313, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 314, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 315, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 315, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 316, "usage_type": "attribute"}]} +{"seq_id": "24023151197", "text": "import mock\nimport unittest\n\nfrom mock_request import MockRequest\n\nfrom opp.api.v1 import user as api\n\n\nclass TestResponseHandler(unittest.TestCase):\n\n def _check_called(self, func_name, *exp_args, **exp_kwargs):\n func_name.assert_called_once_with(*exp_args, **exp_kwargs)\n\n @mock.patch('sqlalchemy.orm.scoped_session')\n @mock.patch.object(api.ResponseHandler, '_do_put')\n def test_respond_put(self, func, session):\n request = MockRequest('PUT', None, None)\n handler = api.ResponseHandler(request, None, session)\n handler.respond(require_phrase=False)\n self._check_called(func, None)\n\n @mock.patch('sqlalchemy.orm.scoped_session')\n @mock.patch.object(api.ResponseHandler, '_do_post')\n def test_respond_post(self, func, session):\n request = MockRequest('POST', None, None)\n handler = api.ResponseHandler(request, None, session)\n handler.respond(require_phrase=False)\n self._check_called(func, None)\n\n 
@mock.patch('sqlalchemy.orm.scoped_session')\n @mock.patch.object(api.ResponseHandler, '_do_delete')\n def test_respond_delete(self, func, session):\n request = MockRequest('DELETE', None, None)\n handler = api.ResponseHandler(request, None, session)\n handler.respond(require_phrase=False)\n self._check_called(func)\n", "repo_name": "openpassphrase/opp", "sub_path": "opp/tests/unit/api/v1/test_user.py", "file_name": "test_user.py", "file_ext": "py", "file_size_in_byte": 1333, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mock_request.MockRequest", "line_number": 17, "usage_type": "call"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 18, "usage_type": "call"}, {"api_name": "opp.api.v1.user", "line_number": 18, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 14, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 15, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 15, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 15, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user", "line_number": 15, "usage_type": "name"}, {"api_name": "mock_request.MockRequest", "line_number": 25, "usage_type": "call"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "opp.api.v1.user", "line_number": 26, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 22, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 23, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 23, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 23, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user", "line_number": 23, "usage_type": "name"}, {"api_name": "mock_request.MockRequest", "line_number": 33, "usage_type": "call"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "opp.api.v1.user", "line_number": 34, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 30, "usage_type": "call"}, {"api_name": "mock.patch.object", "line_number": 31, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 31, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user.ResponseHandler", "line_number": 31, "usage_type": "attribute"}, {"api_name": "opp.api.v1.user", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "75062196003", "text": "# Create your views here.\nfrom django.shortcuts import render\nimport requests\nimport json\nimport ast\nimport urllib\nfrom front.common import authetication_required\nimport configparser\nimport csv\n\nparser = configparser.RawConfigParser()\nparser.read('/opt/optima/global_configuration/optima_configuration_file.cnf')\ncode_list = [404, 401, 402, 500, 400]\nAPI_URI = 'http://{0}:{1}'.format(parser.get('API_SECTION', 'API_HOST'),\n parser.get('API_SECTION', 'API_PORT'))\nglobal_data = ['name',\n 'description',\n 'login',\n 'password',\n 'useDeviceCredentials',\n 'useEnablePassword',\n ]\nBOOLEAN_FIELDS = ['use_device_credentials',\n 'use_enable_password',\n 'is_validated']\nHOSTS = ['element',\n 'value',\n 'device',\n ]\ndef convertCsvToArray(fileString):\n '''Converts a CSV string to an array'''\n string = fileString.decode('utf-8')\n unformattedDevices = list(csv.reader(str(string).split('\\n'), delimiter=','))\n 
if not unformattedDevices[-1]: del unformattedDevices[-1]\n return unformattedDevices \n\n@authetication_required()\ndef workflowList(request, response = {}):\n url = API_URI + '/workflow'\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n r = requests.get(url, headers=headers)\n if r.status_code == 200: workflowListData = r.json()\n else: workflowListData = {}\n if(request.GET.get('delete_workflow')):\n data = request.GET\n url = API_URI + '/workflow/{0}'.format(data[\"delete_workflow\"])\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n r = requests.delete(url, headers=headers)\n response = r.text\n url = API_URI + '/workflow'\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n r = requests.get(url, headers=headers)\n if r.status_code == 200: workflowListData = r.json()\n else: workflowListData = {}\n return render(request, 'workflow/workflow.html', {'workflowListData': workflowListData,\n 'response': response})\n\n@authetication_required()\ndef workflowDelete(request):\n if(request.POST.get('logout')):\n del request.session['jwt_token']\n return render(request, 'login.html')\n if(request.GET.get('delete_workflow')):\n data = request.GET\n url = API_URI + '/workflow/{0}'.format(data[\"delete_workflow\"])\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n r = requests.delete(url, headers=headers)\n response = r.text\n return render(request, 'workflow/workflow.html', {'response': 'workflow deleted'})\n\n@authetication_required()\ndef workflowEdit(request):\n if(request.POST.get('logout')):\n del request.session['jwt_token']\n return render(request, 'login.html')\n response = {}\n url = API_URI + '/job'\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n get_location = requests.get(API_URI + '/locations', headers=headers)\n localisation = get_location.json()\n\n get_group = requests.get(API_URI + '/groups', headers=headers)\n group = get_group.json()\n\n get_deviceClass = requests.get(API_URI + '/deviceClasses', headers=headers)\n deviceClass = get_deviceClass.json() \n r = requests.get(url, headers=headers)\n if r.status_code == 200: jobListData = r.json()\n else: jobListData = {}\n precheck_list = []\n for precheck in jobListData[\"jobs\"]:\n if precheck[\"agent_type\"] == \"configuration_differ_precheck\":\n precheck_list.append(precheck)\n if request.POST.get(\"workflow_data\"):\n workflow_data = request.POST.get(\"workflow_data\")\n hostList = request.POST.get('host_list')\n hostFilter= request.POST.get('hostFilter')\n data = ast.literal_eval(workflow_data)\n data[\"hosts\"] = {}\n data[\"hosts\"][\"host_list\"] = [] \n if hostList:\n data[\"hosts\"][\"hostsType\"] = \"hostList\"\n host_list = hostList.split(\"\\r\\n\")\n for host in host_list: \n if host != '':\n data[\"hosts\"][\"host_list\"].append(host)\n else:\n data[\"hosts\"][\"hostsType\"] = \"hostFilter\"\n data['hosts'][\"host_list\"] = [{\"element\":data[\"element\"],\n \"value\":data[\"value\"],\n \"device\":data[\"device\"],}]\n if data.get(\"job_list\") and data.get(\"job_list\") != []:\n for job in data[\"job_list\"]:\n if job == data[\"start\"]:\n data[\"job_list\"][job][\"parameters\"][\"first_in_workflow\"] = True\n if data[\"job_list\"][job][\"agent_type\"] == \"configuration_parser\":\n data[\"job_list\"][job][\"parameters\"][\"keyList\"] = data[\"job_list\"][job][\"parameters\"][\"keyList\"].split(\",\")\n if data[\"job_list\"][job][\"agent_type\"] 
== \"configuration_sender\":\n data[\"job_list\"][job][\"parameters\"][\"remoteCommand\"] = data[\"job_list\"][job][\"parameters\"][\"remoteCommands\"]\n data[\"job_list\"][job][\"login\"] = data[\"login\"]\n data[\"job_list\"][job][\"password\"] = data[\"password\"]\n data[\"job_list\"][job][\"hostsType\"] = data[\"hosts\"][\"hostsType\"]\n data[\"job_list\"][job][\"hosts\"] = data[\"hosts\"][\"host_list\"]\n \n url = API_URI + '/workflow'\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n r = requests.post(url, data=json.dumps(data), headers=headers)\n if r.status_code == 201: response = \"Workflow Created\"\n elif r.status_code in code_list: response = r.text\n else: response = \"Unknown Error\"\n else: response = \"Can not create the workflow, missing data\"\n return workflowList(request, response)\n \n if request.POST.get(\"send_workflow\"):\n data = request.POST\n url = API_URI + '/workflow'\n headers = {'Content-Type' : 'application/json'}\n r = requests.post(url, data=json.dumps(data), headers=headers)\n if r.status_code == 201: response = \"Workflow Created\"\n elif r.status_code in code_list: response = r.text\n else: response = \"Unknown Error\"\n return render(request, 'workflow/new_workflow.html', {'response': response,\n 'deviceClass': deviceClass,\n 'localisation': localisation,\n 'group': group,\n 'precheck_list': json.dumps(precheck_list)})\n\n", "repo_name": "kdfwow64/python-cors-again", "sub_path": "frontend/front/mop/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6451, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "configparser.RawConfigParser", "line_number": 11, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 33, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "front.common.authetication_required", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}, {"api_name": "front.common.authetication_required", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 79, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 87, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 98, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 127, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 127, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 138, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 142, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "front.common.authetication_required", "line_number": 71, "usage_type": "call"}]} 
+{"seq_id": "10385370455", "text": "import logging\nfrom configparser import ConfigParser\nfrom threading import Thread\n\nimport pandas as pd\nfrom fabric import Connection\nfrom libcloud.compute.providers import get_driver\nfrom libcloud.compute.types import NodeState, Provider\n\nfrom ..utils import Retry\nfrom .proxy import HERE, HOME, Proxy, names\n\nlog = logging.getLogger(__name__)\n\n\ndef get_libcloud(region=None):\n \"\"\" get libcloud driver\n :param region: None uses default region from ~/.aws/config\n \"\"\"\n cfg = ConfigParser()\n cfg.read(f\"{HOME}/.aws/credentials\")\n access = cfg.get(\"default\", \"aws_access_key_id\")\n secret = cfg.get(\"default\", \"aws_secret_access_key\")\n if region is None:\n cfg.read(f\"{HOME}/.aws/config\")\n region = cfg.get(\"default\", \"region\")\n cls = get_driver(Provider.EC2)\n lc = cls(access, secret, region=region)\n return lc\n\n\nclass AWS(Proxy):\n \"\"\" proxy on AWS \"\"\"\n\n def __init__(self, name=None, region=None):\n \"\"\"\n :param name: create object for existing node\n :param region: None uses default region from ~/.aws/config\n \"\"\"\n super().__init__()\n self.lc = get_libcloud(region=region)\n if name:\n try:\n self.node = [n for n in self.lc.list_nodes() if n.name == name][0]\n self.session = self.get_session(self.node.public_ips[0])\n except IndexError:\n pass\n\n def get_nodes(self):\n \"\"\" return list of nodes \"\"\"\n return [n for n in self.lc.list_nodes()]\n\n def start(self):\n \"\"\" start node\n :return: ip\n \"\"\"\n\n # launch server. ubuntu.\n name = names.sample(1).item().lower()\n size = [s for s in self.lc.list_sizes() if s.name == \"t3.nano\"][0]\n image = self.lc.list_images(ex_image_ids=[\"ami-03d8261f577d71b6a\"])[0]\n node = self.lc.create_node(\n name,\n size,\n image,\n ex_keyname=\"key\",\n ex_spot=True,\n ex_security_groups=[\"proxy\"],\n ex_metadata=dict(app=\"proxy\", ready=False),\n )\n log.info(f\"waiting for {name} to start\")\n node = self.lc.wait_until_running([node])[0][0]\n self.node = node\n ip = node.public_ips[0]\n self.session = self.get_session(ip)\n\n # configure using fabric\n con = Connection(\n ip, user=\"ubuntu\", connect_kwargs=dict(key_filename=f\"{HOME}/.aws/key\"),\n )\n self.con = con\n # retry until ssh available\n Retry(tries=3, delay=2, warn=1)(con.open)()\n con.put(f\"{HERE}/tinyproxy.conf\")\n con.run(\n \"sudo apt-get -qq update && \"\n \"sudo apt-get -y -qq install dos2unix tinyproxy && \"\n \"dos2unix -q tinyproxy.conf && \"\n \"sudo cp tinyproxy.conf /etc/tinyproxy/tinyproxy.conf && \"\n \"sudo service tinyproxy restart\",\n hide=\"both\",\n )\n\n # wait for proxy to be working\n try:\n self.check_proxy()\n except:\n log.error(f\"Failed to start proxy for {node.extra.instance_id} at {ip}\")\n raise\n\n # make available\n self.lc.ex_create_tags(node, dict(ready=\"True\"))\n log.info(f\" {ip} started\")\n\n return ip\n", "repo_name": "simonm3/mproxy", "sub_path": "mproxy/proxy/aws.py", "file_name": "aws.py", "file_ext": "py", "file_size_in_byte": 3260, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 20, "usage_type": "call"}, {"api_name": "proxy.HOME", "line_number": 21, "usage_type": "name"}, {"api_name": "proxy.HOME", "line_number": 25, "usage_type": "name"}, {"api_name": "libcloud.compute.providers.get_driver", "line_number": 27, "usage_type": "call"}, 
{"api_name": "libcloud.compute.types.Provider.EC2", "line_number": 27, "usage_type": "attribute"}, {"api_name": "libcloud.compute.types.Provider", "line_number": 27, "usage_type": "name"}, {"api_name": "proxy.Proxy", "line_number": 32, "usage_type": "name"}, {"api_name": "proxy.names.sample", "line_number": 59, "usage_type": "call"}, {"api_name": "proxy.names", "line_number": 59, "usage_type": "name"}, {"api_name": "fabric.Connection", "line_number": 78, "usage_type": "call"}, {"api_name": "proxy.HOME", "line_number": 79, "usage_type": "name"}, {"api_name": "utils.Retry", "line_number": 83, "usage_type": "call"}, {"api_name": "proxy.HERE", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "10557742881", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 6 11:30:40 2019\n\n@author: cmore\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport re\ndatos = pd.read_json('data_tenedor.json')\ncampos = list(datos.columns.values)\n#datos = pd.read_pickle('restaurantes_tenedor_DF.pkl')\n# busqueda de Nan\n\ndatos.isnull().sum()\n\n# =============================================================================\n# Mapeo tags\n# =============================================================================\ntags = []\nfor indice,i in tqdm(enumerate(datos['tags'])):\n \n for j in i:\n\n j = j.strip('.').strip('.').replace('Selección Insider - ',\"\").lower().replace('italianos','italiano').replace('mariscos','marisquería').replace('cocina vegana','vegetariano').replace('mediterráneos',\"mediterráneo\").replace('pizza',\"pizzería\").replace('árabe','arabe').replace('japoneses',\"japonés\").replace('del ','').replace('de ','')\n j = j.replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u')\n if j not in tags:\n\n tags.append(j)\n \n datos.loc[indice,j] = 1\n\nfor i in tags:\n datos[i].fillna(0,inplace = True)\ndatos.drop(['tags'], axis = 1, inplace = True)\n# =============================================================================\n# MAPEO DIRECCION\n# =============================================================================\n\n#ori = {'Madrid':'','madrid':'','Spain':'','Calle':'calle','c/':'calle',\n# 'C/':'calle','Paseo':'paseo','Avenida':'avenida','Av':'avenida','AV':'avenida',\n# 'del':'','de':'','el':'','la':'','las':'','los':'','Las':'','Los':'','\\s\\s+':' '}\n\n#ori = [['madrid',''],['spain',''],['av\\.','avenida'],['av','avenida'],['avda','avenida'],[' avd ','avenida '],['del',''],\n# ['de ',''],['el',''],['la',''],['las',''],['los',''],['del',''],['de',''],\n# ['pl ','plaza '],['sta ','santa '],['local ',''],['cc gavia',''],['bis',''],\n# ['nº',''],['s/n',''],['planta',''],['local',''],['bajo',''],['º',''],['ª',''],\n# ['portal',''],['no:',''],['ctra','carretera'],['gral','general'],['dcha',''],['izq ',' '],['plaze','plaza'],['puerta','']]\n\nori = [['madrid',''],['spain',''],['av\\.','avenida'],['av','avenida'],['avda','avenida'],[' avd ','avenida '],\n ['pl ','plaza '],['sta ','santa '],['local ',''],['cc gavia',''],['bis',''],\n ['nº',''],['s/n',''],['planta',''],['local',''],['bajo',''],['º',''],['ª',''],\n ['portal',''],['no:',''],['ctra','carretera'],['gral','general'],['dcha',''],['izq ',' '],['plaze','plaza'],['puerta','']]\n\n#ori3 = [['madrid',''],['spain',''],['av\\. 
','avenida'],['del ',''],['de ','']]\ntipo = [\"calle\",\"avenida\",\"paseo\",\"boulevar\",\"glorieta\",\"plaza\",\"carretera\",\"ronda\",\"travesia\"]\n\ndireccion = datos['direccion'].tolist()\n\nfor index,i in tqdm(enumerate(direccion)):\n\n i = i.lower().replace('á','a').replace('é','e').replace('í','i').replace('ó','o').replace('ú','u').replace('à','a').replace('c/','calle ').replace('pl. ','plaza ')#.replace('sta ','santa ')\n i = re.sub(\",\",\"\",i)\n i = re.sub(\"\\.\",\"\",i)\n i = re.sub(r'\\([^)]*\\)',\"\",i)\n i = re.sub(\"-..+\",\"\",i)\n i = re.sub(\"acceso(...)\",\"\",i)\n i = re.sub(\"mercado de..+\",\" \",i)\n try:\n cp = re.findall(\"\\d{5}\",i)[0]\n except:\n cp = None\n i = re.sub(\"\\d{5}\",\"\",i)\n try:\n numero = re.findall(\"\\d+\",i)[0]\n i = re.sub(\"\\d+\",\"\",i)\n if len(numero) == 4:\n numero = numero[0:2]\n except:\n numero = None\n\n for j in ori:\n\n i = re.sub(r'\\b'+j[0]+r'\\b',j[1],i)\n try: \n if i.split()[0] in tipo:\n tipo_calle = i.split()[0]\n i = re.sub(i.split()[0],\"\",i)\n else:\n tipo_calle = \"calle\"\n except:\n tipo_calle = None\n\n i = re.sub('\\s\\s+',' ',i)\n i = i.strip()\n i = re.sub(r' [a-m]\\b','',i)\n i = re.sub(r' esq..+',' ',i)\n for j in tipo:\n i = i.replace(j,'')\n datos.loc[index,'tipo'] = tipo_calle\n datos.loc[index,'calle'] = i.strip()\n try:\n datos.loc[index,'numero'] = int(numero)\n except:\n datos.loc[index,'numero'] = 0\n \n try:\n datos.loc[index,'cp'] = int(cp)\n except:\n datos.loc[index,'cp'] = 0\n\ndatos.drop(['direccion'], axis = 1, inplace = True)\n\n\n# =============================================================================\n# mapeo precio medio\n# =============================================================================\n \nfor index,i in tqdm(enumerate(datos['precio'])):\n try:\n \n datos.loc[index,'medio'] = int(re.findall(\"\\d{1,3}\",i)[0])\n except:\n datos.loc[index,'medio'] = 0\ndatos.drop(['precio'], axis = 1, inplace = True)\n# =============================================================================\n# mapeo oferta\n# =============================================================================\n\nfor index,i in tqdm(enumerate(datos['oferta'])):\n try:\n if i[0] =='-':\n datos.loc[index,'descuento'] = int(i[1:3])\n elif i[0] =='¡':\n datos.loc[index,'descuento'] = int(i[2:4])\n elif i[0] == int:\n datos.loc[index,'descuento'] = int(i[0:2])\n \n except:\n datos.loc[index,'descuento'] = 0\n \ndatos['descuento'].fillna(0).astype(int)\n\ndatos.drop(['oferta'], axis = 1, inplace = True) \n \n# =============================================================================\n# mapeo reviews\n# =============================================================================\n\ndatos['reviews'] = datos['reviews'].str.replace(r'\\D+', '')\ndatos['reviews']=pd.to_numeric(datos['reviews'], errors='coerce').fillna(0) \n\n # =============================================================================\n# mapeo rating\n# =============================================================================\n\ndatos['rating'] = datos['rating'].str.replace(r',', '.').astype(float).fillna(0) \n \ncampos = list(datos.columns.values) \n \ndel cp, direccion, i,index,indice,j,numero,ori,tags,tipo,tipo_calle\n#del a,aux,b,cena,comida,d,desayuno,dias,dias2,dias3,horas,horas2,i,indice,j,k,l1,l2,lista,pos,pos2,ppp,primero,tipos,ultimo,x\n\ndatos.to_pickle('data_tenedor.pkl')\n\n\n\n\n\n\n\n", "repo_name": "cmorenocobian/master", "sub_path": "2.2 limpiador_tenedor.py", "file_name": "2.2 limpiador_tenedor.py", "file_ext": 
"py", "file_size_in_byte": 6264, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_json", "line_number": 11, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 22, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 61, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 64, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 65, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 66, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 67, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 68, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 69, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 71, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 74, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 76, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 77, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 85, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 89, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 95, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 97, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 98, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 120, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 123, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "3457159695", "text": "import gevent\nimport gevent.monkey\n\ngevent.monkey.patch_all()\n\nfrom flask import Flask, request, Response, render_template\n\napp = Flask(__name__)\n\ndef event_stream():\n count = 0\n while True:\n gevent.sleep(1)\n yield 'data: %s\\n\\n' % count\n count += 1\n\ndef json_stream():\n while True:\n gevent.sleep(1)\n yield 'data: {\"id\": \"John123\", \"msg\":\"HELLO WORLD\"}\\n\\n'\n\n@app.route('/my_event_source')\ndef sse_request():\n return Response(\n json_stream(),\n mimetype='text/event-stream')\n\n@app.route('/')\ndef page(): \n return render_template('sse.html')\n\nif __name__ == '__main__':\n app.debug = True\n app.run(threaded=True)\n", "repo_name": "siwells/sandpit", "sub_path": "python/server.sent.events/src/sse_server.py", "file_name": "sse_server.py", "file_ext": "py", "file_size_in_byte": 692, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "gevent.monkey.patch_all", "line_number": 4, "usage_type": "call"}, {"api_name": "gevent.monkey", "line_number": 4, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "gevent.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "25942420561", "text": "from asyncio import current_task\nfrom sqlalchemy.ext.asyncio import (\n create_async_engine,\n async_sessionmaker,\n async_scoped_session,\n AsyncSession,\n)\n\nfrom src.core.config import settings\n\n\nclass DatabaseHelper:\n \"\"\"\n A class for working with a database\n \"\"\"\n def __init__(self, url: str):\n self.engine = create_async_engine(url=url)\n\n self.session_factory = 
async_sessionmaker(\n bind=self.engine,\n autoflush=False,\n autocommit=False,\n expire_on_commit=False,\n )\n\n def get_scoped_session(self):\n session = async_scoped_session(\n session_factory=self.session_factory,\n scopefunc=current_task,\n )\n return session\n\n async def scope_session_dependency(self) -> AsyncSession:\n session = self.get_scoped_session()\n yield session\n await session.remove()\n\n\ndb_helper = DatabaseHelper(url=settings.db_url)\n", "repo_name": "amoglock/blue_box", "sub_path": "src/database/database_helper.py", "file_name": "database_helper.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sqlalchemy.ext.asyncio.create_async_engine", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.async_sessionmaker", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.asyncio.async_scoped_session", "line_number": 27, "usage_type": "call"}, {"api_name": "asyncio.current_task", "line_number": 29, "usage_type": "name"}, {"api_name": "sqlalchemy.ext.asyncio.AsyncSession", "line_number": 33, "usage_type": "name"}, {"api_name": "src.core.config.settings.db_url", "line_number": 39, "usage_type": "attribute"}, {"api_name": "src.core.config.settings", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "39360834402", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('sportsman', '0002_auto_20160531_2258'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='student',\n name='dataVersion',\n field=models.IntegerField(verbose_name='数据版本, 从1开始, 每保存一次便增加1.', null=True, blank=True),\n ),\n migrations.AddField(\n model_name='studentevaluation',\n name='correspondWithStudentDataVersion',\n field=models.IntegerField(verbose_name='本评价对应的学生数据版本.(若小于当前关联学生数据的版本号, 则表明此评价可能不匹配.)', null=True, blank=True),\n ),\n migrations.AddField(\n model_name='studentevaluation',\n name='studentDataComplete',\n field=models.BooleanField(verbose_name='本评价对应的学生数据是否完整', default=True),\n preserve_default=False,\n ),\n ]\n", "repo_name": "pongpongcn/shsports_sportsman2015", "sub_path": "mysite/sportsman/migrations/0003_auto_20161121_0005.py", "file_name": "0003_auto_20161121_0005.py", "file_ext": "py", "file_size_in_byte": 1096, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 
24, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "71448060964", "text": "import asyncio\nimport json\nfrom EdgeGPT import Chatbot, ConversationStyle\n\nwith open('./cookies.json', 'r') as f:\n cookies = json.load(f)\n\nclass Bing:\n def __init__(self):\n self.bot = Chatbot(cookies=cookies)\n\n async def bing(self, prompt):\n response = await self.bot.ask(prompt=prompt, conversation_style=ConversationStyle.balanced, wss_link=\"wss://sydney.bing.com/sydney/ChatHub\")\n return response\n \n # close connection\n async def close(self):\n await self.bot.close()\n ", "repo_name": "ivan00105/Voice-Based-AI-Assistant-with-ChatGPT-on-Raspberry-Pi", "sub_path": "src/bing.py", "file_name": "bing.py", "file_ext": "py", "file_size_in_byte": 524, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 9, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 6, "usage_type": "call"}, {"api_name": "EdgeGPT.Chatbot", "line_number": 10, "usage_type": "call"}, {"api_name": "EdgeGPT.ConversationStyle.balanced", "line_number": 13, "usage_type": "attribute"}, {"api_name": "EdgeGPT.ConversationStyle", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "28170593111", "text": "from dungeon_level.dungeon_tiles import Tiles, key_to_lock, key_tiles, TileTypes, item_to_hazard, item_tiles, lock_tiles, mission_tiles\nfrom validation.solver import Solver\nfrom graph_structure.graph_node import Node, GNode, Start, End, Key, Lock\nfrom dungeon_level.level import Level\nfrom scipy.ndimage.measurements import label as label_connected_components\nfrom scipy.ndimage import convolve\nfrom skimage.morphology import binary_dilation\nfrom graph_structure.graph import Graph\nfrom log import Log\nimport numpy as np\nimport copy\n\nclass MissionGenerator:\n @staticmethod\n def generate_mission(level, solution_node_order, mission_aesthetic):\n Log.print(level)\n positions_map = dict()\n node_to_tile = dict()\n\n start_position = MissionGenerator.get_start_position(level)\n\n was_successful, level_with_mission = MissionGenerator._generate_mission(level, 0, solution_node_order, positions_map, node_to_tile, start_position, mission_aesthetic)\n level.upper_layer = level_with_mission.upper_layer\n Log.print(level)\n return was_successful\n\n\n @staticmethod\n def _generate_mission(level, node_index, solution_node_order, positions_map, node_to_tile, start_position, mission_aesthetic):\n if node_index >= len(solution_node_order):\n return True, level\n\n level = copy.deepcopy(level)\n positions_map = copy.deepcopy(positions_map)\n node_to_tile = copy.deepcopy(node_to_tile)\n\n node = solution_node_order[node_index]\n random_positions = MissionGenerator.get_random_positions(level.upper_layer, start_position, node, node_to_tile)\n was_add_successful, level = MissionGenerator.add_mission_node(level, solution_node_order, node, node_index, positions_map, node_to_tile, random_positions, start_position, mission_aesthetic)\n if not was_add_successful:\n return False, level\n\n return True, level\n\n\n is_generator_recursive = False\n @staticmethod\n def add_mission_node(level, solution_node_order, node, node_index, positions_map, node_to_tile, random_positions, start_position, mission_aesthetic):\n original_layer = level.upper_layer.copy()\n for position in random_positions:\n level.upper_layer = original_layer.copy()\n 
MissionGenerator.add_mission_tile(level.upper_layer, node, position, positions_map, node_to_tile, mission_aesthetic)\n\n Log.print(\"\\n\\n\")\n Log.print(level)\n\n if Solver.does_level_follow_mission(level, solution_node_order[:node_index + 1], positions_map):\n was_successful, level = MissionGenerator._generate_mission(level, node_index + 1, solution_node_order, positions_map, node_to_tile, start_position, mission_aesthetic)\n if was_successful:\n return was_successful, level\n elif not MissionGenerator.is_generator_recursive:\n return False, level\n return False, level\n\n\n @staticmethod\n def get_start_position(level):\n components, component_count = MissionGenerator.get_rooms_components(level.upper_layer)\n start_position = MissionGenerator.get_random_positions_per_component(components, component_count)[0]\n return start_position\n\n @staticmethod\n def get_random_positions(layer, start_position, node, node_to_tile):\n if isinstance(node, Lock):\n return MissionGenerator.get_random_positions_for_lock(layer, start_position)\n elif isinstance(node, Key) or isinstance(node, End):\n return MissionGenerator.get_random_positions_for_key(layer, start_position)\n elif isinstance(node, Start):\n return np.array([start_position])\n\n\n @staticmethod\n def get_space_connected_to_position(layer, position):\n empty_mask = (layer != Tiles.wall)\n labeled_components, component_count = label_connected_components(empty_mask)\n label = labeled_components[tuple(position)]\n connected_space = (labeled_components == label)\n return connected_space\n\n @staticmethod\n def get_walls_and_corridors_connected_to_space(layer, space):\n space = binary_dilation(space)\n wall_corridor_mask = MissionGenerator.get_wall_corridor_mask(layer)\n connected_walls_and_corridors = np.logical_and(space, wall_corridor_mask)\n return connected_walls_and_corridors\n\n\n @staticmethod\n def get_random_positions_for_key(layer, start_position):\n connected_space = MissionGenerator.get_space_connected_to_position(layer, start_position).astype(int)\n mission_mask = MissionGenerator.get_mission_mask(layer)\n np.clip(connected_space - mission_mask, 0, 1, out=connected_space)\n random_positions = MissionGenerator.get_random_positions_in_component(connected_space, 1, 3)\n return random_positions\n \n\n @staticmethod\n def get_random_positions_for_lock(layer, start_position):\n connected_space = MissionGenerator.get_space_connected_to_position(layer, start_position)\n connected_walls_and_corridors = MissionGenerator.get_walls_and_corridors_connected_to_space(layer, connected_space)\n connected_walls_and_corridors_components, component_count = label_connected_components(connected_walls_and_corridors)\n random_positions = MissionGenerator.get_random_positions_per_component(connected_walls_and_corridors_components, component_count, 1)\n return random_positions\n \n\n @staticmethod\n def get_random_positions_per_component(components_mask, component_count, positions_per_component=1):\n random_position_list = []\n for component_number in range(1, component_count + 1):\n random_positions = MissionGenerator.get_random_positions_in_component(components_mask, component_number, positions_per_component)\n random_position_list.extend(random_positions)\n if len(random_position_list) > 0:\n random_positions = np.vstack(random_position_list)\n else:\n random_positions = np.zeros((0,2))\n np.random.shuffle(random_positions)\n return random_positions\n\n @staticmethod\n def get_matching_tile(node_to_tile, node, mission_aesthetic, get='key'):\n if 
isinstance(node, Key):\n key_node = node\n elif isinstance(node, Lock):\n key_node = next(iter(node.key_s))\n else:\n return None\n \n if key_node not in node_to_tile:\n if len(key_node.lock_s) > 1 or np.random.random() < mission_aesthetic.single_lock_is_hazard_probability:\n node_to_tile[key_node] = np.random.choice(item_tiles)\n else:\n node_to_tile[key_node] = np.random.choice(key_tiles)\n\n tile = node_to_tile[key_node]\n\n if get == 'key':\n return tile\n elif get == 'lock':\n if tile.get_tile_type() == TileTypes.key_lock:\n return key_to_lock[tile]\n elif tile.get_tile_type() == TileTypes.item_hazard:\n return item_to_hazard[tile]\n\n\n @staticmethod\n def add_mission_tile(layer, node, position, positions_map, node_to_tile, mission_aesthetic):\n positions_map[node] = position\n position = tuple(position)\n\n if isinstance(node, Start):\n layer[position] = Tiles.player\n elif isinstance(node, End):\n layer[position] = Tiles.finish\n elif isinstance(node, Key):\n key_tile = MissionGenerator.get_matching_tile(node_to_tile, node, mission_aesthetic, get='key')\n layer[position] = key_tile\n elif isinstance(node, Lock):\n lock_tile = MissionGenerator.get_matching_tile(node_to_tile, node, mission_aesthetic, get='lock')\n if lock_tile.get_tile_type() == TileTypes.item_hazard:\n MissionGenerator.spread_hazard(layer, lock_tile, position, mission_aesthetic)\n else:\n layer[position] = lock_tile\n elif isinstance(node, GNode):\n layer[position] = Tiles.collectable\n\n @staticmethod\n def spread_hazard(layer, hazard_tile, position, aesthetic_settings):\n offsets = [np.array([-1, 0]), np.array([1, 0]), np.array([0, -1]), np.array([0, 1])]\n spread_probability = aesthetic_settings.hazard_spread_probability[hazard_tile]\n hazard_tile_positions = [position]\n while len(hazard_tile_positions) > 0:\n hazard_tile_position = hazard_tile_positions.pop()\n\n layer[tuple(hazard_tile_position)] = hazard_tile\n for offset in offsets:\n if np.random.random() < spread_probability:\n neighbor_hazard_tile_position = tuple(hazard_tile_position + offset)\n if Level.is_position_within_layer_bounds(layer, neighbor_hazard_tile_position) and layer[neighbor_hazard_tile_position] == Tiles.empty:\n hazard_tile_positions.append(neighbor_hazard_tile_position)\n\n\n\n\n @staticmethod\n def get_random_positions_in_component(labeled_layer, component_number, position_count=1):\n mask = (labeled_layer == component_number)\n return MissionGenerator.get_random_positions_in_mask(mask, position_count)\n\n\n @staticmethod\n def get_random_positions_in_mask(mask, position_count=1):\n positions_in_mask = np.argwhere(mask == 1)\n if positions_in_mask.shape[0] > 0:\n samples_count = np.minimum(position_count, positions_in_mask.shape[0])\n indices = np.random.choice(positions_in_mask.shape[0], size=samples_count, replace=False)\n random_positions = positions_in_mask[indices,:]\n else:\n random_positions = np.zeros((0,2))\n\n return random_positions\n\n\n # https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.ndimage.measurements.label.html\n @staticmethod\n def get_rooms_components(layer):\n empty_mask = (layer == Tiles.empty)\n labeled_components, component_count = label_connected_components(empty_mask)\n return labeled_components, component_count\n\n\n @staticmethod\n def get_potential_lock_components(layer):\n potential_lock_mask = MissionGenerator.get_wall_corridor_mask(layer)\n labeled_components, component_count = label_connected_components(potential_lock_mask)\n return labeled_components, component_count\n\n\n 
@staticmethod\n def get_mission_mask(layer):\n mission_mask = np.zeros(layer.shape)\n for mission_tile in mission_tiles:\n mission_mask = np.logical_or(mission_mask, layer == mission_tile)\n\n return mission_mask.astype(int)\n\n\n # Returns a mask representing all the possible locations for a lock\n # https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.convolve.html\n @staticmethod\n def get_wall_corridor_mask(layer):\n convolutional_result1 = 2 # Based on the filter, a horizontal wall has a result == 2\n convolutional_result2 = 8 # Based on the filter, a vertical wall has a result == 8\n # We can't just use a symmetric kernel because we need to ignore corners.\n wall_kernel = np.array([\n [0., 4., 0.],\n [1., 0, 1.],\n [0., 4., 0.],])\n\n wall_mask = (layer == Tiles.wall).astype(int)\n mission_mask = MissionGenerator.get_mission_mask(layer)\n np.clip(wall_mask - mission_mask, 0, 1, out=wall_mask)\n conv_result = convolve(wall_mask, wall_kernel, mode='constant', cval=1.0)\n wall_corridor_mask = (np.logical_or(conv_result == convolutional_result1, conv_result == convolutional_result2)).astype(int)\n np.clip(wall_corridor_mask - mission_mask, 0, 1, out=wall_corridor_mask)\n return wall_corridor_mask\n\n", "repo_name": "bjatkin/dungeon-design", "sub_path": "generation/legacy/mission_generator.py", "file_name": "mission_generator.py", "file_ext": "py", "file_size_in_byte": 11810, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "log.Log.print", "line_number": 16, "usage_type": "call"}, {"api_name": "log.Log", "line_number": 16, "usage_type": "name"}, {"api_name": "log.Log.print", "line_number": 24, "usage_type": "call"}, {"api_name": "log.Log", "line_number": 24, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 33, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 34, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 35, "usage_type": "call"}, {"api_name": "log.Log.print", "line_number": 54, "usage_type": "call"}, {"api_name": "log.Log", "line_number": 54, "usage_type": "name"}, {"api_name": "log.Log.print", "line_number": 55, "usage_type": "call"}, {"api_name": "log.Log", "line_number": 55, "usage_type": "name"}, {"api_name": "validation.solver.Solver.does_level_follow_mission", "line_number": 57, "usage_type": "call"}, {"api_name": "validation.solver.Solver", "line_number": 57, "usage_type": "name"}, {"api_name": "graph_structure.graph_node.Lock", "line_number": 74, "usage_type": "argument"}, {"api_name": "graph_structure.graph_node.Key", "line_number": 76, "usage_type": "argument"}, {"api_name": "graph_structure.graph_node.End", "line_number": 76, "usage_type": "argument"}, {"api_name": "graph_structure.graph_node.Start", "line_number": 78, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 79, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.wall", "line_number": 84, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 84, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 85, "usage_type": "call"}, {"api_name": "skimage.morphology.binary_dilation", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 102, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 111, 
"usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 126, "usage_type": "attribute"}, {"api_name": "graph_structure.graph_node.Key", "line_number": 131, "usage_type": "argument"}, {"api_name": "graph_structure.graph_node.Lock", "line_number": 133, "usage_type": "argument"}, {"api_name": "numpy.random.random", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 140, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.item_tiles", "line_number": 140, "usage_type": "argument"}, {"api_name": "numpy.random", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 142, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.key_tiles", "line_number": 142, "usage_type": "argument"}, {"api_name": "numpy.random", "line_number": 142, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes.key_lock", "line_number": 149, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes", "line_number": 149, "usage_type": "name"}, {"api_name": "dungeon_level.dungeon_tiles.key_to_lock", "line_number": 150, "usage_type": "name"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes.item_hazard", "line_number": 151, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes", "line_number": 151, "usage_type": "name"}, {"api_name": "dungeon_level.dungeon_tiles.item_to_hazard", "line_number": 152, "usage_type": "name"}, {"api_name": "graph_structure.graph_node.Start", "line_number": 160, "usage_type": "argument"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.player", "line_number": 161, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 161, "usage_type": "name"}, {"api_name": "graph_structure.graph_node.End", "line_number": 162, "usage_type": "argument"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.finish", "line_number": 163, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 163, "usage_type": "name"}, {"api_name": "graph_structure.graph_node.Key", "line_number": 164, "usage_type": "argument"}, {"api_name": "graph_structure.graph_node.Lock", "line_number": 167, "usage_type": "argument"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes.item_hazard", "line_number": 169, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.TileTypes", "line_number": 169, "usage_type": "name"}, {"api_name": "graph_structure.graph_node.GNode", "line_number": 173, "usage_type": "argument"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.collectable", "line_number": 174, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 174, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 186, "usage_type": "attribute"}, {"api_name": "dungeon_level.level.Level.is_position_within_layer_bounds", "line_number": 188, "usage_type": "call"}, {"api_name": "dungeon_level.level.Level", "line_number": 188, "usage_type": "name"}, {"api_name": 
"dungeon_level.dungeon_tiles.Tiles.empty", "line_number": 188, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 188, "usage_type": "name"}, {"api_name": "numpy.argwhere", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 208, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.empty", "line_number": 216, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 216, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 217, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 224, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 230, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.mission_tiles", "line_number": 231, "usage_type": "name"}, {"api_name": "numpy.logical_or", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 244, "usage_type": "call"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles.wall", "line_number": 249, "usage_type": "attribute"}, {"api_name": "dungeon_level.dungeon_tiles.Tiles", "line_number": 249, "usage_type": "name"}, {"api_name": "numpy.clip", "line_number": 251, "usage_type": "call"}, {"api_name": "scipy.ndimage.convolve", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "15147294452", "text": "# Grindstone gs_UVDetect_class.py\r\n# Authors: Sam Carnes and Sean Adams\r\n\r\n# This file defines a script that makes sure all geometry has UVs\r\n\r\n\r\nimport maya.cmds as cmds\r\n\r\nclass UVDetect:\r\n \r\n #********** INIT **********#\r\n \r\n def __init__(self):\r\n \r\n # Identify whether or not the script has an auto-fix function\r\n self.hasFix = True\r\n\r\n # identify what this check is called\r\n self.scriptName = \"Has UVs\"\r\n \r\n # Provide a label for the button that executes the auto-fix\r\n # CANNOT EXCEED 20 CHARACTERS\r\n \r\n self.fixLabel = \"Find empty UVs\"\r\n \r\n # Array for holding any empty UVs following the scan\r\n \r\n self.emptyUVs = []\r\n \r\n \r\n \r\n #********** DO CHECK **********#\r\n \r\n def doCheck(self):\r\n \r\n # Clean out existing vars to start a new detection\r\n self.emptyUVs = []\r\n unMapLim = 0\r\n unMapCount = 0\r\n unMapRep = ''\r\n \r\n # Array for holding polygons and their faces\r\n polyHold = cmds.ls(geometry = True)\r\n faceHold = cmds.polyListComponentConversion(polyHold, tf = True)\r\n \r\n # Saving the selection and hilite states\r\n userHil = cmds.ls(hilite = True)\r\n userSel = cmds.ls(selection = True)\r\n \r\n # Selecting the faces for contraint\r\n cmds.select(faceHold)\r\n \r\n # Constraining the selection to any faces that are unmapped\r\n cmds.polySelectConstraint(mode = 3, type = 8, textured = 2)\r\n self.emptyUVs = cmds.ls(selection = True)\r\n unMapLim = len(self.emptyUVs)\r\n \r\n # Clearing out the selections\r\n cmds.polySelectConstraint(disable = True)\r\n cmds.select(clear = True)\r\n \r\n # Counting up the faces found to be unmapped\r\n for i in range(0, unMapLim):\r\n cmds.select(self.emptyUVs[i])\r\n unMapCount += cmds.polyEvaluate(self.emptyUVs[i], 
faceComponent = True)\r\n \r\n # Restoring the original selection state\r\n cmds.selectType(allObjects = True)\r\n cmds.hilite(userHil)\r\n cmds.select(userSel)\r\n \r\n # Determining if any unmapped faces have been found and reporting\r\n if self.emptyUVs:\r\n unMapRep = '%d unmapped faces detected.'%(unMapCount)\r\n \r\n return unMapRep\r\n\r\n \r\n #********** RUN FIX **********#\r\n \r\n # Hilight any unmapped faces found\r\n def runFix(self):\r\n \r\n try:\r\n \r\n # Highlight any unmapped faces\r\n cmds.select(self.emptyUVs)\r\n return \"Unmapped faces highlighted.\"\r\n \r\n except:\r\n \r\n return \"There was a problem selecting the unmapped faces\"\r\n \r\n \r\n \r\n #********** RETURN INSTANCE OF SCRIPT CLASS **********#\r\n \r\ndef getObject():\r\n return UVDetect()", "repo_name": "sadams115/Grindstone", "sub_path": "gs_decentralized/Specialized/Maya/gs_assets/gs_scripts/Modeling/gs_UVDetect_class.py", "file_name": "gs_UVDetect_class.py", "file_ext": "py", "file_size_in_byte": 2916, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "maya.cmds.ls", "line_number": 43, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 43, "usage_type": "name"}, {"api_name": "maya.cmds.polyListComponentConversion", "line_number": 44, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 44, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 47, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 47, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 48, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 48, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 51, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 51, "usage_type": "name"}, {"api_name": "maya.cmds.polySelectConstraint", "line_number": 54, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 54, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 55, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 55, "usage_type": "name"}, {"api_name": "maya.cmds.polySelectConstraint", "line_number": 59, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 59, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 60, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 60, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 64, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 64, "usage_type": "name"}, {"api_name": "maya.cmds.polyEvaluate", "line_number": 65, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 65, "usage_type": "name"}, {"api_name": "maya.cmds.selectType", "line_number": 68, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 68, "usage_type": "name"}, {"api_name": "maya.cmds.hilite", "line_number": 69, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 69, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 70, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 70, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 87, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "10864878923", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[6]:\n\n\nimport pandas as pd\n\n\n# In[10]:\n\n\ndef 
mili_to_min(millis):\n mins=(millis/1000)/60\n\n return round(mins, 3)\n\n\n# In[11]:\n\n\ndef get_subtitle1_list():\n\n cur.execute(\"select distinct movie_key from Subtitles\")\n subtitle1_list = cur.fetchall()\n subtitle1_list = [_[0] for _ in subtitle1_list]\n \n return subtitle1_list\n\n\n# In[12]:\n\n\ndef get_runtime(movie_key):\n \"\"\"해당 영화에 대한 러닝타임(분) 가져오기\"\"\"\n cur.execute(\"select runtime from NVmovie where id = :Id\", {\"Id\": movie_key})\n\n runtime = cur.fetchone()\n runtime = int(re.findall(\"(\\d+)\", runtime[0])[0])\n \n return runtime\n\n\n# In[13]:\n\n\ndef create_movie_df(movie_key):\n \"\"\"해당 영화에 대한 pandas dataframe 만들기\"\"\"\n cur.execute(\"select start_time, lines, h_index from REALQUOTE where movie_key = :Id\", {\"Id\": movie_key})\n quotes = cur.fetchall()\n quotes = sorted(quotes, key=operator.itemgetter(0))\n \n if movie_key in subtitle1_list:\n cur.execute(\"select start_time, lines from Subtitles where movie_key = :Id\", {\"Id\": movie_key})\n script = cur.fetchall()\n else:\n cur.execute(\"select start_time, lines from Subtitles2 where movie_key = :Id\", {\"Id\": movie_key})\n script = cur.fetchall()\n #print(len(quotes), len(script))\n \n quote_df = pd.DataFrame(quotes, columns =['time', 'lines', 'g-index'])\n quote_df.time= quote_df.time.apply(lambda x: mili_to_min(x))\n \n script_df = pd.DataFrame(script, columns =['time', 'lines'])\n script_df.time= script_df.time.apply(lambda x: mili_to_min(int(x)))\n \n runtime = get_runtime(movie_key)\n \n bins = list(map(lambda x:x*runtime, [round(float(_), 3) for _ in np.arange(0,1.1,0.125)]))\n stage_labels = [\"stage\"+str(_+1) for _ in np.arange(0,8,1)]\n \n\n\n merged_df = pd.merge(script_df, quote_df, how='outer')\n merged_df['g-index'] = merged_df['g-index'].fillna(0)\n stages = pd.cut(merged_df['time'], bins, labels=stage_labels)\n merged_df = merged_df.join(stages.to_frame(\"stage\"))\n merged_df.insert(0, \"movie_key\", movie_key)\n \n return merged_df\n\n\n# In[14]:\n\n\ndef get_stages_mean(movie_df):\n \"\"\"stage별 명대사 품질 평균 pandas dataframe 리턴 함수\"\"\"\n stage_mean = movie_df[\"g-index\"].groupby(movie_df[\"stage\"]).mean()\n stage_sum = stage_mean.sum()\n stages_mean = (stage_mean/stage_sum).to_frame(\"stages_mean\")\n stages_mean['movie_key'] = movie_df['movie_key'][0]\n stages_mean.reset_index(level=0, inplace=True)\n #stages_mean['stages_mean'].fillna(0, inplace=True)\n stages_mean = stages_mean[['movie_key', 'stage', 'stages_mean']]\n return stages_mean\n\n\n# In[15]:\n\n\ndef genre_analysis(genre_list):\n \"\"\"꼭 장르별일 필요는 없다\"\"\"\n #subtitle1_list = get_subtitle1_list()\n genre_pd_list = list() \n for _ in genre_list:\n genre_pd_list.append(create_movie_df(_))\n \n genre_concat = pd.concat(genre_pd_list)\n \n genre_stage_mean_list = list()\n for _ in genre_pd_list:\n genre_stage_mean_list.append(get_stages_mean(_))\n \n genre_stage_concat = pd.concat(genre_stage_mean_list)\n \n return genre_concat, genre_stage_concat\n \n\n\n# In[16]:\n\n\ndef stage_analysis(genre_stage_concat, genre_name):\n \"\"\"df info와 pyplot 그래프 확인하는 함수\"\"\"\n import matplotlib.pyplot as plt \n \n print(genre_stage_concat.groupby([genre_stage_concat[\"movie_key\"], genre_stage_concat[\"stage\"]]).mean().info())\n fig = plt.figure()\n plt.plot(list(genre_stage_concat['stage'].unique()),genre_stage_concat.groupby(genre_stage_concat[\"stage\"]).mean()['stages_mean'])\n \n\n\n# In[17]:\n\n\nfrom scipy.stats import sem, t\nfrom scipy import mean\n\ndef confidence_interval(data, confidence):\n \"\"\"confidence는 0~1사이 값\"\"\"\n 
n = len(data)\n m = mean(data)\n std_err = sem(data)\n h = std_err * t.ppf((1 + confidence) / 2, n - 1)\n\n start = round(m - h, 3)\n m = round(m, 3)\n end = round(m + h, 3)\n \n return start, m, end\n\n\n# In[18]:\n\n\ngenre_list = ['액션', '코미디', '드라마', '멜로/로맨스',' 스릴러', 'SF', '판타지', '애니메이션', '모험',' 미스터리', '범죄']\n\n\n# In[373]:\n\n\nfrom collections import defaultdict\n\ndef error_band(genre_stage_concat, confidence):\n error_band_plot_dict = defaultdict(list)\n stage_labels = [\"stage\"+str(_+1) for _ in np.arange(0,8,1)]\n for _ in stage_labels:\n x = genre_stage_concat.loc[genre_stage_concat[\"stage\"] == str(_)]['stages_mean']\n\n start, mean, end = confidence_interval(x.dropna(), confidence)\n error_band_plot_dict[_] = [start, mean, end]\n \n return error_band_plot_dict\n\n\n# In[871]:\n\n\naction10_coord = list()\nfor _ in actionList:\n action10_coord.append(error_dict_to_coordinates(error_band(action_stage_concat.loc[action_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[875]:\n\n\ncomedy10_coord = list()\nfor _ in comedyList:\n comedy10_coord.append(error_dict_to_coordinates(error_band(comedy_stage_concat.loc[comedy_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[876]:\n\n\ndrama10_coord = list()\nfor _ in dramaList:\n drama10_coord.append(error_dict_to_coordinates(error_band(drama_stage_concat.loc[drama_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[877]:\n\n\nromance10_coord = list()\nfor _ in romanceList:\n romance10_coord.append(error_dict_to_coordinates(error_band(romance_stage_concat.loc[romance_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[878]:\n\n\nthriller10_coord = list()\nfor _ in thrillerList:\n thriller10_coord.append(error_dict_to_coordinates(error_band(thriller_stage_concat.loc[thriller_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[879]:\n\n\nSF10_coord = list()\nfor _ in SFList:\n SF10_coord.append(error_dict_to_coordinates(error_band(SF_stage_concat.loc[SF_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[880]:\n\n\nfantasy10_coord = list()\nfor _ in fantasyList:\n fantasy10_coord.append(error_dict_to_coordinates(error_band(fantasy_stage_concat.loc[fantasy_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[881]:\n\n\nanimation10_coord = list()\nfor _ in animationList:\n animation10_coord.append(error_dict_to_coordinates(error_band(animation_stage_concat.loc[animation_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[882]:\n\n\nadventure10_coord = list()\nfor _ in adventureList:\n adventure10_coord.append(error_dict_to_coordinates(error_band(adventure_stage_concat.loc[adventure_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[883]:\n\n\nmystery10_coord = list()\nfor _ in mysteryList:\n mystery10_coord.append(error_dict_to_coordinates(error_band(mystery_stage_concat.loc[mystery_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[884]:\n\n\ncrime10_coord = list()\nfor _ in crimeList:\n crime10_coord.append(error_dict_to_coordinates(error_band(crime_stage_concat.loc[crime_stage_concat['movie_key']== _], 0.9))[2])\n\n\n# In[791]:\n\n\naction_graph = error_dict_to_coordinates(error_band(action_stage_concat, 0.9))\n\n\n# In[793]:\n\n\ncomedy_graph = error_dict_to_coordinates(error_band(comedy_stage_concat, 0.9))\n\n\n# In[794]:\n\n\ndrama_graph = error_dict_to_coordinates(error_band(drama_stage_concat, 0.9))\n\n\n# In[815]:\n\n\nromance_graph = error_dict_to_coordinates(error_band(romance_stage_concat, 0.9))\n\n\n# In[795]:\n\n\nthriller_graph = error_dict_to_coordinates(error_band(thriller_stage_concat, 0.9))\n\n\n# In[796]:\n\n\nSF_graph 
= error_dict_to_coordinates(error_band(SF_stage_concat, 0.9))\n\n\n# In[797]:\n\n\nfantasy_graph = error_dict_to_coordinates(error_band(fantasy_stage_concat, 0.9))\n\n\n# In[798]:\n\n\nanimation_graph = error_dict_to_coordinates(error_band(animation_stage_concat, 0.9))\n\n\n# In[799]:\n\n\nadventure_graph = error_dict_to_coordinates(error_band(adventure_stage_concat, 0.9))\n\n\n# In[800]:\n\n\nmystery_graph = error_dict_to_coordinates(error_band(mystery_stage_concat, 0.9))\n\n\n# In[801]:\n\n\ncrime_graph = error_dict_to_coordinates(error_band(crime_stage_concat, 0.9))\n\n\n# In[789]:\n\n\ndef error_dict_to_coordinates(error_band_dict):\n    \n    x = list(np.arange(1,9,1))\n\n    y_max = [_[1][2] for _ in error_band_dict.items()]\n    y_mean = [_[1][1] for _ in error_band_dict.items()]\n    y_min = [_[1][0] for _ in error_band_dict.items()]\n    \n    return [x, y_max, y_mean, y_min]\n\n\n# In[375]:\n\n\ndef error_dict_to_coordinates(error_band_dict):\n    \n    x = list(np.arange(1,9,1))\n\n    y_max = [_[1][2] for _ in error_band_dict.items()]\n    y_mean = [_[1][1] for _ in error_band_dict.items()]\n    y_min = [_[1][0] for _ in error_band_dict.items()]\n    \n    return [x, y_max, y_mean, y_min]\n\n\n# In[420]:\n\n\naction_stage_concat['genre'] = \"액션\"\n\n\n# In[422]:\n\n\ncomedy_stage_concat['genre'] = \"코미디\"\n\n\n# In[424]:\n\n\ndrama_stage_concat['genre'] = \"드라마\"\n\n\n# In[729]:\n\n\nromance_stage_concat['genre'] = \"멜로/로맨스\"\n\n\n# In[428]:\n\n\nthriller_stage_concat['genre'] = \"스릴러\"\n\n\n# In[430]:\n\n\nSF_stage_concat['genre'] = \"SF\"\n\n\n# In[432]:\n\n\nfantasy_stage_concat['genre'] = \"판타지\"\n\n\n# In[434]:\n\n\nanimation_stage_concat['genre'] = \"애니메이션\"\n\n\n# In[436]:\n\n\nadventure_stage_concat['genre'] = \"모험\"\n\n\n# In[438]:\n\n\nmystery_stage_concat['genre'] = \"미스터리\"\n\n\n# In[440]:\n\n\ncrime_stage_concat['genre'] = \"범죄\"\n\n\n# ### 1. Build genre_stage_concat\n\n# In[719]:\n\n\ngenre_stage_concat = pd.concat([action_stage_concat, comedy_stage_concat, drama_stage_concat, romance_stage_concat, \n                               thriller_stage_concat, SF_stage_concat, fantasy_stage_concat, animation_stage_concat, \n                               adventure_stage_concat, mystery_stage_concat, crime_stage_concat])\n\n\n# In[720]:\n\n\ngenre_stage_concat[\"genre_code\"] = pd.factorize(genre_stage_concat['genre'])[0]+1\n\n\n# In[721]:\n\n\ngenre_stage_concat[\"stage_code\"] = pd.factorize(genre_stage_concat['stage'])[0]+1\n\n\n# In[723]:\n\n\ngenre_stage_concat = genre_stage_concat[['movie_key','stage', 'genre', 'stage_code','genre_code', 'stages_mean']]\n\n\n# ### 2. 
Build genre_x for each genre from genre_stage_concat\n\n# Action, Comedy, Drama, Melodrama/Romance, Thriller, SF, Fantasy, Animation, Adventure, Mystery, Crime\n\n# In[698]:\n\n\naction_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==1][['movie_key','genre_code','stage_code','stages_mean']]\naction_x = action_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\naction_x[\"genre\"] = 1\n\n\n# In[456]:\n\n\ncomedy_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==2][['movie_key','genre_code','stage_code','stages_mean']]\ncomedy_x = comedy_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\ncomedy_x[\"genre\"] = 2\n\n\n# In[731]:\n\n\ndrama_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==3][['movie_key','genre_code','stage_code','stages_mean']]\ndrama_x = drama_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\ndrama_x[\"genre\"] = 3\n\n\n# In[732]:\n\n\nromance_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==4][['movie_key','genre_code','stage_code','stages_mean']]\nromance_x = romance_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nromance_x[\"genre\"] = 4\n\n\n# In[461]:\n\n\nthriller_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==5][['movie_key','genre_code','stage_code','stages_mean']]\nthriller_x = thriller_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nthriller_x[\"genre\"] = 5\n\n\n# In[463]:\n\n\nSF_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==6][['movie_key','genre_code','stage_code','stages_mean']]\nSF_x = SF_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nSF_x[\"genre\"] = 6\n\n\n# In[465]:\n\n\nfantasy_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==7][['movie_key','genre_code','stage_code','stages_mean']]\nfantasy_x = fantasy_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nfantasy_x[\"genre\"] = 7\n\n\n# In[466]:\n\n\nanimation_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==8][['movie_key','genre_code','stage_code','stages_mean']]\nanimation_x = animation_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nanimation_x[\"genre\"] = 8\n\n\n# In[467]:\n\n\nadventure_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==9][['movie_key','genre_code','stage_code','stages_mean']]\nadventure_x = adventure_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nadventure_x[\"genre\"] = 9\n\n\n# In[468]:\n\n\nmystery_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==10][['movie_key','genre_code','stage_code','stages_mean']]\nmystery_x = mystery_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\nmystery_x[\"genre\"] = 10\n\n\n# In[469]:\n\n\ncrime_x = genre_stage_concat.loc[genre_stage_concat['genre_code']==11][['movie_key','genre_code','stage_code','stages_mean']]\ncrime_x = crime_x.pivot(index = 'movie_key', columns='stage_code', values='stages_mean').dropna()\ncrime_x[\"genre\"] = 11\n\n\n# ### 3. 
Take each genre_x and compute graph similarity\n\n# In[786]:\n\n\n# Coordinates of the genre-average graph\naction_mean = action_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    action_x[i] = action_x[i].apply(lambda x: abs(x-action_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\naction_stage_min_max = list()\nfor _ in error_band(action_stage_concat, 0.9).keys():\n    action_stage_min_max.append(error_band(action_stage_concat, 0.9)[_][2] - error_band(action_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    action_x[i] = action_x[i].apply(lambda x: x/action_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\naction_similar_df = action_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\naction_similar_df.head()\n\n\n# In[710]:\n\n\n# Coordinates of the genre-average graph\ncomedy_mean = comedy_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    comedy_x[i] = comedy_x[i].apply(lambda x: abs(x-comedy_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\ncomedy_stage_min_max = list()\nfor _ in error_band(comedy_stage_concat, 0.9).keys():\n    comedy_stage_min_max.append(error_band(comedy_stage_concat, 0.9)[_][2] - error_band(comedy_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    comedy_x[i] = comedy_x[i].apply(lambda x: x/comedy_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\ncomedy_similar_df = comedy_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\ncomedy_similar_df.head()\n\n\n# In[733]:\n\n\n# Coordinates of the genre-average graph\ndrama_mean = drama_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    drama_x[i] = drama_x[i].apply(lambda x: abs(x-drama_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\ndrama_stage_min_max = list()\nfor _ in error_band(drama_stage_concat, 0.9).keys():\n    drama_stage_min_max.append(error_band(drama_stage_concat, 0.9)[_][2] - error_band(drama_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    drama_x[i] = drama_x[i].apply(lambda x: x/drama_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\ndrama_similar_df = drama_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\ndrama_similar_df.head()\n\n\n# In[735]:\n\n\n# Coordinates of the genre-average graph\nromance_mean = romance_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    romance_x[i] = romance_x[i].apply(lambda x: abs(x-romance_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nromance_stage_min_max = list()\nfor _ in error_band(romance_stage_concat, 0.9).keys():\n    romance_stage_min_max.append(error_band(romance_stage_concat, 0.9)[_][2] - error_band(romance_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    romance_x[i] = romance_x[i].apply(lambda x: x/romance_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nromance_similar_df = romance_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nromance_similar_df.head()\n\n\n# In[821]:\n\n\n# Coordinates of the genre-average graph\nthriller_mean = thriller_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    thriller_x[i] = thriller_x[i].apply(lambda x: abs(x-thriller_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nthriller_stage_min_max = list()\nfor _ in error_band(thriller_stage_concat, 0.9).keys():\n    thriller_stage_min_max.append(error_band(thriller_stage_concat, 0.9)[_][2] - error_band(thriller_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in 
np.arange(1,9,1):\n    thriller_x[i] = thriller_x[i].apply(lambda x: x/thriller_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nthriller_similar_df = thriller_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nthriller_similar_df.head()\n\n\n# In[823]:\n\n\n# Coordinates of the genre-average graph\nSF_mean = SF_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    SF_x[i] = SF_x[i].apply(lambda x: abs(x-SF_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nSF_stage_min_max = list()\nfor _ in error_band(SF_stage_concat, 0.9).keys():\n    SF_stage_min_max.append(error_band(SF_stage_concat, 0.9)[_][2] - error_band(SF_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    SF_x[i] = SF_x[i].apply(lambda x: x/SF_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nSF_similar_df = SF_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nSF_similar_df.head()\n\n\n# In[825]:\n\n\n# Coordinates of the genre-average graph\nfantasy_mean = fantasy_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    fantasy_x[i] = fantasy_x[i].apply(lambda x: abs(x-fantasy_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nfantasy_stage_min_max = list()\nfor _ in error_band(fantasy_stage_concat, 0.9).keys():\n    fantasy_stage_min_max.append(error_band(fantasy_stage_concat, 0.9)[_][2] - error_band(fantasy_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    fantasy_x[i] = fantasy_x[i].apply(lambda x: x/fantasy_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nfantasy_similar_df = fantasy_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nfantasy_similar_df.head()\n\n\n# In[827]:\n\n\n# Coordinates of the genre-average graph\nanimation_mean = animation_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    animation_x[i] = animation_x[i].apply(lambda x: abs(x-animation_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nanimation_stage_min_max = list()\nfor _ in error_band(animation_stage_concat, 0.9).keys():\n    animation_stage_min_max.append(error_band(animation_stage_concat, 0.9)[_][2] - error_band(animation_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    animation_x[i] = animation_x[i].apply(lambda x: x/animation_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nanimation_similar_df = animation_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nanimation_similar_df.head()\n\n\n# In[829]:\n\n\n# Coordinates of the genre-average graph\nadventure_mean = adventure_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    adventure_x[i] = adventure_x[i].apply(lambda x: abs(x-adventure_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nadventure_stage_min_max = list()\nfor _ in error_band(adventure_stage_concat, 0.9).keys():\n    adventure_stage_min_max.append(error_band(adventure_stage_concat, 0.9)[_][2] - error_band(adventure_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    adventure_x[i] = adventure_x[i].apply(lambda x: x/adventure_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nadventure_similar_df = adventure_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nadventure_similar_df.head()\n\n\n# In[831]:\n\n\n# Coordinates of the genre-average graph\nmystery_mean = mystery_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    mystery_x[i] = mystery_x[i].apply(lambda x: 
abs(x-mystery_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\nmystery_stage_min_max = list()\nfor _ in error_band(mystery_stage_concat, 0.9).keys():\n    mystery_stage_min_max.append(error_band(mystery_stage_concat, 0.9)[_][2] - error_band(mystery_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    mystery_x[i] = mystery_x[i].apply(lambda x: x/mystery_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\nmystery_similar_df = mystery_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\nmystery_similar_df.head()\n\n\n# In[833]:\n\n\n# Coordinates of the genre-average graph\ncrime_mean = crime_x.groupby('genre').mean()\n\n\n# Compute the euclidean distance per stage\nfor i in np.arange(1,9,1):\n    crime_x[i] = crime_x[i].apply(lambda x: abs(x-crime_mean[i].values[0]))\n\n# Compute the confidence-interval width per stage\ncrime_stage_min_max = list()\nfor _ in error_band(crime_stage_concat, 0.9).keys():\n    crime_stage_min_max.append(error_band(crime_stage_concat, 0.9)[_][2] - error_band(crime_stage_concat, 0.9)[_][0])\n\n# Divide by the computed confidence-interval width\nfor i in np.arange(1,9,1):\n    crime_x[i] = crime_x[i].apply(lambda x: x/crime_stage_min_max[i-1])\n    \n    \n# Final sorted movie_key list\ncrime_similar_df = crime_x.drop(columns='genre').sum(axis=1).to_frame('dist_sum').sort_values(['dist_sum'])\n\ncrime_similar_df.head()\n\n\n# ### Merge with box-office results and pick the top 10\n\n# In[775]:\n\n\nbox_data = pd.read_csv(\"ver3.box_table.csv\", engine='python')\nbox_data.drop(box_data.columns[0], axis=1, inplace=True)\nbox_data.drop(columns=[\"id\"], inplace=True)\n\n\n# In[776]:\n\n\nbox_data.rename(columns={\"origin\": \"audience\"}, inplace=True)\n\n\n# In[784]:\n\n\nbox_data.head()\n\n\n# In[785]:\n\n\naction_similar_df = pd.merge(action_similar_df, box_data, on='movie_key')\naction_top_10 = action_similar_df.head(10)\naction_top_10\n\n\n# In[707]:\n\n\ncomedy_similar_df = pd.merge(comedy_similar_df, box_data, on='movie_key')\ncomedy_top_10 = comedy_similar_df.head(10)\ncomedy_top_10\n\n\n# In[734]:\n\n\ndrama_similar_df = pd.merge(drama_similar_df, box_data, on='movie_key')\ndrama_top_10 = drama_similar_df.head(10)\ndrama_top_10\n\n\n# In[736]:\n\n\nromance_similar_df = pd.merge(romance_similar_df, box_data, on='movie_key')\nromance_top_10 = romance_similar_df.head(10)\nromance_top_10\n\n\n# In[822]:\n\n\nthriller_similar_df = pd.merge(thriller_similar_df, box_data, on='movie_key')\nthriller_top_10 = thriller_similar_df.head(10)\nthriller_top_10\n\n\n# In[824]:\n\n\nSF_similar_df = pd.merge(SF_similar_df, box_data, on='movie_key')\nSF_top_10 = SF_similar_df.head(10)\nSF_top_10\n\n\n# In[826]:\n\n\nfantasy_similar_df = pd.merge(fantasy_similar_df, box_data, on='movie_key')\nfantasy_top_10 = fantasy_similar_df.head(10)\nfantasy_top_10\n\n\n# In[828]:\n\n\nanimation_similar_df = pd.merge(animation_similar_df, box_data, on='movie_key')\nanimation_top_10 = animation_similar_df.head(10)\nanimation_top_10\n\n\n# In[830]:\n\n\nadventure_similar_df = pd.merge(adventure_similar_df, box_data, on='movie_key')\nadventure_top_10 = adventure_similar_df.head(10)\nadventure_top_10\n\n\n# In[832]:\n\n\nmystery_similar_df = pd.merge(mystery_similar_df, box_data, on='movie_key')\nmystery_top_10 = mystery_similar_df.head(10)\nmystery_top_10\n\n\n# In[834]:\n\n\ncrime_similar_df = pd.merge(crime_similar_df, box_data, on='movie_key')\ncrime_top_10 = crime_similar_df.head(10)\ncrime_top_10\n\n\n# In[835]:\n\n\nactionList = list(action_top_10[\"movie_key\"])\n\n\n# In[836]:\n\n\ncomedyList = list(comedy_top_10[\"movie_key\"])\n\n\n# In[837]:\n\n\ndramaList = 
list(drama_top_10[\"movie_key\"])\n\n\n# In[838]:\n\n\nromanceList = list(romance_top_10[\"movie_key\"])\n\n\n# In[839]:\n\n\nthrillerList = list(thriller_top_10[\"movie_key\"])\n\n\n# In[840]:\n\n\nSFList = list(SF_top_10[\"movie_key\"])\n\n\n# In[841]:\n\n\nfantasyList = list(fantasy_top_10[\"movie_key\"])\n\n\n# In[842]:\n\n\nanimationList = list(animation_top_10[\"movie_key\"])\n\n\n# In[843]:\n\n\nadventureList = list(adventure_top_10[\"movie_key\"])\n\n\n# In[844]:\n\n\nmysteryList = list(mystery_top_10[\"movie_key\"])\n\n\n# In[845]:\n\n\ncrimeList = list(crime_top_10[\"movie_key\"])\n\n", "repo_name": "koreaunivcamp11/moviebeat", "sub_path": "genre_analysis.py", "file_name": "genre_analysis.py", "file_ext": "py", "file_size_in_byte": 25071, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "scipy.mean", "line_number": 150, "usage_type": "call"}, {"api_name": "scipy.stats.sem", "line_number": 151, "usage_type": "call"}, {"api_name": "scipy.stats.t.ppf", "line_number": 152, "usage_type": "call"}, {"api_name": "scipy.stats.t", "line_number": 152, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 174, "usage_type": "call"}, {"api_name": "scipy.mean", "line_number": 178, "usage_type": "name"}, {"api_name": "scipy.mean", "line_number": 179, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 357, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 437, "usage_type": "call"}, {"api_name": "pandas.factorize", "line_number": 445, "usage_type": "call"}, {"api_name": "pandas.factorize", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 589, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 598, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 616, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 625, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 643, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 652, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 679, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 697, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 706, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 724, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 733, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 751, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 760, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 787, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 805, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 814, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 832, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 841, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 856, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 876, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 884, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 892, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 900, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 908, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 916, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 924, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 932, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 940, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 948, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 956, "usage_type": "call"}]} +{"seq_id": "12943606185", "text": "import unittest\n\nimport base64\nimport xmppserver\n\nclass XmlUtilsTest(unittest.TestCase):\n\n def testParseXml(self):\n xml_text = \"\"\"\"\"\"\n xml = xmppserver.ParseXml(xml_text)\n self.assertEqual(xml.toxml(), xml_text)\n\n def testCloneXml(self):\n xml = xmppserver.ParseXml('')\n xml_clone = xmppserver.CloneXml(xml)\n xml_clone.setAttribute('bar', 'baz')\n self.assertEqual(xml, xml)\n self.assertEqual(xml_clone, xml_clone)\n self.assertNotEqual(xml, xml_clone)\n\n def testCloneXmlUnlink(self):\n xml_text = ''\n xml = xmppserver.ParseXml(xml_text)\n xml_clone = xmppserver.CloneXml(xml)\n xml.unlink()\n self.assertEqual(xml.parentNode, None)\n self.assertNotEqual(xml_clone.parentNode, None)\n self.assertEqual(xml_clone.toxml(), xml_text)\n\nclass StanzaParserTest(unittest.TestCase):\n\n def setUp(self):\n self.stanzas = []\n\n def FeedStanza(self, stanza):\n # We can't append stanza directly because it is unlinked after\n # this callback.\n self.stanzas.append(stanza.toxml())\n\n def testBasic(self):\n parser = xmppserver.StanzaParser(self)\n parser.FeedString('')\n self.assertEqual(self.stanzas[0], '')\n self.assertEqual(self.stanzas[1], '')\n\n def testStream(self):\n parser = xmppserver.StanzaParser(self)\n parser.FeedString('')\n self.assertEqual(self.stanzas[0],\n '')\n\n def testNested(self):\n parser = xmppserver.StanzaParser(self)\n parser.FeedString('meh')\n self.assertEqual(self.stanzas[0],\n 'meh')\n\n\nclass JidTest(unittest.TestCase):\n\n def testBasic(self):\n jid = xmppserver.Jid('foo', 'bar.com')\n self.assertEqual(str(jid), 'foo@bar.com')\n\n def testResource(self):\n jid = xmppserver.Jid('foo', 'bar.com', 'resource')\n self.assertEqual(str(jid), 'foo@bar.com/resource')\n\n def testGetBareJid(self):\n jid = xmppserver.Jid('foo', 'bar.com', 'resource')\n self.assertEqual(str(jid.GetBareJid()), 'foo@bar.com')\n\n\nclass 
IdGeneratorTest(unittest.TestCase):\n\n def testBasic(self):\n id_generator = xmppserver.IdGenerator('foo')\n for i in xrange(0, 100):\n self.assertEqual('foo.%d' % i, id_generator.GetNextId())\n\n\nclass HandshakeTaskTest(unittest.TestCase):\n\n def setUp(self):\n self.Reset()\n\n def Reset(self):\n self.data_received = 0\n self.handshake_done = False\n self.jid = None\n\n def SendData(self, _):\n self.data_received += 1\n\n def SendStanza(self, _, unused=True):\n self.data_received += 1\n\n def HandshakeDone(self, jid):\n self.handshake_done = True\n self.jid = jid\n\n def DoHandshake(self, resource_prefix, resource, username,\n initial_stream_domain, auth_domain, auth_stream_domain):\n self.Reset()\n handshake_task = (\n xmppserver.HandshakeTask(self, resource_prefix, True))\n stream_xml = xmppserver.ParseXml('')\n stream_xml.setAttribute('to', initial_stream_domain)\n self.assertEqual(self.data_received, 0)\n handshake_task.FeedStanza(stream_xml)\n self.assertEqual(self.data_received, 2)\n\n if auth_domain:\n username_domain = '%s@%s' % (username, auth_domain)\n else:\n username_domain = username\n auth_string = base64.b64encode('\\0%s\\0bar' % username_domain)\n auth_xml = xmppserver.ParseXml('%s'% auth_string)\n handshake_task.FeedStanza(auth_xml)\n self.assertEqual(self.data_received, 3)\n\n stream_xml = xmppserver.ParseXml('')\n stream_xml.setAttribute('to', auth_stream_domain)\n handshake_task.FeedStanza(stream_xml)\n self.assertEqual(self.data_received, 5)\n\n bind_xml = xmppserver.ParseXml(\n '%s' % resource)\n handshake_task.FeedStanza(bind_xml)\n self.assertEqual(self.data_received, 6)\n\n self.assertFalse(self.handshake_done)\n\n session_xml = xmppserver.ParseXml(\n '')\n handshake_task.FeedStanza(session_xml)\n self.assertEqual(self.data_received, 7)\n\n self.assertTrue(self.handshake_done)\n\n self.assertEqual(self.jid.username, username)\n self.assertEqual(self.jid.domain,\n auth_stream_domain or auth_domain or\n initial_stream_domain)\n self.assertEqual(self.jid.resource,\n '%s.%s' % (resource_prefix, resource))\n\n handshake_task.FeedStanza('')\n self.assertEqual(self.data_received, 7)\n\n def DoHandshakeUnauthenticated(self, resource_prefix, resource, username,\n initial_stream_domain):\n self.Reset()\n handshake_task = (\n xmppserver.HandshakeTask(self, resource_prefix, False))\n stream_xml = xmppserver.ParseXml('')\n stream_xml.setAttribute('to', initial_stream_domain)\n self.assertEqual(self.data_received, 0)\n handshake_task.FeedStanza(stream_xml)\n self.assertEqual(self.data_received, 2)\n\n self.assertFalse(self.handshake_done)\n\n auth_string = base64.b64encode('\\0%s\\0bar' % username)\n auth_xml = xmppserver.ParseXml('%s'% auth_string)\n handshake_task.FeedStanza(auth_xml)\n self.assertEqual(self.data_received, 3)\n\n self.assertTrue(self.handshake_done)\n\n self.assertEqual(self.jid, None)\n\n handshake_task.FeedStanza('')\n self.assertEqual(self.data_received, 3)\n\n def testBasic(self):\n self.DoHandshake('resource_prefix', 'resource',\n 'foo', 'bar.com', 'baz.com', 'quux.com')\n\n def testDomainBehavior(self):\n self.DoHandshake('resource_prefix', 'resource',\n 'foo', 'bar.com', 'baz.com', 'quux.com')\n self.DoHandshake('resource_prefix', 'resource',\n 'foo', 'bar.com', 'baz.com', '')\n self.DoHandshake('resource_prefix', 'resource',\n 'foo', 'bar.com', '', '')\n self.DoHandshake('resource_prefix', 'resource',\n 'foo', '', '', '')\n\n def testBasicUnauthenticated(self):\n self.DoHandshakeUnauthenticated('resource_prefix', 'resource',\n 'foo', 
'bar.com')\n\n\nclass FakeSocket(object):\n \"\"\"A fake socket object used for testing.\n \"\"\"\n\n def __init__(self):\n self._sent_data = []\n\n def GetSentData(self):\n return self._sent_data\n\n # socket-like methods.\n def fileno(self):\n return 0\n\n def setblocking(self, int):\n pass\n\n def getpeername(self):\n return ('', 0)\n\n def send(self, data):\n self._sent_data.append(data)\n pass\n\n def close(self):\n pass\n\n\nclass XmppConnectionTest(unittest.TestCase):\n\n def setUp(self):\n self.connections = set()\n self.fake_socket = FakeSocket()\n\n # XmppConnection delegate methods.\n def OnXmppHandshakeDone(self, xmpp_connection):\n self.connections.add(xmpp_connection)\n\n def OnXmppConnectionClosed(self, xmpp_connection):\n self.connections.discard(xmpp_connection)\n\n def ForwardNotification(self, unused_xmpp_connection, notification_stanza):\n for connection in self.connections:\n connection.ForwardNotification(notification_stanza)\n\n def testBasic(self):\n socket_map = {}\n xmpp_connection = xmppserver.XmppConnection(\n self.fake_socket, socket_map, self, ('', 0), True)\n self.assertEqual(len(socket_map), 1)\n self.assertEqual(len(self.connections), 0)\n xmpp_connection.HandshakeDone(xmppserver.Jid('foo', 'bar'))\n self.assertEqual(len(socket_map), 1)\n self.assertEqual(len(self.connections), 1)\n\n sent_data = self.fake_socket.GetSentData()\n\n # Test subscription request.\n self.assertEqual(len(sent_data), 0)\n xmpp_connection.collect_incoming_data(\n '')\n self.assertEqual(len(sent_data), 1)\n\n # Test acks.\n xmpp_connection.collect_incoming_data('')\n self.assertEqual(len(sent_data), 1)\n\n # Test notification.\n xmpp_connection.collect_incoming_data(\n '')\n self.assertEqual(len(sent_data), 2)\n\n # Test unexpected stanza.\n def SendUnexpectedStanza():\n xmpp_connection.collect_incoming_data('')\n self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)\n\n # Test unexpected notifier command.\n def SendUnexpectedNotifierCommand():\n xmpp_connection.collect_incoming_data(\n '')\n self.assertRaises(xmppserver.UnexpectedXml,\n SendUnexpectedNotifierCommand)\n\n # Test close.\n xmpp_connection.close()\n self.assertEqual(len(socket_map), 0)\n self.assertEqual(len(self.connections), 0)\n\n def testBasicUnauthenticated(self):\n socket_map = {}\n xmpp_connection = xmppserver.XmppConnection(\n self.fake_socket, socket_map, self, ('', 0), False)\n self.assertEqual(len(socket_map), 1)\n self.assertEqual(len(self.connections), 0)\n xmpp_connection.HandshakeDone(None)\n self.assertEqual(len(socket_map), 0)\n self.assertEqual(len(self.connections), 0)\n\n # Test unexpected stanza.\n def SendUnexpectedStanza():\n xmpp_connection.collect_incoming_data('')\n self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)\n\n # Test redundant close.\n xmpp_connection.close()\n self.assertEqual(len(socket_map), 0)\n self.assertEqual(len(self.connections), 0)\n\n\nclass FakeXmppServer(xmppserver.XmppServer):\n \"\"\"A fake XMPP server object used for testing.\n \"\"\"\n\n def __init__(self):\n self._socket_map = {}\n self._fake_sockets = set()\n self._next_jid_suffix = 1\n xmppserver.XmppServer.__init__(self, self._socket_map, ('', 0))\n\n def GetSocketMap(self):\n return self._socket_map\n\n def GetFakeSockets(self):\n return self._fake_sockets\n\n def AddHandshakeCompletedConnection(self):\n \"\"\"Creates a new XMPP connection and completes its handshake.\n \"\"\"\n xmpp_connection = self.handle_accept()\n jid = xmppserver.Jid('user%s' % self._next_jid_suffix, 
'domain.com')\n self._next_jid_suffix += 1\n xmpp_connection.HandshakeDone(jid)\n\n # XmppServer overrides.\n def accept(self):\n fake_socket = FakeSocket()\n self._fake_sockets.add(fake_socket)\n return (fake_socket, ('', 0))\n\n def close(self):\n self._fake_sockets.clear()\n xmppserver.XmppServer.close(self)\n\n\nclass XmppServerTest(unittest.TestCase):\n\n def setUp(self):\n self.xmpp_server = FakeXmppServer()\n\n def AssertSentDataLength(self, expected_length):\n for fake_socket in self.xmpp_server.GetFakeSockets():\n self.assertEqual(len(fake_socket.GetSentData()), expected_length)\n\n def testBasic(self):\n socket_map = self.xmpp_server.GetSocketMap()\n self.assertEqual(len(socket_map), 1)\n self.xmpp_server.AddHandshakeCompletedConnection()\n self.assertEqual(len(socket_map), 2)\n self.xmpp_server.close()\n self.assertEqual(len(socket_map), 0)\n\n def testMakeNotification(self):\n notification = self.xmpp_server.MakeNotification('channel', 'data')\n expected_xml = (\n ''\n ' '\n ' %s'\n ' '\n '' % base64.b64encode('data'))\n self.assertEqual(notification.toxml(), expected_xml)\n\n def testSendNotification(self):\n # Add a few connections.\n for _ in xrange(0, 7):\n self.xmpp_server.AddHandshakeCompletedConnection()\n\n self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 7)\n\n self.AssertSentDataLength(0)\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(1)\n\n def testEnableDisableNotifications(self):\n # Add a few connections.\n for _ in xrange(0, 5):\n self.xmpp_server.AddHandshakeCompletedConnection()\n\n self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 5)\n\n self.AssertSentDataLength(0)\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(1)\n\n self.xmpp_server.EnableNotifications()\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(2)\n\n self.xmpp_server.DisableNotifications()\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(2)\n\n self.xmpp_server.DisableNotifications()\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(2)\n\n self.xmpp_server.EnableNotifications()\n self.xmpp_server.SendNotification('channel', 'data')\n self.AssertSentDataLength(3)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "repo_name": "kiwibrowser/src", "sub_path": "components/sync/tools/testserver/xmppserver_test.py", "file_name": "xmppserver_test.py", "file_ext": "py", "file_size_in_byte": 12936, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2475, "dataset": "github-code", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "xmppserver.ParseXml", "line_number": 10, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 14, "usage_type": "call"}, {"api_name": "xmppserver.CloneXml", "line_number": 15, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 23, "usage_type": "call"}, {"api_name": "xmppserver.CloneXml", "line_number": 24, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 30, "usage_type": "attribute"}, {"api_name": "xmppserver.StanzaParser", "line_number": 41, "usage_type": "call"}, {"api_name": "xmppserver.StanzaParser", "line_number": 49, "usage_type": "call"}, {"api_name": "xmppserver.StanzaParser", "line_number": 57, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 66, "usage_type": "attribute"}, {"api_name": "xmppserver.Jid", 
"line_number": 69, "usage_type": "call"}, {"api_name": "xmppserver.Jid", "line_number": 73, "usage_type": "call"}, {"api_name": "xmppserver.Jid", "line_number": 77, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 81, "usage_type": "attribute"}, {"api_name": "xmppserver.IdGenerator", "line_number": 84, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 89, "usage_type": "attribute"}, {"api_name": "xmppserver.HandshakeTask", "line_number": 113, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 114, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 124, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 125, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 129, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 134, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 141, "usage_type": "call"}, {"api_name": "xmppserver.HandshakeTask", "line_number": 162, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 163, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 171, "usage_type": "call"}, {"api_name": "xmppserver.ParseXml", "line_number": 172, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 230, "usage_type": "attribute"}, {"api_name": "xmppserver.XmppConnection", "line_number": 249, "usage_type": "call"}, {"api_name": "xmppserver.Jid", "line_number": 253, "usage_type": "call"}, {"api_name": "xmppserver.UnexpectedXml", "line_number": 277, "usage_type": "attribute"}, {"api_name": "xmppserver.UnexpectedXml", "line_number": 283, "usage_type": "attribute"}, {"api_name": "xmppserver.XmppConnection", "line_number": 293, "usage_type": "call"}, {"api_name": "xmppserver.UnexpectedXml", "line_number": 304, "usage_type": "attribute"}, {"api_name": "xmppserver.XmppServer", "line_number": 312, "usage_type": "attribute"}, {"api_name": "xmppserver.XmppServer.__init__", "line_number": 320, "usage_type": "call"}, {"api_name": "xmppserver.XmppServer", "line_number": 320, "usage_type": "attribute"}, {"api_name": "xmppserver.Jid", "line_number": 332, "usage_type": "call"}, {"api_name": "xmppserver.XmppServer.close", "line_number": 344, "usage_type": "call"}, {"api_name": "xmppserver.XmppServer", "line_number": 344, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 347, "usage_type": "attribute"}, {"api_name": "base64.b64encode", "line_number": 371, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 414, "usage_type": "call"}]} +{"seq_id": "32677957197", "text": "import numpy as np\nimport random\nimport json\nimport argparse\nimport signal\nimport math\nfrom keras.layers import Input, Dense, Convolution2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, Dropout, Flatten, \\\n merge, Activation\nfrom keras.models import Model\nfrom keras.regularizers import l2\nfrom keras.optimizers import SGD\nfrom googlenet_custom_layers import PoolHelper, LRN\n\n\nclass DataGenerator:\n def __init__(self, input_data_path, batch_size=128, test_data_percent=0.1, background_signal_equivalent=True):\n print(\"Load Data From %s.\" % input_data_path)\n f = open(input_data_path, \"r\")\n data = json.loads(f.read())\n self._signal = data['signal']\n self._background = data['background']\n self._max_energy = None\n self._data = []\n if not background_signal_equivalent:\n print(\"Load %s signal, %s background\" % (len(self._signal), 
len(self._background)))\n for s in self._signal:\n self._data.append((s, 0))\n for b in self._background:\n self._data.append((b, 1))\n else:\n size = min(len(self._signal), len(self._background))\n print(\"Load %s signal, %s background\" % (size, size))\n for i in range(0, size):\n self._data.append((self._signal[i], 0))\n for i in range(0, size):\n self._data.append((self._background[i], 1))\n print(\"Totally load %s data.\" % len(self._data))\n random.shuffle(self._data)\n split_at = int(math.floor(len(self._data) * (1 - test_data_percent)))\n self._train_data = self._data[:split_at]\n self._test_data = self._data[split_at:]\n self._batch_size = batch_size\n self._train_pointer = 0\n self._test_pointer = 0\n\n def set_max_energy(self, max_energy):\n self._max_energy = max_energy\n\n def get_train_size(self):\n return len(self._train_data)\n\n def get_test_size(self):\n return len(self._test_data)\n\n def get_batch_size(self):\n return self._batch_size\n\n def _convert_row(self, input_row):\n row = np.zeros((3, 224, 224))\n cluster_xy_data = input_row[0]\n for pixel, energy in cluster_xy_data.items():\n location = pixel.split(\":\")\n location_x = int(location[0]) * 4\n location_y = int(location[1]) * 4\n for i in range(0, 4):\n for j in range(0, 4):\n _location_x = location_x + 224 / 2 + i\n _location_y = location_y + 224 / 2 + j\n if not (0 <= _location_x < 224 and 0 <= _location_y < 224):\n continue\n if self._max_energy:\n row[0, _location_x, _location_y] = min(int(math.floor(energy / self._max_energy * 256)), 255)\n else:\n row[0, _location_x, _location_y] = min(int(math.floor(energy / input_row[2] * 256)), 255)\n cluster_zy_data = input_row[1]\n for pixel, energy in cluster_zy_data.items():\n location = pixel.split(\":\")\n location_z = int(location[0]) * 4\n location_y = int(location[1]) * 4\n for i in range(0, 4):\n for j in range(0, 4):\n _location_z = location_z + 224 / 2 + i\n _location_y = location_y + 224 / 2 + j\n if not (0 <= _location_z < 224 and 0 <= _location_y < 224):\n continue\n if self._max_energy:\n row[1, _location_z, _location_y] = min(int(math.floor(energy / self._max_energy * 256)), 255)\n else:\n row[1, _location_z, _location_y] = min(int(math.floor(energy / input_row[2] * 256)), 255)\n return row\n\n def train_generator(self):\n while True:\n start = self._train_pointer\n end = self._train_pointer + self._batch_size\n if end >= len(self._train_data):\n end = len(self._train_data)\n self._train_pointer = 0\n else:\n self._train_pointer = end\n data = self._train_data[start:end]\n count = len(data)\n result_x = np.zeros((count, 3, 224, 224), dtype='float32')\n result_y = np.zeros((count, 1000))\n for i, row in enumerate(data):\n result_x[i] = self._convert_row(row[0])\n result_y[i][row[1]] = 1\n yield result_x, [result_y, result_y, result_y]\n\n def test_generator(self):\n while True:\n start = self._test_pointer\n end = self._test_pointer + self._batch_size\n if end >= len(self._test_data):\n end = len(self._test_data)\n self._test_pointer = 0\n else:\n self._test_pointer = end\n data = self._test_data[start:end]\n count = len(data)\n result_x = np.zeros((count, 3, 224, 224), dtype='float32')\n result_y = np.zeros((count, 1000))\n for i, row in enumerate(data):\n result_x[i] = self._convert_row(row[0])\n result_y[i][row[1]] = 1\n yield result_x, [result_y, result_y, result_y]\n\n def get_some_test(self, size):\n result_x = np.zeros((size, 3, 224, 224), dtype='float32')\n result_y = np.zeros((size, 1000))\n for i in range(0, size):\n row = 
random.choice(self._test_data)\n result_x[i] = self._convert_row(row[0])\n result_y[i][row[1]] = 1\n return result_x, result_y\n\n\nclass TestDataGenerator:\n def __init__(self, count=100000, batch_size=128, test_data_percent=0.1):\n self._data = []\n self._batch_size = batch_size\n self.test_data_percent = test_data_percent\n self._type = [\"circle\", \"square\"]\n self._train_pointer = 0\n self._test_pointer = 0\n for i in range(0, count):\n type_ = random.choice(self._type)\n x = random.randint(0, 223)\n y = random.randint(0, 223)\n r = random.randint(0, 50)\n self._data.append((type_, x, y, r))\n random.shuffle(self._data)\n split_at = int(math.floor(len(self._data) * (1 - test_data_percent)))\n self._train_data = self._data[:split_at]\n self._test_data = self._data[split_at:]\n\n def get_train_size(self):\n return len(self._train_data)\n\n def get_test_size(self):\n return len(self._test_data)\n\n def _convert_row(self, input_row):\n t_, x_, y_, r_ = input_row\n row = np.zeros((3, 224, 224))\n if t_ == \"circle\":\n for i in range(0, 224):\n for j in range(0, 224):\n if math.sqrt((i - x_) * (i - x_) + (j - y_) * (j - y_)) < r_:\n row[0, i, j] = 255\n elif t_ == \"square\":\n for i in range(0, 224):\n for j in range(0, 224):\n if abs(i - x_) < r_ and abs(j - y_) < r_:\n row[0, i, j] = 255\n return row\n\n def train_generator(self):\n while True:\n start = self._train_pointer\n end = self._train_pointer + self._batch_size\n if end >= len(self._train_data):\n end = len(self._train_data)\n self._train_pointer = 0\n else:\n self._train_pointer = end\n data = self._train_data[start:end]\n count = len(data)\n result_x = np.zeros((count, 3, 224, 224), dtype='float32')\n result_y = np.zeros((count, 1000))\n for i, row in enumerate(data):\n result_x[i] = self._convert_row(row)\n if row[0] == \"circle\":\n result_y[i][0] = 1\n else:\n result_y[i][1] = 1\n yield result_x, [result_y, result_y, result_y]\n\n def test_generator(self):\n while True:\n start = self._test_pointer\n end = self._test_pointer + self._batch_size\n if end >= len(self._test_data):\n end = len(self._test_data)\n self._test_pointer = 0\n else:\n self._test_pointer = end\n data = self._test_data[start:end]\n count = len(data)\n result_x = np.zeros((count, 3, 224, 224), dtype='float32')\n result_y = np.zeros((count, 1000))\n for i, row in enumerate(data):\n result_x[i] = self._convert_row(row)\n if row[0] == \"circle\":\n result_y[i][0] = 1\n else:\n result_y[i][1] = 1\n yield result_x, [result_y, result_y, result_y]\n\n def get_some_test(self, size):\n result_x = np.zeros((size, 3, 224, 224), dtype='float32')\n result_y = np.zeros((size, 1000))\n for i in range(0, size):\n row = random.choice(self._test_data)\n result_x[i] = self._convert_row(row)\n if row[0] == \"circle\":\n result_y[i][0] = 1\n else:\n result_y[i][1] = 1\n return result_x, result_y\n\n\nclass Train:\n def __init__(self):\n self.google_net = None\n self.exit_signal = False\n self.thread = None\n self.data_generator = None\n\n def set_data(self, input_data):\n # self.data_generator = DataGenerator(input_data)\n self.data_generator = TestDataGenerator()\n\n def create_googlenet(self, weights_path=None):\n # creates GoogLeNet a.k.a. 
Inception v1 (Szegedy, 2015)\n\n input = Input(shape=(3, 224, 224))\n\n conv1_7x7_s2 = Convolution2D(64, 7, 7, subsample=(2, 2), border_mode='same', activation='relu',\n name='conv1/7x7_s2',\n W_regularizer=l2(0.0002))(input)\n\n conv1_zero_pad = ZeroPadding2D(padding=(1, 1))(conv1_7x7_s2)\n\n pool1_helper = PoolHelper()(conv1_zero_pad)\n\n pool1_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid', name='pool1/3x3_s2')(\n pool1_helper)\n\n pool1_norm1 = LRN(name='pool1/norm1')(pool1_3x3_s2)\n\n conv2_3x3_reduce = Convolution2D(64, 1, 1, border_mode='same', activation='relu', name='conv2/3x3_reduce',\n W_regularizer=l2(0.0002))(pool1_norm1)\n\n conv2_3x3 = Convolution2D(192, 3, 3, border_mode='same', activation='relu', name='conv2/3x3',\n W_regularizer=l2(0.0002))(conv2_3x3_reduce)\n\n conv2_norm2 = LRN(name='conv2/norm2')(conv2_3x3)\n\n conv2_zero_pad = ZeroPadding2D(padding=(1, 1))(conv2_norm2)\n\n pool2_helper = PoolHelper()(conv2_zero_pad)\n\n pool2_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid', name='pool2/3x3_s2')(\n pool2_helper)\n\n inception_3a_1x1 = Convolution2D(64, 1, 1, border_mode='same', activation='relu', name='inception_3a/1x1',\n W_regularizer=l2(0.0002))(pool2_3x3_s2)\n\n inception_3a_3x3_reduce = Convolution2D(96, 1, 1, border_mode='same', activation='relu',\n name='inception_3a/3x3_reduce', W_regularizer=l2(0.0002))(pool2_3x3_s2)\n\n inception_3a_3x3 = Convolution2D(128, 3, 3, border_mode='same', activation='relu', name='inception_3a/3x3',\n W_regularizer=l2(0.0002))(inception_3a_3x3_reduce)\n\n inception_3a_5x5_reduce = Convolution2D(16, 1, 1, border_mode='same', activation='relu',\n name='inception_3a/5x5_reduce', W_regularizer=l2(0.0002))(pool2_3x3_s2)\n\n inception_3a_5x5 = Convolution2D(32, 5, 5, border_mode='same', activation='relu', name='inception_3a/5x5',\n W_regularizer=l2(0.0002))(inception_3a_5x5_reduce)\n\n inception_3a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_3a/pool')(\n pool2_3x3_s2)\n\n inception_3a_pool_proj = Convolution2D(32, 1, 1, border_mode='same', activation='relu',\n name='inception_3a/pool_proj', W_regularizer=l2(0.0002))(\n inception_3a_pool)\n\n inception_3a_output = merge([inception_3a_1x1, inception_3a_3x3, inception_3a_5x5, inception_3a_pool_proj],\n mode='concat', concat_axis=1, name='inception_3a/output')\n\n inception_3b_1x1 = Convolution2D(128, 1, 1, border_mode='same', activation='relu', name='inception_3b/1x1',\n W_regularizer=l2(0.0002))(inception_3a_output)\n\n inception_3b_3x3_reduce = Convolution2D(128, 1, 1, border_mode='same', activation='relu',\n name='inception_3b/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_3a_output)\n\n inception_3b_3x3 = Convolution2D(192, 3, 3, border_mode='same', activation='relu', name='inception_3b/3x3',\n W_regularizer=l2(0.0002))(inception_3b_3x3_reduce)\n\n inception_3b_5x5_reduce = Convolution2D(32, 1, 1, border_mode='same', activation='relu',\n name='inception_3b/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_3a_output)\n\n inception_3b_5x5 = Convolution2D(96, 5, 5, border_mode='same', activation='relu', name='inception_3b/5x5',\n W_regularizer=l2(0.0002))(inception_3b_5x5_reduce)\n\n inception_3b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_3b/pool')(\n inception_3a_output)\n\n inception_3b_pool_proj = Convolution2D(64, 1, 1, border_mode='same', activation='relu',\n name='inception_3b/pool_proj', W_regularizer=l2(0.0002))(\n 
inception_3b_pool)\n\n inception_3b_output = merge([inception_3b_1x1, inception_3b_3x3, inception_3b_5x5, inception_3b_pool_proj],\n mode='concat', concat_axis=1, name='inception_3b/output')\n\n inception_3b_output_zero_pad = ZeroPadding2D(padding=(1, 1))(inception_3b_output)\n\n pool3_helper = PoolHelper()(inception_3b_output_zero_pad)\n\n pool3_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid', name='pool3/3x3_s2')(\n pool3_helper)\n\n inception_4a_1x1 = Convolution2D(192, 1, 1, border_mode='same', activation='relu', name='inception_4a/1x1',\n W_regularizer=l2(0.0002))(pool3_3x3_s2)\n\n inception_4a_3x3_reduce = Convolution2D(96, 1, 1, border_mode='same', activation='relu',\n name='inception_4a/3x3_reduce', W_regularizer=l2(0.0002))(pool3_3x3_s2)\n\n inception_4a_3x3 = Convolution2D(208, 3, 3, border_mode='same', activation='relu', name='inception_4a/3x3',\n W_regularizer=l2(0.0002))(inception_4a_3x3_reduce)\n\n inception_4a_5x5_reduce = Convolution2D(16, 1, 1, border_mode='same', activation='relu',\n name='inception_4a/5x5_reduce', W_regularizer=l2(0.0002))(pool3_3x3_s2)\n\n inception_4a_5x5 = Convolution2D(48, 5, 5, border_mode='same', activation='relu', name='inception_4a/5x5',\n W_regularizer=l2(0.0002))(inception_4a_5x5_reduce)\n\n inception_4a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_4a/pool')(\n pool3_3x3_s2)\n\n inception_4a_pool_proj = Convolution2D(64, 1, 1, border_mode='same', activation='relu',\n name='inception_4a/pool_proj', W_regularizer=l2(0.0002))(\n inception_4a_pool)\n\n inception_4a_output = merge([inception_4a_1x1, inception_4a_3x3, inception_4a_5x5, inception_4a_pool_proj],\n mode='concat', concat_axis=1, name='inception_4a/output')\n\n loss1_ave_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), name='loss1/ave_pool')(inception_4a_output)\n\n loss1_conv = Convolution2D(128, 1, 1, border_mode='same', activation='relu', name='loss1/conv',\n W_regularizer=l2(0.0002))(loss1_ave_pool)\n\n loss1_flat = Flatten()(loss1_conv)\n\n loss1_fc = Dense(1024, activation='relu', name='loss1/fc', W_regularizer=l2(0.0002))(loss1_flat)\n\n loss1_drop_fc = Dropout(0.7)(loss1_fc)\n\n loss1_classifier = Dense(1000, name='loss1/classifier', W_regularizer=l2(0.0002))(loss1_drop_fc)\n\n loss1_classifier_act = Activation('softmax')(loss1_classifier)\n\n inception_4b_1x1 = Convolution2D(160, 1, 1, border_mode='same', activation='relu', name='inception_4b/1x1',\n W_regularizer=l2(0.0002))(inception_4a_output)\n\n inception_4b_3x3_reduce = Convolution2D(112, 1, 1, border_mode='same', activation='relu',\n name='inception_4b/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_4a_output)\n\n inception_4b_3x3 = Convolution2D(224, 3, 3, border_mode='same', activation='relu', name='inception_4b/3x3',\n W_regularizer=l2(0.0002))(inception_4b_3x3_reduce)\n\n inception_4b_5x5_reduce = Convolution2D(24, 1, 1, border_mode='same', activation='relu',\n name='inception_4b/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_4a_output)\n\n inception_4b_5x5 = Convolution2D(64, 5, 5, border_mode='same', activation='relu', name='inception_4b/5x5',\n W_regularizer=l2(0.0002))(inception_4b_5x5_reduce)\n\n inception_4b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_4b/pool')(\n inception_4a_output)\n\n inception_4b_pool_proj = Convolution2D(64, 1, 1, border_mode='same', activation='relu',\n name='inception_4b/pool_proj', W_regularizer=l2(0.0002))(\n inception_4b_pool)\n\n 
inception_4b_output = merge([inception_4b_1x1, inception_4b_3x3, inception_4b_5x5, inception_4b_pool_proj],\n mode='concat', concat_axis=1, name='inception_4b_output')\n\n inception_4c_1x1 = Convolution2D(128, 1, 1, border_mode='same', activation='relu', name='inception_4c/1x1',\n W_regularizer=l2(0.0002))(inception_4b_output)\n\n inception_4c_3x3_reduce = Convolution2D(128, 1, 1, border_mode='same', activation='relu',\n name='inception_4c/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_4b_output)\n\n inception_4c_3x3 = Convolution2D(256, 3, 3, border_mode='same', activation='relu', name='inception_4c/3x3',\n W_regularizer=l2(0.0002))(inception_4c_3x3_reduce)\n\n inception_4c_5x5_reduce = Convolution2D(24, 1, 1, border_mode='same', activation='relu',\n name='inception_4c/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_4b_output)\n\n inception_4c_5x5 = Convolution2D(64, 5, 5, border_mode='same', activation='relu', name='inception_4c/5x5',\n W_regularizer=l2(0.0002))(inception_4c_5x5_reduce)\n\n inception_4c_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_4c/pool')(\n inception_4b_output)\n\n inception_4c_pool_proj = Convolution2D(64, 1, 1, border_mode='same', activation='relu',\n name='inception_4c/pool_proj', W_regularizer=l2(0.0002))(\n inception_4c_pool)\n\n inception_4c_output = merge([inception_4c_1x1, inception_4c_3x3, inception_4c_5x5, inception_4c_pool_proj],\n mode='concat', concat_axis=1, name='inception_4c/output')\n\n inception_4d_1x1 = Convolution2D(112, 1, 1, border_mode='same', activation='relu', name='inception_4d/1x1',\n W_regularizer=l2(0.0002))(inception_4c_output)\n\n inception_4d_3x3_reduce = Convolution2D(144, 1, 1, border_mode='same', activation='relu',\n name='inception_4d/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_4c_output)\n\n inception_4d_3x3 = Convolution2D(288, 3, 3, border_mode='same', activation='relu', name='inception_4d/3x3',\n W_regularizer=l2(0.0002))(inception_4d_3x3_reduce)\n\n inception_4d_5x5_reduce = Convolution2D(32, 1, 1, border_mode='same', activation='relu',\n name='inception_4d/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_4c_output)\n\n inception_4d_5x5 = Convolution2D(64, 5, 5, border_mode='same', activation='relu', name='inception_4d/5x5',\n W_regularizer=l2(0.0002))(inception_4d_5x5_reduce)\n\n inception_4d_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_4d/pool')(\n inception_4c_output)\n\n inception_4d_pool_proj = Convolution2D(64, 1, 1, border_mode='same', activation='relu',\n name='inception_4d/pool_proj', W_regularizer=l2(0.0002))(\n inception_4d_pool)\n\n inception_4d_output = merge([inception_4d_1x1, inception_4d_3x3, inception_4d_5x5, inception_4d_pool_proj],\n mode='concat', concat_axis=1, name='inception_4d/output')\n\n loss2_ave_pool = AveragePooling2D(pool_size=(5, 5), strides=(3, 3), name='loss2/ave_pool')(inception_4d_output)\n\n loss2_conv = Convolution2D(128, 1, 1, border_mode='same', activation='relu', name='loss2/conv',\n W_regularizer=l2(0.0002))(loss2_ave_pool)\n\n loss2_flat = Flatten()(loss2_conv)\n\n loss2_fc = Dense(1024, activation='relu', name='loss2/fc', W_regularizer=l2(0.0002))(loss2_flat)\n\n loss2_drop_fc = Dropout(0.7)(loss2_fc)\n\n loss2_classifier = Dense(1000, name='loss2/classifier', W_regularizer=l2(0.0002))(loss2_drop_fc)\n\n loss2_classifier_act = Activation('softmax')(loss2_classifier)\n\n inception_4e_1x1 = Convolution2D(256, 1, 1, border_mode='same', activation='relu', 
name='inception_4e/1x1',\n W_regularizer=l2(0.0002))(inception_4d_output)\n\n inception_4e_3x3_reduce = Convolution2D(160, 1, 1, border_mode='same', activation='relu',\n name='inception_4e/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_4d_output)\n\n inception_4e_3x3 = Convolution2D(320, 3, 3, border_mode='same', activation='relu', name='inception_4e/3x3',\n W_regularizer=l2(0.0002))(inception_4e_3x3_reduce)\n\n inception_4e_5x5_reduce = Convolution2D(32, 1, 1, border_mode='same', activation='relu',\n name='inception_4e/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_4d_output)\n\n inception_4e_5x5 = Convolution2D(128, 5, 5, border_mode='same', activation='relu', name='inception_4e/5x5',\n W_regularizer=l2(0.0002))(inception_4e_5x5_reduce)\n\n inception_4e_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_4e/pool')(\n inception_4d_output)\n\n inception_4e_pool_proj = Convolution2D(128, 1, 1, border_mode='same', activation='relu',\n name='inception_4e/pool_proj', W_regularizer=l2(0.0002))(\n inception_4e_pool)\n\n inception_4e_output = merge([inception_4e_1x1, inception_4e_3x3, inception_4e_5x5, inception_4e_pool_proj],\n mode='concat', concat_axis=1, name='inception_4e/output')\n\n inception_4e_output_zero_pad = ZeroPadding2D(padding=(1, 1))(inception_4e_output)\n\n pool4_helper = PoolHelper()(inception_4e_output_zero_pad)\n\n pool4_3x3_s2 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), border_mode='valid', name='pool4/3x3_s2')(\n pool4_helper)\n\n inception_5a_1x1 = Convolution2D(256, 1, 1, border_mode='same', activation='relu', name='inception_5a/1x1',\n W_regularizer=l2(0.0002))(pool4_3x3_s2)\n\n inception_5a_3x3_reduce = Convolution2D(160, 1, 1, border_mode='same', activation='relu',\n name='inception_5a/3x3_reduce', W_regularizer=l2(0.0002))(pool4_3x3_s2)\n\n inception_5a_3x3 = Convolution2D(320, 3, 3, border_mode='same', activation='relu', name='inception_5a/3x3',\n W_regularizer=l2(0.0002))(inception_5a_3x3_reduce)\n\n inception_5a_5x5_reduce = Convolution2D(32, 1, 1, border_mode='same', activation='relu',\n name='inception_5a/5x5_reduce', W_regularizer=l2(0.0002))(pool4_3x3_s2)\n\n inception_5a_5x5 = Convolution2D(128, 5, 5, border_mode='same', activation='relu', name='inception_5a/5x5',\n W_regularizer=l2(0.0002))(inception_5a_5x5_reduce)\n\n inception_5a_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_5a/pool')(\n pool4_3x3_s2)\n\n inception_5a_pool_proj = Convolution2D(128, 1, 1, border_mode='same', activation='relu',\n name='inception_5a/pool_proj', W_regularizer=l2(0.0002))(\n inception_5a_pool)\n\n inception_5a_output = merge([inception_5a_1x1, inception_5a_3x3, inception_5a_5x5, inception_5a_pool_proj],\n mode='concat', concat_axis=1, name='inception_5a/output')\n\n inception_5b_1x1 = Convolution2D(384, 1, 1, border_mode='same', activation='relu', name='inception_5b/1x1',\n W_regularizer=l2(0.0002))(inception_5a_output)\n\n inception_5b_3x3_reduce = Convolution2D(192, 1, 1, border_mode='same', activation='relu',\n name='inception_5b/3x3_reduce', W_regularizer=l2(0.0002))(\n inception_5a_output)\n\n inception_5b_3x3 = Convolution2D(384, 3, 3, border_mode='same', activation='relu', name='inception_5b/3x3',\n W_regularizer=l2(0.0002))(inception_5b_3x3_reduce)\n\n inception_5b_5x5_reduce = Convolution2D(48, 1, 1, border_mode='same', activation='relu',\n name='inception_5b/5x5_reduce', W_regularizer=l2(0.0002))(\n inception_5a_output)\n\n inception_5b_5x5 = Convolution2D(128, 5, 5, 
border_mode='same', activation='relu', name='inception_5b/5x5',\n W_regularizer=l2(0.0002))(inception_5b_5x5_reduce)\n\n inception_5b_pool = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), border_mode='same',\n name='inception_5b/pool')(\n inception_5a_output)\n\n inception_5b_pool_proj = Convolution2D(128, 1, 1, border_mode='same', activation='relu',\n name='inception_5b/pool_proj', W_regularizer=l2(0.0002))(\n inception_5b_pool)\n\n inception_5b_output = merge([inception_5b_1x1, inception_5b_3x3, inception_5b_5x5, inception_5b_pool_proj],\n mode='concat', concat_axis=1, name='inception_5b/output')\n\n pool5_7x7_s1 = AveragePooling2D(pool_size=(7, 7), strides=(1, 1), name='pool5/7x7_s2')(inception_5b_output)\n\n loss3_flat = Flatten()(pool5_7x7_s1)\n\n pool5_drop_7x7_s1 = Dropout(0.4)(loss3_flat)\n\n loss3_classifier = Dense(1000, name='loss3/classifier', W_regularizer=l2(0.0002))(pool5_drop_7x7_s1)\n\n loss3_classifier_act = Activation('softmax', name='prob')(loss3_classifier)\n\n googlenet = Model(input=input, output=[loss1_classifier_act, loss2_classifier_act, loss3_classifier_act])\n\n if weights_path:\n googlenet.load_weights(weights_path)\n\n self.google_net = googlenet\n return googlenet\n\n def predict_googlenet(self, x):\n preds = self.google_net.predict(x)\n return [np.argmax(preds[0]), np.argmax(preds[1]), np.argmax(preds[2])]\n\n def test_googlenet(self, input_network):\n model = self.create_googlenet(input_network)\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy')\n if not self.data_generator:\n raise Exception(\"No data generator\")\n data_generator = self.data_generator\n print(\"Test Data\")\n signal_signal = 0\n signal_background = 0\n background_signal = 0\n background_background = 0\n for i, row in enumerate(data_generator._signal):\n if i >= 2500:\n break\n x = data_generator._convert_row(row)\n x = np.expand_dims(x, axis=0)\n preds = model.predict(x)\n print([np.argmax(preds[0]), np.argmax(preds[1]), np.argmax(preds[2])])\n result = np.argmax(preds[0])\n if result == 0:\n print(\"%s: s/s\" % i)\n signal_signal += 1\n else:\n print(\"%s: s/b\" % i)\n signal_background += 1\n for i, row in enumerate(data_generator._background):\n if i >= 2500:\n break\n x = data_generator._convert_row(row)\n x = np.expand_dims(x, axis=0)\n preds = model.predict(x)\n print([np.argmax(preds[0]), np.argmax(preds[1]), np.argmax(preds[2])])\n result = np.argmax(preds[0])\n if result == 0:\n print(\"%s: b/s\" % i)\n background_signal += 1\n else:\n print(\"%s: b/b\" % i)\n background_background += 1\n print(\"\\tSignal\\tBackground\")\n print(\"Signal\\t%s\\t%s\" % (signal_signal, signal_background))\n print(\"Background\\t%s\\t%s\" % (background_signal, background_background))\n\n def train_googlenet(self, save_path, recovery=None):\n if recovery:\n model = self.create_googlenet(recovery)\n else:\n model = self.create_googlenet()\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy')\n\n if not self.data_generator:\n raise Exception(\"No data generator\")\n data = self.data_generator\n print(\"Start Train.\")\n for i in range(0, 10000):\n if self.exit_signal:\n break\n print(\"=\" * 64)\n print(\"Loop %s\" % i)\n model.fit_generator(generator=data.train_generator(), samples_per_epoch=data.get_train_size(), nb_epoch=1,\n validation_data=data.test_generator(), nb_val_samples=data.get_test_size(), verbose=1)\n score = 
model.evaluate_generator(generator=data.test_generator(), val_samples=data.get_test_size())\n print(score)\n # print some predict:\n for i in range(100):\n row_x, row_y = data.get_some_test(1)\n predict = self.predict_googlenet(row_x)\n print('Except', [np.argmax(row_y), np.argmax(row_y), np.argmax(row_y)])\n print('Answer', predict)\n print('---')\n model.save_weights(save_path)\n\n\nt = Train()\n\n\ndef signal_handler(signum, frame):\n print(\"Try to save train data. It may take a long time\")\n t.exit_signal = True\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--type\", choices=[\"train\", \"test\"])\n parser.add_argument(\"-i\", \"--input\", required=True)\n parser.add_argument(\"-s\", \"--save\", required=True)\n parser.add_argument(\"-m\", \"--max-energy\", type=float)\n parser.add_argument(\"-r\", \"--recovery\")\n args = parser.parse_args()\n t.set_data(args.input)\n if args.max_energy:\n t.data_generator.set_max_energy(args.max_energy)\n if args.type == \"train\":\n signal.signal(signal.SIGINT, signal_handler)\n if args.recovery:\n t.train_googlenet(args.save, args.recovery)\n else:\n t.train_googlenet(args.save)\n else:\n t.test_googlenet(args.save)\n", "repo_name": "Catofes/BambooKeras", "sub_path": "googlenet.py", "file_name": "googlenet.py", "file_ext": "py", "file_size_in_byte": 33258, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 38, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 72, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 74, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 87, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 130, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 132, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 147, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 148, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 149, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 150, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 152, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 165, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 222, "usage_type": 
"call"}, {"api_name": "random.choice", "line_number": 224, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 247, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 249, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 251, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 253, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.PoolHelper", "line_number": 255, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 257, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.LRN", "line_number": 260, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 262, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 263, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 265, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 266, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.LRN", "line_number": 268, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 270, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.PoolHelper", "line_number": 272, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 274, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 277, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 278, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 280, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 281, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 283, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 284, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 286, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 287, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 289, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 290, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 292, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 296, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 297, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 300, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 303, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 304, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 306, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 307, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 310, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 311, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 313, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 314, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 317, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 318, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 320, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 324, "usage_type": "call"}, {"api_name": 
"keras.regularizers.l2", "line_number": 325, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 328, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 331, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.PoolHelper", "line_number": 333, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 335, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 338, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 339, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 341, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 342, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 344, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 345, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 347, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 348, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 350, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 351, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 353, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 357, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 358, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 361, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 364, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 366, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 369, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 371, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 371, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 373, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 377, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 379, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 380, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 382, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 383, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 386, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 387, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 389, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 390, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 393, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 394, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 396, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 400, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 401, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 404, "usage_type": 
"call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 407, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 408, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 410, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 411, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 414, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 415, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 417, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 418, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 421, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 422, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 424, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 428, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 429, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 432, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 435, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 436, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 438, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 439, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 442, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 443, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 445, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 446, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 449, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 450, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 452, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 456, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 457, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 460, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 463, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 465, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 466, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 468, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 470, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 470, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 472, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 474, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 474, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 476, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 478, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 479, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 481, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 482, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 485, 
"usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 486, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 488, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 489, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 492, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 493, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 495, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 499, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 500, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 503, "usage_type": "call"}, {"api_name": "keras.layers.ZeroPadding2D", "line_number": 506, "usage_type": "call"}, {"api_name": "googlenet_custom_layers.PoolHelper", "line_number": 508, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 510, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 513, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 514, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 516, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 517, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 519, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 520, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 522, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 523, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 525, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 526, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 528, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 532, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 533, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 536, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 539, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 540, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 542, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 543, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 546, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 547, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 549, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 550, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 553, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 554, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling2D", "line_number": 556, "usage_type": "call"}, {"api_name": "keras.layers.Convolution2D", "line_number": 560, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 561, "usage_type": "call"}, {"api_name": "keras.layers.merge", "line_number": 564, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling2D", "line_number": 567, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 569, "usage_type": "call"}, {"api_name": 
"keras.layers.Dropout", "line_number": 571, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 573, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 573, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 575, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 577, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 587, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 591, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 605, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 607, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 608, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 619, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 621, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 622, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 638, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 658, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 673, "usage_type": "call"}, {"api_name": "signal.signal", "line_number": 684, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 684, "usage_type": "attribute"}]} +{"seq_id": "19127867681", "text": "# Importing the necessary Python libraries\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport tmdbv3api\nfrom imdb import IMDb\nfrom omdb import OMDBClient\nfrom rotten_tomatoes_scraper.rt_scraper import MovieScraper\n\n\n\n## FEATURE ENGINEERING FUNCTIONS\n## ---------------------------------------------------------------------------------------------------------------------\ndef generate_movie_age(df):\n \"\"\"\n Generating a movie age relative to the current year and the year that the movie was released\n\n Args:\n - df (Pandas DataFrame): A DataFrame containing the raw data for which year the movie was released\n\n Returns:\n - df (Pandas DataFrame): A DataFrame containing newly engineered feature of relative \"year\"\n \"\"\"\n\n # Extracting current year\n currentYear = datetime.now().year\n\n # Engineering the \"year\" column to be a relative \"movie_age\" column based on number of years since original release\n for index, row in df.iterrows():\n year_released = row['year']\n movie_age = currentYear - year_released\n df.loc[index, 'movie_age'] = movie_age\n\n return df\n\n\n\ndef engineer_rt_critic_score(df):\n \"\"\"\n Feature engineering the Rotten Tomatoes critic score\n\n Args:\n - df (Pandas DataFrame): A DataFrame containing the raw data RT critic score\n\n Returns:\n - df (Pandas DataFrame): A DataFrame containing an updated version of RT critic score\n \"\"\"\n\n # Removing percentage sign from RT critic score\n for index, row in df.iterrows():\n if pd.notnull(row['rt_critic_score']):\n df.loc[index, 'rt_critic_score'] = int(row['rt_critic_score'][:2])\n\n # Filling rt_critic_score nulls with critic average of 59%\n df['rt_critic_score'].fillna(59, inplace = True)\n\n # Transforming RT critic score into an integer datatype\n df['rt_critic_score'] = df['rt_critic_score'].astype(int)\n\n return df\n\n\n\ndef handle_nulls_for_metascore(df):\n \"\"\"\n Handling the nulls associated to the metascore feature\n\n Args:\n - df (Pandas DataFrame): A DataFrame containing the raw data metascore feature\n\n Returns:\n - df (Pandas DataFrame): A DataFrame containing an updated 
version of the metascore\n \"\"\"\n\n # Filling metascore nulls with 50.0\n df['metascore'].fillna(50.0, inplace = True)\n\n return df\n\n\n\ndef handle_nulls_for_rt_audience_score(df):\n \"\"\"\n Handling the nulls associated to the RT audience score feature\n\n Args:\n - df (Pandas DataFrame): A DataFrame containing the raw data RT audience score feature\n\n Returns:\n - df (Pandas DataFrame): A DataFrame containing an updated version of the RT audience score\n \"\"\"\n\n # Filling rt_audience_score with audience average of 59%\n df['rt_audience_score'].fillna(59.0, inplace = True)\n\n return df\n\n\n\n## MODEL INFERENCE FUNCTIONS\n## ---------------------------------------------------------------------------------------------------------------------\ndef get_movie_prediction(movie_name, tmdb_key, omdb_key, binary_classification_pipeline, regression_pipeline):\n \"\"\"\n Getting the movie review prediction from the input data\n\n Args:\n - movie_name (str): A string containing the name of the movie to infer for predictions\n - tmdb_key (str): A string representing the API key to get data from the TMDb API\n - omdb_key (str): A string representing the API key to get data from the OMDb API\n - binary_classification_pipeline (obj): The model representing the binary classification pipeline to obtain the Biehn binary yes / no approval score\n - regression_pipeline (obj): The model representing the regression pipeline to obtain the Biehn Scale score\n\n Returns:\n - final_scores (dict): A dictionary containing the movie name and final scores\n \"\"\"\n\n # Defining which features to keep from each respective source\n TMDB_FEATS = ['tmdb_id', 'imdb_id', 'budget', 'primary_genre', 'secondary_genre',\n 'tmdb_popularity', 'revenue', 'runtime', 'tmdb_vote_average', 'tmdb_vote_count']\n IMDB_FEATS = ['imdb_rating', 'imdb_votes', 'year']\n OMDB_FEATS = ['rt_critic_score', 'metascore']\n ROTT_FEATS = ['rt_audience_score']\n ALL_FEATS = TMDB_FEATS + IMDB_FEATS + OMDB_FEATS + ROTT_FEATS\n\n # Instantiating the TMDb objects and setting the API key\n tmdb = tmdbv3api.TMDb()\n tmdb_search = tmdbv3api.Search()\n tmdb_movies = tmdbv3api.Movie()\n tmdb.api_key = tmdb_key\n\n # Instantiating the IMDbPY search object\n imdb_search = IMDb()\n\n # Instantiating the OMDb client\n omdb_client = OMDBClient(apikey = omdb_key)\n\n # Getting JSON from the body of the request and loading as Pandas DataFrame\n df = pd.DataFrame(data = [movie_name], columns = ['movie_name'])\n\n # Getting TMDb full search results\n tmdb_search_results = tmdb_search.movies({'query': movie_name})\n\n # Extracting tmdb_id if search results exist\n if len(tmdb_search_results) != 0:\n tmdb_id = tmdb_search_results[0]['id']\n else:\n print(f'Results not found for title: {movie_name}.')\n\n # Getting the details of the movie using the tmdb_id\n tmdb_details = dict(tmdb_movies.details(tmdb_id))\n\n # Adding tmdb_id to tmdb_details dictionary\n tmdb_details['tmdb_id'] = tmdb_id\n\n # Checking the length of TMDb genres to see if there is a secondary genre\n tmdb_genre_length = len(tmdb_details['genres'])\n\n # Separating the primary_genre from the 'genres' nested child dictionary if it exists\n if tmdb_genre_length == 0:\n tmdb_details['primary_genre'] = np.nan\n else:\n tmdb_details['primary_genre'] = tmdb_details['genres'][0]['name']\n\n # Separating the secondary_genre from the 'genres' nested child dictionary if it exists\n if tmdb_genre_length >= 2:\n tmdb_details['secondary_genre'] = tmdb_details['genres'][1]['name']\n else:\n 
tmdb_details['secondary_genre'] = np.nan\n\n # Renaming some TMDb columns appropriately\n tmdb_details['tmdb_popularity'] = tmdb_details.pop('popularity')\n tmdb_details['tmdb_vote_average'] = tmdb_details.pop('vote_average')\n tmdb_details['tmdb_vote_count'] = tmdb_details.pop('vote_count')\n\n # Adding the TMDb features to df\n for feat in TMDB_FEATS:\n df[feat] = tmdb_details[feat]\n\n # Getting imdb_id from TMDb output and removing unnecessary characters\n imdb_id = df['imdb_id'][0]\n imdb_id = imdb_id[2:]\n\n # Using IMDbPY to get movie details using the IMDb ID\n imdb_details = dict(imdb_search.get_movie(imdb_id))\n\n # Renaming the features appropriately\n imdb_details['imdb_rating'] = imdb_details.pop('rating')\n imdb_details['imdb_votes'] = imdb_details.pop('votes')\n\n # Adding the IMDb features to df\n for feat in IMDB_FEATS:\n df[feat] = imdb_details[feat]\n\n # Using the OMDb client to search for the movie results using the IMDb ID\n omdb_details = omdb_client.imdbid(df['imdb_id'][0])\n\n # Setting the Rotten Tomatoes critic score based on availability\n if len(omdb_details['ratings']) > 0:\n for rater in omdb_details['ratings']:\n if rater['source'] == 'Rotten Tomatoes':\n omdb_details['rt_critic_score'] = rater['value']\n else:\n omdb_details['rt_critic_score'] = np.nan\n\n # Adding the OMDb features to df\n for feat in OMDB_FEATS:\n df[feat] = omdb_details[feat]\n\n # Setting the Rotten Tomatoes audience score to be null if RT critic score is not present from OMDb output\n if str(df['rt_critic_score'][0]) == 'nan':\n rt_movie_details = {'rt_audience_score': np.nan}\n else:\n # Setting the Rotten Tomatoes audience score appropriately from the RT scraper object if present\n try:\n # Getting the movie metadata from the RT scraper\n movie_name = df['movie_name'][0]\n rt_movie_scraper = MovieScraper(movie_title = movie_name)\n rt_movie_scraper.extract_metadata()\n\n # Extracting the critic and audience scores from the metadata\n rt_critic_score = rt_movie_scraper.metadata['Score_Rotten']\n rt_audience_score = rt_movie_scraper.metadata['Score_Audience']\n\n # Comparing the rt_critic_score from the RT scraper to the OMDb output\n if rt_critic_score == df['rt_critic_score'][0][:2]:\n rt_movie_details = {'rt_audience_score': rt_audience_score}\n else:\n rt_movie_details = {'rt_audience_score': np.nan}\n\n # Setting the Rotten Tomatoes audience score to be null if RT critic score is not present from OMDb output\n except:\n rt_movie_details = {'rt_audience_score': np.nan}\n\n # Adding the ROTT features to df\n for feat in ROTT_FEATS:\n df[feat] = rt_movie_details[feat]\n\n # Getting the inference for the Biehn \"yes or no\" approval\n df['biehn_yes_or_no'] = binary_classification_pipeline.predict(df[ALL_FEATS])\n\n # Getting the inference for the Biehn Scale score\n df['biehn_scale_score'] = regression_pipeline.predict(df[ALL_FEATS])\n\n # Establishing final output as a dictionary\n final_scores = {'movie_name': df['movie_name'][0],\n 'biehn_yes_or_no': df['biehn_yes_or_no'][0],\n 'biehn_scale_score': df['biehn_scale_score'][0]\n }\n\n return final_scores", "repo_name": "dkhundley/movie-ratings-model", "sub_path": "src/model-inference-ui/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 9355, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": 
"name"}, {"api_name": "pandas.notnull", "line_number": 51, "usage_type": "call"}, {"api_name": "tmdbv3api.TMDb", "line_number": 126, "usage_type": "call"}, {"api_name": "tmdbv3api.Search", "line_number": 127, "usage_type": "call"}, {"api_name": "tmdbv3api.Movie", "line_number": 128, "usage_type": "call"}, {"api_name": "imdb.IMDb", "line_number": 132, "usage_type": "call"}, {"api_name": "omdb.OMDBClient", "line_number": 135, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 168, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 203, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 211, "usage_type": "attribute"}, {"api_name": "rotten_tomatoes_scraper.rt_scraper.MovieScraper", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 232, "usage_type": "attribute"}]} +{"seq_id": "31181443040", "text": "# adapted from cuCIM, see ../LICENSE-3rdparty.txt\n\nimport os\nimport pickle\n\nimport cupy\nimport cupy as cp\nimport cupyx.scipy.ndimage\nimport dask.array as da\nimport dask_image\nimport dask_image.ndfilters\nimport numpy as np\nimport pandas as pd\nimport scipy\n\nfrom _image_bench import ImageBench\n\n\nclass ConvolveBench(ImageBench):\n def __init__(\n self,\n function_name,\n shape,\n weights_shape,\n dtypes=[np.float32],\n fixed_kwargs={},\n var_kwargs={},\n chunks='auto',\n module_cpu=scipy.ndimage,\n module_gpu=cupyx.scipy.ndimage,\n module_dask=dask_image.ndfilters,\n ):\n\n self.weights_shape = weights_shape\n\n super().__init__(\n function_name=function_name,\n shape=shape,\n dtypes=dtypes,\n fixed_kwargs=fixed_kwargs,\n var_kwargs=var_kwargs,\n module_cpu=module_cpu,\n module_gpu=module_gpu,\n module_dask=module_dask,\n chunks=chunks,\n )\n\n def set_args(self, dtype):\n imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype)\n image = cp.asnumpy(imaged)\n\n wd = cupy.testing.shaped_random(self.weights_shape, xp=cp, dtype=dtype)\n w = cp.asnumpy(wd)\n\n self.args_cpu = (image, w)\n self.args_dask_cpu = (da.from_array(image, chunks=self.chunks), w)\n self.args_gpu = (imaged, wd)\n\n\nclass FilterBench(ImageBench):\n def set_args(self, dtype):\n imaged = cupy.testing.shaped_random(self.shape, xp=cp, dtype=dtype)\n image = cp.asnumpy(imaged)\n self.args_cpu = (image,)\n self.args_dask_cpu = (da.from_array(image, chunks=self.chunks),)\n self.args_gpu = (imaged,)\n\n\npfile = \"filter_results.pickle\"\nif os.path.exists(pfile):\n with open(pfile, \"rb\") as f:\n all_results = pickle.load(f)\nelse:\n all_results = pd.DataFrame()\n\nmodes = [\"constant\", \"mirror\"]\nprefilter = True\ndtypes = [np.float32]\nfor shape in [(3840, 2160), (7680, 4320), (192, 192, 192), (512, 256, 256)]:\n ndim = len(shape)\n weights_shape = (5,) * ndim\n weights_shape1d = weights_shape[:1]\n\n if ndim == 2:\n all_chunks = [\n # (shape[0] // 5, shape[1] // 2),\n (shape[0] // 5, shape[1] // 4),\n # (shape[0] // 20, shape[1] // 1),\n # (shape[0] // 20, shape[1] // 2),\n ]\n else:\n all_chunks = [\n (shape[0] // 4, shape[1] // 2, shape[2] // 2),\n # (shape[0] // 5, shape[1] // 2, shape[2] // 2),\n # (shape[0] // 8, shape[1] // 4, shape[2])\n ]\n\n for fname, var_kwargs in [\n # (\"uniform_filter\", dict(mode=[\"nearest\"], size=[3, 7, 11])),\n # (\"gaussian_filter\", dict(mode=[\"nearest\"], 
sigma=[0.33, 1, 3, 9])),\n # (\"maximum_filter\", dict(mode=[\"nearest\"], size=[3, 5, 7])),\n # (\"minimum_filter\", dict(mode=[\"nearest\"], size=[3, 5, 7])),\n (\"median_filter\", dict(mode=[\"nearest\"], size=[3, 5, 7, 11])),\n # (\"percentile_filter\", dict(mode=[\"nearest\"], size=[3, 5, 7], percentile=[30])),\n # (\"rank_filter\", dict(mode=[\"nearest\"], size=[3, 5, 7], rank=[-2])),\n # (\"prewitt\", dict(mode=[\"nearest\"], axis=[0, -1])),\n # (\"sobel\", dict(mode=[\"nearest\"], axis=[0, -1])),\n # (\"laplace\", dict(mode=[\"nearest\"])),\n # (\"gaussian_laplace\", dict(mode=[\"nearest\"], sigma=[0.33, 3, 9])),\n # (\"gaussian_gradient_magnitude\", dict(mode=[\"nearest\"], sigma=[0.33, 3, 9])),\n\n # dask_image doesn't have these 1d variants\n # (\n # \"gaussian_filter1d\",\n # dict(mode=[\"nearest\"], sigma=[0.33, 3, 9], axis=[0, -1], order=[0, 1]),\n # ),\n # (\"uniform_filter1d\", dict(mode=[\"nearest\"], size=[3, 7, 11], axis=[0, -1])),\n # (\"maximum_filter1d\", dict(mode=[\"nearest\"], size=[3, 7], axis=[0, -1])),\n # (\"minimum_filter1d\", dict(mode=[\"nearest\"], size=[3, 7], axis=[0, -1])),\n\n ]:\n for chunks in all_chunks:\n # TODO: add cases for generic_filter and generic_filter1d?\n\n\n if ndim == 3 and fname == \"median_filter\":\n var_kwargs[\"size\"] = [3, 5, 7] # omit sizes > 5\n\n B = FilterBench(\n function_name=fname,\n shape=shape,\n dtypes=dtypes,\n #fixed_kwargs=dict(output=None),\n chunks=chunks,\n var_kwargs=var_kwargs,\n module_dask=dask_image.ndfilters,\n )\n results = B.run_benchmark(duration=1)\n all_results = all_results.append(results[\"full\"])\n\n for fname, wshape, var_kwargs in [\n# (\"convolve\", weights_shape, dict(mode=modes)),\n# (\"correlate\", weights_shape, dict(mode=modes)),\n # dask_image doesn't have these 1D variants\n # (\"convolve1d\", weights_shape1d, dict(mode=modes, axis=[0, -1])),\n # (\"correlate1d\", weights_shape1d, dict(mode=modes, axis=[0, -1])),\n ]:\n for chunks in all_chunks:\n # TODO: add cases for generic_filter and generic_filter1d?\n\n B = ConvolveBench(\n function_name=fname,\n shape=shape,\n weights_shape=wshape,\n dtypes=dtypes,\n #fixed_kwargs=dict(output=None, origin=0),\n fixed_kwargs=dict(origin=0),\n chunks=chunks,\n var_kwargs=var_kwargs,\n module_dask=dask_image.ndfilters,\n )\n results = B.run_benchmark(duration=1)\n all_results = all_results.append(results[\"full\"])\n\nfbase = os.path.splitext(pfile)[0]\nall_results.to_csv(fbase + \".csv\")\nall_results.to_pickle(pfile)\nwith open(fbase + \".md\", \"wt\") as f:\n f.write(all_results.to_markdown())\n", "repo_name": "grlee77/dask-summit-2021-life-sciences", "sub_path": "benchmarks/dask_cupyx_scipy_filter_bench.py", "file_name": "dask_cupyx_scipy_filter_bench.py", "file_ext": "py", "file_size_in_byte": 5756, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "52", "api": [{"api_name": "_image_bench.ImageBench", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.float32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 29, "usage_type": "attribute"}, {"api_name": "cupyx.scipy.ndimage.scipy", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cupyx.scipy.ndimage", "line_number": 30, "usage_type": "name"}, {"api_name": "dask_image.ndfilters", "line_number": 31, "usage_type": "attribute"}, {"api_name": "cupy.testing.shaped_random", "line_number": 49, "usage_type": "call"}, {"api_name": "cupy.testing", "line_number": 49, "usage_type": 
"attribute"}, {"api_name": "cupy.asnumpy", "line_number": 50, "usage_type": "call"}, {"api_name": "cupy.testing.shaped_random", "line_number": 52, "usage_type": "call"}, {"api_name": "cupy.testing", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cupy.asnumpy", "line_number": 53, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 56, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 56, "usage_type": "name"}, {"api_name": "_image_bench.ImageBench", "line_number": 60, "usage_type": "name"}, {"api_name": "cupy.testing.shaped_random", "line_number": 62, "usage_type": "call"}, {"api_name": "cupy.testing", "line_number": 62, "usage_type": "attribute"}, {"api_name": "cupy.asnumpy", "line_number": 63, "usage_type": "call"}, {"api_name": "dask.array.from_array", "line_number": 65, "usage_type": "call"}, {"api_name": "dask.array", "line_number": 65, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 78, "usage_type": "attribute"}, {"api_name": "dask_image.ndfilters", "line_number": 136, "usage_type": "attribute"}, {"api_name": "dask_image.ndfilters", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 165, "usage_type": "call"}, {"api_name": "os.path", "line_number": 165, "usage_type": "attribute"}]} +{"seq_id": "31041503857", "text": "from django.urls import path\n\nfrom .views import OrderCreateView, OrderListView, AllOrderView, get_half_jillion_price\n\n\napp_name = 'orderbook'\n\n\nurlpatterns = [\n path('', AllOrderView.as_view(), name='all_orderbook'),\n path('/', OrderListView.as_view(), name='orderbook'),\n path('/', OrderCreateView.as_view(), name='create_order'),\n path('get-jillion-price//', get_half_jillion_price, name='get-jillion-price'),\n]", "repo_name": "JuanCarlosFL/jillionet", "sub_path": "orderbook/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 459, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.AllOrderView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.AllOrderView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.OrderListView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "views.OrderListView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrderCreateView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.OrderCreateView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.get_half_jillion_price", "line_number": 13, "usage_type": "argument"}]} +{"seq_id": "29305641388", "text": "'''Read the file 'iris.json' as a text file :\n1. Create a list having each line of the file as an element\n2. Convert it into a list of dictionary objects.\n3. Show the details of all flowers whose species is \"setosa\".\n4. Print the minimum petal area and max sepal area in each species\n5. 
Sort the list of dictionaries according to the total area are sepal and petal.'''\n\nimport json\n\n\ndef read_as_list(filename):\n fp=open(filename,\"r\") \n data=fp.readlines() \t\n print(\"List : \")\t\t #The file elements as list elements\n fp.close()\n return data\n \ndef read_as_dict(filename):\n fp=open(filename,'r') \n dictionary=json.load(fp)\n print(\"List of dictionary : \") \t\n return dictionary\t\t\t #list of dictionary\n\ndef print_details_setosa(data_dict):\n print(\"\\nAll flowers whose species is setosa\\n\")\n for i in data_dict:\t\t\t \n if (i['species']=='setosa'):\n print('Sepel length : %f '%(i['sepalLength']))\n\ndef min_and_max_area(data):\n species_list = list() # species names\n for i in data:\n species_list.append(i['species'])\n #removing duplicates\n species_list = list(set(species_list))\n sepal_area = [] #list to store sepal and petal area\n petal_area = []\n for i in species_list:\n for j in data:\n if(j['species']==i):\n sepal_area.append(j['sepalLength']*j['sepalWidth'])\n petal_area.append(j['petalLength']*j['petalWidth'])\n print()\n print(i.capitalize())\n #print minimum and maximum areas\n print(\"Minimum Petal Area : \",round(min(petal_area),2))\n print(\"Maximum Sepal Area : \",round(max(sepal_area),2))\n sepal_area.clear()\n petal_area.clear()\n \ndef total_sort(data):\n for i in data:\n #add total area to dictionary\n total_area = (i['petalLength']*i['petalWidth'])+(i['sepalLength']*i['sepalWidth'])\n i.update({'total_area':round(total_area,2)})\n sortedList = sorted(data,key=lambda i:i['total_area'])\n print(\"\\nSorted List : \")\n for i in sortedList:\n print(i)\n\ndata = read_as_list('iris.json')\nfor line in data:\n print(line)\n\ndata_dict = read_as_dict('iris.json')\nfor row in data_dict:\n print(row)\n\nprint_details_setosa(data_dict)\nmin_and_max_area(data_dict)\ntotal_sort(data_dict)\n", "repo_name": "ReVuz/Python-lab-2", "sub_path": "qn3.py", "file_name": "qn3.py", "file_ext": "py", "file_size_in_byte": 2256, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "26238468327", "text": "\"\"\"\nLoad .arim file format\n\nAn .arim file is a directory which contains:\n\n- a conf.yaml file (base configuration file)\n- a conf.d directory which contains additional configuration files (optional)\n- intermediary and final results (optional).\n\nThe recommended way to load the configuration is to use :func:`load_conf`.\nThe configuration is loaded according to the following pseudo-code:\n\n.. code-block:: none\n\n conf := read(conf.yaml)\n For each file in conf.d:\n tmp_conf := read(file)\n conf := merge(conf, tmp_conf)\n Return conf\n\nThe files are conf.d are read in alphabetical order.\nIf a configuration entry is present in two files, only the entry from the file\nread the latest will be kept.\n\n\"\"\"\n\nimport copy\nimport pathlib\n\nimport numpy as np\nimport yaml\n\nfrom .. import _probes, config, core, geometry\nfrom . 
import brain\n\n__all__ = [\n \"load_conf\",\n \"load_conf_file\",\n \"load_conf_from_str\",\n \"probe_from_conf\",\n \"examination_object_from_conf\",\n \"block_in_immersion_from_conf\",\n \"block_in_contact_from_conf\",\n \"material_from_conf\",\n \"material_attenuation_from_conf\",\n \"grid_from_conf\",\n \"frame_from_conf\",\n]\n\n\nclass InvalidConf(Exception):\n pass\n\n\ndef load_conf_from_str(stream):\n \"\"\"Load a single configuration file from a stream or string formatted in YAML.\n\n Parameters\n ----------\n stream : stream, str\n\n Returns\n -------\n arim.config.Config\n \"\"\"\n return config.Config(yaml.safe_load(stream))\n\n\ndef load_conf_file(filename):\n \"\"\"Load a single configuration file\n\n Parameters\n ----------\n filename : str\n Filename\n\n Returns\n -------\n arim.config.Config\n \"\"\"\n with open(filename) as f:\n return load_conf_from_str(f)\n\n\ndef load_conf(dirname, filepath_keys={\"filename\", \"datafile\"}):\n \"\"\"\n Load the configuration from a `.arim` directory\n\n Parameters\n ----------\n dirname\n filepath_keys : set\n Config keys that stores files. If they are relative paths, they will be replaced\n by an absolute path (str object) assuming the root dir is the `.arim` directory.\n Set to False to disable.\n\n Returns\n -------\n arim.config.Config\n\n Notes\n -----\n Load {dirname}/conf.yaml and all yaml files in {dirname}/conf.d/.\n \"\"\"\n root_dir = pathlib.Path(dirname).resolve(strict=True)\n\n # format: foo/bar/{dataset_name}.arim\n dataset_name = root_dir.parts[-1]\n if dataset_name.endswith(\".arim\"):\n dataset_name = dataset_name[:-5]\n\n # Load root conf\n root_conf_filename = root_dir / \"conf.yaml\"\n if root_conf_filename.exists():\n conf = load_conf_file(root_conf_filename)\n else:\n conf = config.Config({})\n\n # Load conf.d fragment files\n # Remark: no error if root_dir/conf.d doesn't exist\n for conf_filename in root_dir.glob(\"conf.d/*.yaml\"):\n conf.merge(load_conf_file(conf_filename))\n\n # Populate extra keys\n conf[\"dataset_name\"] = dataset_name\n conf[\"root_dir\"] = root_dir\n\n # populate result_dir if missing, create dir if needed\n result_dir = conf.get(\"result_dir\", None)\n if result_dir is None:\n result_dir = root_dir\n result_dir.mkdir(parents=True, exist_ok=True)\n else:\n result_dir = pathlib.Path(root_dir / pathlib.Path(result_dir)).resolve(\n strict=True\n )\n conf[\"result_dir\"] = result_dir\n\n if filepath_keys:\n _resolve_filenames(conf, root_dir, filepath_keys)\n\n return conf\n\n\ndef _resolve_filenames(d, root_dir, target_keys):\n \"\"\"\n Replace target keys by an absolute pathlib.Path where the root dir\n is `root_dir`\n\n Parameters\n ----------\n d : dict or anything\n root_dir : pathlib.Path\n target_keys : set\n\n Returns\n -------\n d\n Updated dictionary\n\n \"\"\"\n if not isinstance(d, dict):\n return\n for k, v in d.items():\n if k in target_keys:\n d[k] = str(root_dir / v)\n else:\n _resolve_filenames(v, root_dir, target_keys)\n\n\ndef probe_from_conf(conf, apply_probe_location=True):\n \"\"\"\n Load probe\n\n Parameters\n ----------\n conf : dict\n Root conf\n apply_probe_location: bool\n\n Returns\n -------\n Probe\n\n \"\"\"\n # load from probe library\n if \"probe_key\" in conf and \"probe\" in conf:\n raise config.InvalidConf(\"'probe' and 'probe_key' mutually exclusive\")\n if \"probe_key\" in conf:\n probe = _probes.probes[conf[\"probe_key\"]]\n else:\n probe = core.Probe.make_matrix_probe(**conf[\"probe\"])\n\n if apply_probe_location:\n probe_location = 
conf[\"probe_location\"]\n\n if \"ref_element\" in probe_location:\n probe.set_reference_element(conf[\"probe_location\"][\"ref_element\"])\n probe.translate_to_point_O()\n\n if \"angle_deg\" in probe_location:\n probe.rotate(\n geometry.rotation_matrix_y(\n np.deg2rad(conf[\"probe_location\"][\"angle_deg\"])\n )\n )\n\n if \"standoff\" in probe_location:\n probe.translate([0, 0, conf[\"probe_location\"][\"standoff\"]])\n\n return probe\n\n\ndef examination_object_from_conf(conf):\n \"\"\"\n Load examination object\n\n Parameters\n ----------\n conf : dict\n Root conf\n\n Returns\n -------\n arim.core.ExaminationObject\n\n \"\"\"\n if (\n \"frontwall\" in conf.keys()\n and \"backwall\" in conf.keys()\n and \"couplant_material\" in conf.keys()\n and \"block_material\" in conf.keys()\n ):\n return block_in_immersion_from_conf(conf)\n elif \"block_material\" in conf.keys():\n return block_in_contact_from_conf(conf)\n else:\n raise NotImplementedError\n\n\ndef material_attenuation_from_conf(mat_att_conf):\n \"\"\"\n Load material attenuation\n\n Parameters\n ----------\n mat_att_conf : dict\n Material attenuation conf\n\n Returns\n -------\n func\n\n See Also\n --------\n :func:`arim.core.material_attenuation_factory`\n \"\"\"\n if isinstance(mat_att_conf, float):\n return core.material_attenuation_factory(\"constant\", mat_att_conf)\n else:\n # at this stage, assume we have a dict\n return core.material_attenuation_factory(**mat_att_conf)\n\n\ndef _material_from_conf(conf_or_none):\n if conf_or_none is None:\n return None\n else:\n return material_attenuation_from_conf(conf_or_none)\n\n\ndef material_from_conf(conf):\n \"\"\"\n Load material\n\n Parameters\n ----------\n conf : dict\n Material conf\n\n Returns\n -------\n arim.core.Material\n \"\"\"\n material_kwargs = copy.deepcopy(conf)\n material_kwargs[\"longitudinal_att\"] = _material_from_conf(\n material_kwargs.get(\"longitudinal_att\")\n )\n material_kwargs[\"transverse_att\"] = _material_from_conf(\n material_kwargs.get(\"transverse_att\")\n )\n return core.Material(**material_kwargs)\n\n\ndef block_in_immersion_from_conf(conf):\n \"\"\"\n Load block in immersion (examination object)\n\n Parameters\n ----------\n conf : dict\n Root conf\n\n Returns\n -------\n arim.BlockInImmersion\n\n \"\"\"\n couplant = material_from_conf(conf[\"couplant_material\"])\n block = material_from_conf(conf[\"block_material\"])\n frontwall = geometry.points_1d_wall_z(**conf[\"frontwall\"], name=\"Frontwall\")\n backwall = geometry.points_1d_wall_z(**conf[\"backwall\"], name=\"Backwall\")\n return core.BlockInImmersion(block, couplant, frontwall, backwall)\n\n\ndef block_in_contact_from_conf(conf):\n block = material_from_conf(conf[\"block_material\"])\n frontwall_conf = conf.get(\"frontwall\", None)\n if frontwall_conf is None:\n frontwall = None\n else:\n frontwall = geometry.points_1d_wall_z(**frontwall_conf, name=\"Frontwall\")\n backwall_conf = conf.get(\"backwall\", None)\n if backwall_conf is None:\n backwall = None\n else:\n backwall = geometry.points_1d_wall_z(**backwall_conf, name=\"Backwall\")\n under_material_conf = conf.get(\"under_material\", None)\n if under_material_conf is None:\n under_material = None\n else:\n under_material = material_from_conf(under_material_conf)\n return core.BlockInContact(block, frontwall, backwall, under_material)\n\n\ndef grid_from_conf(conf):\n \"\"\"\n Load grid\n\n Parameters\n ----------\n conf : dict\n Root conf\n\n Returns\n -------\n arim.Grid\n\n \"\"\"\n conf_grid = copy.deepcopy(conf[\"grid\"])\n if 
\"ymin\" not in conf_grid:\n conf_grid[\"ymin\"] = 0.0\n if \"ymax\" not in conf_grid:\n conf_grid[\"ymax\"] = 0.0\n return geometry.Grid(**conf_grid)\n\n\ndef frame_from_conf(\n conf, use_probe_from_conf=True, use_examination_object_from_conf=True\n):\n \"\"\"\n Load a Frame.\n\n Current limitation: read only from Brain (relies on :func:`arim.io.brain.load_expdata`).\n\n\n Parameters\n ----------\n conf : dict or Conf\n Root configuration\n use_probe_from_conf : bool\n If True, load probe from conf (ignores the one defined in datafile)\n use_examination_object_from_conf : bool\n If True, load examination from conf (ignores the one defined in datafile)\n\n Returns\n -------\n Frame\n\n \"\"\"\n frame_conf = conf[\"frame\"]\n frame = brain.load_expdata(frame_conf[\"datafile\"])\n\n instrument_delay = None\n try:\n instrument_delay = frame_conf[\"instrument_delay\"]\n except KeyError:\n pass\n\n if instrument_delay is not None:\n # Adjust time vector\n frame.time = core.Time(\n frame.time.start - instrument_delay, frame.time.step, len(frame.time)\n )\n\n if use_probe_from_conf:\n frame.probe = probe_from_conf(conf)\n if use_examination_object_from_conf:\n frame.examination_object = examination_object_from_conf(conf)\n\n return frame\n", "repo_name": "ndtatbristol/arim", "sub_path": "src/arim/io/native.py", "file_name": "native.py", "file_ext": "py", "file_size_in_byte": 9790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 19, "dataset": "github-code", "pt": "52", "api": [{"api_name": "yaml.safe_load", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 105, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.deg2rad", "line_number": 204, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 285, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 350, "usage_type": "call"}]} +{"seq_id": "31660410991", "text": "import torch\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\ndef build_targets(model, targets):\n # targets = [image, class, x, y, w, h]\n # print('targets.shape', targets.shape)\n nt = len(targets)\n tcls, tbox, indices, av = [], [], [], []\n multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n # reject, use_all_anchors = True, True\n reject, use_all_anchors = False, True\n for i in model.yolo_layers:\n # get number of grid points and anchor vec for this yolo layer\n #if multi_gpu:\n # ng, anchor_vec = model.module.module_list[i].ng, model.module.module_list[i].anchor_vec\n #else:\n # ng, anchor_vec = model.module_list[i].ng, model.module_list[i].anchor_vec\n ng, anchor_vec = i.ng, i.anchor_vec\n # iou of targets-anchors\n t, a = targets, []\n gwh = t[:, 4:6] * ng\n if nt:\n iou = wh_iou(anchor_vec, gwh)\n\n if use_all_anchors:\n na = len(anchor_vec) # number of anchors\n a = torch.arange(na).view((-1, 1)).repeat([1, nt]).view(-1)\n t = targets.repeat([na, 1])\n gwh = gwh.repeat([na, 1])\n else: # use best anchor only\n iou, a = iou.max(0) # best iou and anchor\n\n # reject anchors below iou_thres (OPTIONAL, increases P, lowers R)\n if reject:\n j = iou.view(-1) > model.hyp['iou_t'] # iou threshold hyperparameter\n t, a, gwh = t[j], a[j], gwh[j]\n\n # 
Indices\n b, c = t[:, :2].long().t() # target image, class\n gxy = t[:, 2:4] * ng # grid x, y\n gi, gj = gxy.long().t() # grid x, y indices\n indices.append((b, a, gj, gi))\n\n # Box\n gxy -= gxy.floor() # xy\n tbox.append(torch.cat((gxy, gwh), 1)) # xywh (grids)\n av.append(anchor_vec[a]) # anchor vec\n\n # Class\n tcls.append(c)\n if c.shape[0]: # if any targets\n assert c.max() < model.nc, 'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. ' \\\n 'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (\n model.nc, model.nc - 1, c.max())\n\n return tcls, tbox, indices, av\n\nclass FocalLoss(nn.Module):\n # Wraps focal loss around existing loss_fcn() https://arxiv.org/pdf/1708.02002.pdf\n # i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=2.5)\n def __init__(self, loss_fcn, gamma=0.5, alpha=1):\n super(FocalLoss, self).__init__()\n self.loss_fcn = loss_fcn\n self.gamma = gamma\n self.alpha = alpha\n self.reduction = loss_fcn.reduction\n self.loss_fcn.reduction = 'none' # required to apply FL to each element\n\n def forward(self, input, target):\n loss = self.loss_fcn(input, target)\n loss *= self.alpha * (1.000001 - torch.exp(-loss)) ** self.gamma # non-zero power for gradient stability\n\n if self.reduction == 'mean':\n return loss.mean()\n elif self.reduction == 'sum':\n return loss.sum()\n else: # 'none'\n return loss\n\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):\n # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4\n box2 = box2.t()\n\n # Get the coordinates of bounding boxes\n if x1y1x2y2: # x1, y1, x2, y2 = box1\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n else: # x, y, w, h = box1\n b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2\n b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2\n b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2\n b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2\n\n # Intersection area\n inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \\\n (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)\n\n # Union Area\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1\n union = (w1 * h1 + 1e-16) + w2 * h2 - inter\n\n iou = inter / union # iou\n if GIoU or DIoU or CIoU:\n cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width\n ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height\n if GIoU: # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf\n c_area = cw * ch + 1e-16 # convex area\n return iou - (c_area - union) / c_area # GIoU\n if DIoU or CIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1\n # convex diagonal squared\n c2 = cw ** 2 + ch ** 2 + 1e-16\n # centerpoint distance squared\n rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4\n if DIoU:\n return iou - rho2 / c2 # DIoU\n elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47\n v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n with torch.no_grad():\n alpha = v / (1 - iou + v)\n return iou - (rho2 / c2 + v * alpha) # CIoU\n\n return iou\n\n\ndef compute_loss(p, targets, model, giou_flag=True): # predictions, targets, model\n ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor\n lcls, lbox, lobj = ft([0]), ft([0]), ft([0])\n tcls, tbox, 
indices, anchor_vec = build_targets(model, targets)\n h = model.hyp # hyperparameters\n arc = model.arc # # (default, uCE, uBCE) detection architectures\n red = 'mean' # Loss reduction (sum or mean)\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)\n BCE = nn.BCEWithLogitsLoss(reduction=red)\n CE = nn.CrossEntropyLoss(reduction=red) # weight=model.class_weights\n\n if 'F' in arc: # add focal loss\n g = h['fl_gamma']\n BCEcls, BCEobj, BCE, CE = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g), FocalLoss(BCE, g), FocalLoss(CE, g)\n\n # Compute losses\n np, ng = 0, 0 # number grid points, targets\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = indices[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0]) # target obj\n np += tobj.numel()\n\n # Compute losses\n nb = len(b)\n if nb: # number of targets\n ng += nb\n '''\n # [] 可以看作索引\n '''\n\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n # ps[:, 2:4] = torch.sigmoid(ps[:, 2:4]) # wh power loss (uncomment)\n\n '''\n pred : [0,1]xy, [2,3]wh [4]conf [5:]class Pro\n '''\n\n # GIoU\n pxy = torch.sigmoid(ps[:, 0:2]) # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)\n pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchor_vec[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n # print('pbox.shape', pbox.shape)\n # print(tbox[i].shape)\n giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, CIoU=True) # giou computation\n\n # 直接用的iou\n lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean() # giou loss\n # 置信度目标值用的是giou\n tobj[b, a, gj, gi] = giou.detach().clamp(0).type(tobj.dtype) if giou_flag else 1.0\n\n if 'default' in arc and model.nc > 1: # cls loss (only if multiple classes)\n t = torch.zeros_like(ps[:, 5:]) # targets\n t[range(nb), tcls[i]] = 1.0\n lcls += BCEcls(ps[:, 5:], t) # BCE\n # lcls += CE(ps[:, 5:], tcls[i]) # CE\n \n '''\n # Instance-class weighting (use with reduction='none')\n # nt = t.sum(0) + 1 # number of targets per class\n # lcls += (BCEcls(ps[:, 5:], t) / nt).mean() * nt.mean() # v1\n # lcls += (BCEcls(ps[:, 5:], t) / nt[tcls[i]].view(-1,1)).mean() * nt.mean() # v2\n '''\n\n # obj loss(conf)\n if 'default' in arc: # separate obj and cls\n lobj += BCEobj(pi[..., 4], tobj) # obj loss\n\n # class loss\n elif 'BCE' in arc: # unified BCE (80 classes)\n t = torch.zeros_like(pi[..., 5:]) # targets\n if nb:\n t[b, a, gj, gi, tcls[i]] = 1.0 #正样本置信度设为1\n lobj += BCE(pi[..., 5:], t)\n\n # class loss (1(backgroud)+num_class)\n elif 'CE' in arc: # unified CE (1 background + 80 classes)\n t = torch.zeros_like(pi[..., 0], dtype=torch.long) # targets\n if nb:\n t[b, a, gj, gi] = tcls[i] + 1\n lcls += CE(pi[..., 4:].view(-1, model.nc + 1), t.view(-1))\n\n lbox *= h['giou']\n lobj *= h['obj']\n lcls *= h['cls']\n if red == 'sum':\n bs = tobj.shape[0] # batch size\n lobj *= 3 / (6300 * bs) * 2 # 3 / np * 2\n if ng:\n lcls *= 3 / ng / model.nc\n lbox *= 3 / ng\n\n loss = lbox + lobj + lcls\n return loss, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n", "repo_name": "dawn-ech/ultra_impl", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 9731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.init.normal_", "line_number": 7, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "attribute"}, 
{"api_name": "torch.nn.init.normal_", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn.init.constant_", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 103, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.atan", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.zeros_like", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.sigmoid", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 213, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "8666011050", "text": "\"\"\"\nPlotly plotting functions.\n\"\"\"\nimport plotly.graph_objs as go\nfrom .aggregate import compute_bin_areas, Voigt\nimport numpy as np\nimport os\nimport pandas as pd\n\nMIN, MAX = 0, 1000\n\n# todo: only recompute area if bin width changes\n# todo: find and fix bugs with partition selection on area plot\n\nif os.environ.get('STACK'):\n env = 'Heroku'\n BASE_DIR = '/app'\n DATABASE_URL = os.environ['DATABASE_URL']\nelse:\n env = 'Dev'\n BASE_DIR = 'C:\\\\Users\\\\Administrator\\\\Desktop\\\\voigt'\n\n\ndef construct_shapes(scale='linear', split_point=None, max_=10):\n shapes = []\n\n if split_point:\n shapes.append({\n 'type': 'line',\n 'x0': split_point,\n 'y0': 0,\n 'x1': split_point,\n 'y1': max_,\n 'line': {\n 'color': 'rgb(55, 128, 191)',\n 'width': 3,\n },\n })\n\n return shapes\n\n\ndef countplot(bin_width=50, shapes=[],\n scale='linear', selectedData=None, DATA=None, exclude_negative=True, session_id=None):\n\n if exclude_negative:\n DATA = DATA.loc[~DATA.variable.str.contains('nm')]\n\n figure = {\n 'data': [go.Histogram(x=DATA.value,\n xbins=dict(\n start=MIN,\n end=MAX,\n size=bin_width\n ),\n marker=dict(\n color='#FFD7E9',\n ),\n opacity=0.75\n )\n ],\n 'layout': go.Layout({\n 'shapes': shapes,\n # 'dragmode': 'select',\n 'yaxis': dict(\n type=scale,\n autorange=True\n # range=range_\n )\n })\n }\n\n return figure\n\n\ndef areaplot(bin_width=50, shapes=[],\n scale='linear', 
selectedData=None, DATA=None, exclude_negative=True, areas=None, session_id=None):\n\n if exclude_negative:\n DATA = DATA.loc[~DATA.variable.str.contains('nm')]\n\n nbins = (MAX - MIN) / int(bin_width)\n bins = np.linspace(MIN, MAX, nbins + 1)\n bins = [(x, bins[i + 1])\n for i, x in enumerate(bins) if i < len(bins) - 1]\n\n if areas is None:\n print('COMPUTING BIN AREAS')\n input_dir = os.path.join(BASE_DIR, 'input', f'input_{session_id}')\n models = pd.read_csv(os.path.join(input_dir, 'models.csv'))\n areas = compute_bin_areas(bins, models)\n\n figure = {\n 'data': [go.Bar(x=[x[0] for x in bins],\n y=areas, width=[bin_width] * len(bins),\n marker=dict(\n color='#FFD7E9',\n ),\n opacity=0.75\n )\n ],\n 'layout': go.Layout({\n 'shapes': shapes,\n # 'dragmode': 'select',\n 'yaxis': dict(\n type=scale,\n autorange=True\n # range=range_\n\n )\n })\n }\n\n hist_output_2_df = pd.DataFrame([], columns=['bin_position', 'height', 'bin_width'])\n hist_output_2_df['bin_position'] = [x[0] for x in bins]\n hist_output_2_df['height'] = areas\n hist_output_2_df['bin_width'] = bin_width\n\n\n session = os.path.join(BASE_DIR, 'output', f'output_{session_id}')\n if not os.path.isdir(session):\n os.mkdir(session)\n hist_output_2_df.to_csv(os.path.join(session, 'histogram.csv'), index=False)\n\n return figure\n\n\ndef curveplot(bin_width=50, shapes=[],\n scale='linear', selectedData=None, DATA=None, exclude_negative=True, session_id=None):\n\n if exclude_negative:\n DATA = DATA.loc[~DATA.variable.str.contains('nm')]\n\n models = DATA\n\n X = np.linspace(30, 1000, 1000 - 30 + 1)\n\n data = list()\n\n for idx, m in models.iterrows():\n\n prefix = str.split(m.variable, '_')[0]\n\n sigma = m.loc[prefix + '_sigma']\n gamma = sigma\n\n amplitude = m.loc[prefix + '_amplitude']\n\n trace = go.Scatter(\n x=X,\n y=Voigt(X, center=m.value, sigma=sigma,\n gamma=gamma, amplitude=amplitude),\n mode='lines',\n name=m.filename.strip('.txt') + f'/{prefix}'\n )\n\n data.append(trace)\n\n figure = {\n 'data': data,\n 'layout': go.Layout({\n 'shapes': shapes,\n # 'dragmode': 'select',\n 'yaxis': dict(\n type=scale,\n autorange=True\n # range=range_\n\n )\n })\n }\n\n return figure\n\n\ndef sumcurveplot(bin_width=50, shapes=[],\n scale='linear', selectedData=None, DATA=None, exclude_negative=True, session_id=None):\n\n if exclude_negative:\n DATA = DATA.loc[~DATA.variable.str.contains('nm')]\n\n models = DATA\n\n def F(x):\n vals = np.array([0]*len(x), ndmin=2)\n\n for idx, model in models.iterrows():\n prefix = model.variable[:model.variable.index('_')]\n sigma = model.loc[prefix + '_sigma']\n gamma = sigma\n amplitude = model.loc[prefix + '_amplitude']\n if 'nm' in prefix:\n amplitude = -amplitude\n\n res = np.array(Voigt(x, center=model.value, amplitude=amplitude, sigma=sigma, gamma=gamma), ndmin=2)\n # print(vals, res)\n vals = np.concatenate([vals, res], axis=0)\n return vals.sum(axis=0)\n\n X = np.linspace(30, 1000, 1000 - 30 + 1)\n\n data = list()\n\n trace = go.Scatter(\n x=X,\n y=F(X),\n mode='lines',\n )\n\n data.append(trace)\n\n figure = {\n 'data': data,\n 'layout': go.Layout({\n 'shapes': shapes,\n # 'dragmode': 'select',\n 'yaxis': dict(\n type=scale,\n autorange=True\n # range=range_\n\n )\n })\n }\n\n return figure\n\n\ndef emptyplot(session_id=None):\n data = list()\n\n figure = {\n 'data': data,\n 'layout': go.Layout({\n # 'dragmode': 'select',\n 'yaxis': dict(\n type='linear',\n autorange=True\n # range=range_\n\n )\n })\n }\n\n return figure\n", "repo_name": "MatthewChatham/voigt", "sub_path": 
"voigt/common/drawing.py", "file_name": "drawing.py", "file_ext": "py", "file_size_in_byte": 6271, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "plotly.graph_objs.Histogram", "line_number": 50, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 50, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 62, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "aggregate.compute_bin_areas", "line_number": 91, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Bar", "line_number": 94, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 94, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 102, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 102, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 136, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 149, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 149, "usage_type": "name"}, {"api_name": "aggregate.Voigt", "line_number": 151, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 161, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 195, "usage_type": "call"}, {"api_name": "aggregate.Voigt", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 200, "usage_type": "call"}, {"api_name": "plotly.graph_objs.Scatter", "line_number": 204, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 204, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 214, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 214, "usage_type": "name"}, {"api_name": "plotly.graph_objs.Layout", "line_number": 234, "usage_type": "call"}, {"api_name": "plotly.graph_objs", "line_number": 234, "usage_type": "name"}]} +{"seq_id": "490880559", "text": "# Name: Murilo Fantucci 
Todão\n# NUSP: 11299982\n# Course: SCC0251\n# Year: 2022\n# Title: Assignment 4 - Image Restoration\nimport numpy as np\nimport imageio\nfrom numpy.fft import fft2, ifft2, fftshift\n\n\n# Create numpy matrix with uint8 format\ndef create_matrix(m, n):\n return np.zeros((m, n), np.uint8)\n\n\n# Create numpy matrix with float format\ndef create_float_matrix(m, n):\n return np.zeros((m, n), float)\n\n\n# Pad a matrix with certain amount of cols and rows\ndef pad_matrix(matrix, nums_to_pad, odd_padding=True):\n if odd_padding:\n return np.pad(matrix, (nums_to_pad, nums_to_pad), \"constant\", constant_values=0)\n else:\n return np.pad(matrix, (nums_to_pad, nums_to_pad + 1), \"constant\", constant_values=0)\n\n\ndef convert_float_uint8(matrix):\n # Normalize matrix to be between 0 and 1, scale and convert it to uint8 format\n return (255 * matrix / np.max(matrix)).astype(np.uint8)\n\n\n# Gaussian filter generator as shown in class\ndef gaussian_filter(k, sigma):\n \"\"\"Gaussian filter\n :param k: defines the lateral size of the kernel/filter, default 5\n :param sigma: standard deviation (dispersion) of the Gaussian distribution\n :return matrix with a filter [k x k] to be used in convolution operations\n \"\"\"\n arx = np.arange((-k // 2) + 1.0, (k // 2) + 1.0)\n x, y = np.meshgrid(arx, arx)\n filt = np.exp(-(1 / 2) * (np.square(x) + np.square(y)) / np.square(sigma))\n return filt / np.sum(filt)\n\n\ndef create_gaussian_degraded_image(image, deg_filter_size, sigma):\n # Generate gaussian filter matrix\n filt = gaussian_filter(deg_filter_size, sigma)\n\n num_to_pad = int(image.shape[0] // 2 - filt.shape[0] // 2)\n filt_pad = pad_matrix(filt, num_to_pad, deg_filter_size % 2)\n\n # Apply blur to image in frequency domain\n I = fft2(image)\n H = fft2(filt_pad)\n G = np.multiply(I, H)\n\n # Return filter and degraded image in frequency domain\n return H, G\n\n\ndef apply_constrained_least_squares_filtering(G, H, gamma):\n # Define laplacian operator and pad to fit size\n laplacian = np.matrix([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])\n num_to_pad = int(G.shape[0] // 2 - laplacian.shape[0] // 2)\n laplacian_pad = pad_matrix(laplacian, num_to_pad)\n\n # Convert laplacian operator to frequency domain and calculate its power spectrum\n P = fft2(laplacian_pad)\n P_power = np.multiply(P, np.conjugate(P))\n\n # Calculate H conjugate and its power spectrum\n H_conj = np.conjugate(H)\n H_power = np.multiply(H, H_conj)\n\n # Find image approximation using given expression and return it\n return np.divide(np.multiply(H_conj, G), (H_power + gamma * P_power + 1e-20))\n\n\ndef constrained_least_squares(image):\n # Read user parameters\n k = int(input())\n sigma = float(input())\n gamma = float(input())\n\n # Degrade image using gaussian filter\n H, G = create_gaussian_degraded_image(image, k, sigma)\n # Apply method to restore original image in frequency domain\n F_hat = apply_constrained_least_squares_filtering(G, H, gamma)\n\n # Convert restored image to space domain, clipping it in the process\n return np.clip(ifft2(F_hat).real.astype(int), 0, 255)\n\n\n# Create a motion point spread function as shown in the lectures\ndef get_motion_psf(shape, degree_angle: float, num_pixel_dist: int = 20) -> np.ndarray:\n \"\"\"Essa função retorna uma array representando a PSF para um dado ângulo em graus\n\n Parameters:\n -----------\n dim_x: int\n The width of the image.\n dim_y: int\n The height of the image.\n degree_angle: float\n The angle of the motion blur. Should be in degrees. 
[0, 360)\n num_pixel_dist: int\n The distance of the motion blur. [0, \\infinity).\n Remember that the distance is measured in pixels.\n Greater will be more blurry.\n\n Returns:\n --------\n np.ndarray\n The point-spread array associated with the motion blur.\n\n \"\"\"\n psf = np.zeros(shape)\n center = np.array([shape[0] - 1, shape[1] - 1]) // 2\n radians = degree_angle / 180 * np.pi\n phase = np.array([np.cos(radians), np.sin(radians)])\n for i in range(num_pixel_dist):\n offset_x = int(center[0] - np.round_(i * phase[0]))\n offset_y = int(center[1] - np.round_(i * phase[1]))\n psf[offset_x, offset_y] = 1\n psf /= psf.sum()\n\n return psf\n\n\n# Calculate convolution between two images using frequency domain\ndef fft_convolve2d(x, y):\n freq_convolution = np.multiply(fft2(x), fft2(y))\n space_convolution = fftshift(ifft2(freq_convolution).real)\n return np.clip(space_convolution, 1e-18, np.max(space_convolution))\n\n\n# Apply iterative method to find image coefficients\ndef apply_richardson_lucy(image, psf, max_iter):\n # Create constant starting guess\n r0 = np.full(shape=image.shape, fill_value=1, dtype=\"float64\")\n psf_flipped = np.transpose(psf)\n # Apply iterative method\n for _ in range(max_iter):\n den = np.clip(fft_convolve2d(r0, psf), 1e-10, 255)\n img_conv = fft_convolve2d(np.divide(image, den), psf_flipped)\n r_new = np.multiply(r0, img_conv)\n r_new = np.clip(r_new, 1e-8, 255)\n r0 = r_new\n return r_new\n\n\ndef richardson_lucy(image):\n # Read parameters from user input\n psf_angle = int(input())\n max_iter = int(input())\n psf = get_motion_psf(image.shape, psf_angle)\n\n # Apply method to get image approximation\n filtered_image = apply_richardson_lucy(image, psf, max_iter)\n return convert_float_uint8(filtered_image)\n\n\n# Calculate difference between two images\ndef rmse(image1, image2):\n m, n = image1.shape\n return np.sqrt(np.sum((image1.real - image2.real) ** 2) / m / n)\n\n\nif __name__ == \"__main__\":\n # Read input image name and method\n input_image_name = str(input()).rstrip()\n method = int(input())\n\n # Open image and select corresponding method\n input_image = imageio.imread(input_image_name)\n\n if method == 1:\n selected_method = constrained_least_squares\n\n elif method == 2:\n selected_method = richardson_lucy\n\n # Apply method and compare images\n filtered_image = selected_method(input_image)\n print(round(rmse(input_image, filtered_image), 4))\n", "repo_name": "murilo-toddy/image-processing", "sub_path": "Trabalho 04 - Image Restoration/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6231, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 44, 
"usage_type": "call"}, {"api_name": "numpy.fft.fft2", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fft.fft2", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.fft.fft2", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.conjugate", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.fft.ifft2", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.multiply", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.fft.fft2", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.fft.fftshift", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.fft.ifft2", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 168, "usage_type": "call"}, {"api_name": "imageio.imread", "line_number": 177, "usage_type": "call"}]} +{"seq_id": "32684063501", "text": "import numpy as np\nimport scipy\nfrom ..utils.my_svd import my_svd\nfrom ..utils.max_eigh import max_eigh\nfrom ..utils.symmetric import symmetric\nfrom ..utils.dDiag import dDiag\n\ndef vl_myrec(X, epsilon, dzdy=None):\n \n svd_u = np.zeros((X.shape[0], X.shape[1], X.shape[1]))\n svd_s = np.zeros((X.shape[0], X.shape[1], X.shape[2]))\n svd_v = np.zeros((X.shape[0], X.shape[2], X.shape[2]))\n \n for i in range(X.shape[0]):\n svd_u[i], svd_s[i], svd_v[i] = my_svd(X[i])\n \n \n answer = X.copy()\n \n if dzdy is None:\n for i in range(X.shape[0]):\n \n max_S, _ = max_eigh(svd_s[i], epsilon)\n answer[i] = svd_u[i].dot(max_S).dot(svd_u[i].T)\n \n return answer\n \n else:\n for i in range(X.shape[0]):\n U, S, V = svd_u[i], svd_s[i], svd_v[i]\n \n Dmin = S.shape[0]\n \n dLdC = symmetric(dzdy[i])\n \n max_S, max_I = 
max_eigh(S, epsilon)\n dLdV = 2 * dLdC.dot(U).dot(max_S)\n \n dLdS = np.diag(np.where(max_I == 0, 1, 0)).copy()\n \n dLdS = dLdS.dot(U.T).dot(dLdC).dot(U)\n \n K = np.diag(S).copy().reshape(-1,)\n K = (np.repeat(K.reshape(1,-1), K.shape[0], axis=0) - np.repeat(K.reshape(-1,1), K.shape[0], axis=1)).T\n K = np.where(np.abs(K) < 1e-6, np.inf, K)\n K = 1. / K\n \n dzdx = U.dot(symmetric(K.T * (U.T.dot(dLdV))) + dDiag(dLdS)).dot(U.T)\n answer[i, :, :] = dzdx\n return answer\n", "repo_name": "svmakarychev/GMML_project", "sub_path": "SPDNet/spdnet/vl_folder/vl_myrec.py", "file_name": "vl_myrec.py", "file_ext": "py", "file_size_in_byte": 1596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "numpy.zeros", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.my_svd.my_svd", "line_number": 15, "usage_type": "call"}, {"api_name": "utils.max_eigh.max_eigh", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.symmetric.symmetric", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.max_eigh.max_eigh", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 45, "usage_type": "attribute"}, {"api_name": "utils.symmetric.symmetric", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.dDiag.dDiag", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "20839517366", "text": "import time\nimport glob\nimport tflite_runtime.interpreter as tflite\nimport tflite_utils\nimport os\nimport pandas as pd\n\n\nPATH_TO_IMAGES = './images/'\nPATH_TO_CONFIG = './config/'\nPATH_TO_LOG = './results/inference_times.csv'\nCONF_THRESHOLD = .4\nis_coral_plugged = True\nonly_inference_test = True\n\n\n# Define path to images and grab all paths\nimage_paths = [i.replace('\\\\', '/') for i in glob.glob(r'' + PATH_TO_IMAGES + '/*')]\n\n# Initialize log file for inference times\ntest_index = tflite_utils.get_log_index(PATH_TO_LOG)\n\n# Get model types and file names\nfolders = ['edgetpu', 'mobilenet_models', 'mobilenet', 'mobiledet', 'yolo4', 'yolo5', 'yolo5_models']\n\nfor folder in folders[:1]:\n print(\"\\nFolder: \" + folder)\n PATH_TO_MODELS = './models/' + folder + '/'\n models = [{'name': model, 'type': 'mobilenet'} for model in os.listdir(PATH_TO_MODELS)]\n # d = pd.read_csv(PATH_TO_CONFIG + folder + '.csv', squeeze=True)\n # models = [{d.columns[i]: item[i] for i in range(len(d.columns))} for item in d.values]\n for MODEL in models:\n # Overwrite name:\n # MODEL['name'] = 'ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite'\n print('\\n' + MODEL['name'] + ':')\n\n # Load the Tensorflow Lite model\n if is_coral_plugged:\n model_file, *device = MODEL['name'].split('@')\n delegate = [tflite.load_delegate(tflite_utils.EDGETPU_SHARED_LIB, {'device': device[0]} if device else {})] \\\n if 'edge' in MODEL['name'] else None\n interpreter = tflite.Interpreter(model_path=PATH_TO_MODELS + MODEL['name'], 
experimental_delegates=delegate)\n else:\n if 'edge' not in MODEL['name']:\n interpreter = tflite.Interpreter(model_path=PATH_TO_MODELS + MODEL['name'])\n else:\n print(\"Coral is not plugged, skipping \" + MODEL['name'])\n continue\n interpreter.allocate_tensors()\n # Get model details\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n for image_path in image_paths:\n # Load image and resize to expected shape [1xHxWx3]\n image, input_data = tflite_utils.prepare_input_data(image_path, input_details, MODEL['type'])\n # Perform the actual detection by running the model with the image as input\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n # Run the model\n start_time = time.time()\n interpreter.invoke()\n inference_time = str(round(time.time() - start_time, 4))\n print(\"\\tInference time: \" + inference_time + \" s\")\n\n if only_inference_test:\n # Save results\n tflite_utils.save_only_inference_time(\n test_index, MODEL, PATH_TO_LOG, image_path, inference_time)\n\n else:\n # Retrieve detection results\n boxes, classes, scores = tflite_utils.get_detection_results(\n interpreter, input_details,\n output_details, CONF_THRESHOLD, MODEL['type'])\n # Draw boxes\n image_result = tflite_utils.draw_boxes(\n input_details, image, boxes, classes, scores, CONF_THRESHOLD,\n MODEL['type'])\n # Save results\n tflite_utils.save_results(image_result, test_index, MODEL,\n PATH_TO_LOG, image_path, inference_time)\n\n", "repo_name": "kberci/Deep-Learning-based-Object-Detection", "sub_path": "inference_test/tflite_inference.py", "file_name": "tflite_inference.py", "file_ext": "py", "file_size_in_byte": 3501, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "52", "api": [{"api_name": "glob.glob", "line_number": 18, "usage_type": "call"}, {"api_name": "tflite_utils.get_log_index", "line_number": 21, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 29, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter.load_delegate", "line_number": 40, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter", "line_number": 40, "usage_type": "name"}, {"api_name": "tflite_utils.EDGETPU_SHARED_LIB", "line_number": 40, "usage_type": "attribute"}, {"api_name": "tflite_runtime.interpreter.Interpreter", "line_number": 42, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter", "line_number": 42, "usage_type": "name"}, {"api_name": "tflite_runtime.interpreter.Interpreter", "line_number": 45, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter", "line_number": 45, "usage_type": "name"}, {"api_name": "tflite_utils.prepare_input_data", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "tflite_utils.save_only_inference_time", "line_number": 68, "usage_type": "call"}, {"api_name": "tflite_utils.get_detection_results", "line_number": 73, "usage_type": "call"}, {"api_name": "tflite_utils.draw_boxes", "line_number": 77, "usage_type": "call"}, {"api_name": "tflite_utils.save_results", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "40071327960", "text": "from arcade import key\n\nfrom sys import platform\nif platform == \"darwin\":\n DEFAULT_FONT = \"/Library/Fonts/Arial.ttf\"\nelse:\n DEFAULT_FONT = \"arial\"\n\nSCREEN_TITLE = \"Asteroids\"\nSCREEN_WIDTH = 900\nSCREEN_HEIGHT = 900\nSPRITE_SCALING = 
1\nMAIN_MENU = \"assets/mainMenu2.png\"\nUPGRADE_SCREEN = \"assets/upgrades.png\"\n\n# Scaling of difficulty for each level increase\nLEVEL_SCALING = 0.1\n\n# How fast upgrades increase in cost\nPLAYER_UPGRADE_COST_SCALING = 1.2\n\n# How fast the player moves forward and backward\nPLAYER_ACCELERATION_RATE = 800\n# How much the acceleration upgrade increases acceleration\nPLAYER_ACCELERATION_SCALING = 0.1\n# How fast the player turns left and right\nPLAYER_TURN_SPEED = 135\nPLAYER_TURN_SPEED_SCALING = 0.1\nDEFAULT_KEYS = {\n key.A: \"left\",\n key.D: \"right\",\n key.W: \"up\",\n key.S: \"down\",\n key.SPACE: \"shoot\"\n}\n# The key that is used to return to the main menu or exit out of the game\nESCAPE_KEY = key.ESCAPE\n\n# How fast laser speed increases with upgrades\nLASER_SPEED_SCALING = 0.1\n# Default laser speed\nLASER_SPEED = 1000\n# This is the maximum angle relative to the ship at which the laser can be fired when there are multiple lasers\nLASER_MAX_ANGLE = 45\n# The amount of levels required to increase the multishot of the ship\nLASER_BONUS_LEVEL = 3\n# How fast damage increases\nLASER_DAMAGE_SCALING = 0.1\n# Default laser damage\nLASER_DAMAGE = 1\n# Starting bonus lasers\nDEFAULT_BONUS_LASER_COUNT = 0\n\nASTEROID_SPIN_RANGE = (-100, 100)\nASTEROID_SPEED_RANGE = (0, 200)\nASTEROID_ANGLE_RANGE = (0, 359)\n# Size range is as a percentage, 0.5 to 1.5 would be a range from 50% size to 150% size\nASTEROID_SIZE_RANGE = (0.9, 1.2)\n# The units refer to how many singular asteroids make up that size, the small asteroid is 1 since it is the baseline\nASTEROID_SIZE_UNITS = (6, 2, 1)\nASTEROID_SPRITE_LIST = [\"assets/meteorGrey_big1.png\", \"assets/meteorGrey_med1.png\", \"assets/meteorGrey_small1.png\"]\nASTEROID_HEALTHBAR_OFFSET_Y = 35\nASTEROID_HEALTHBAR_WIDTH_FACTOR = 1\nASTEROID_HEALTHBAR_HEIGHT = 20\n# The change in color, left is high health, right is low health\nASTEROID_HEALTH_COLOR_CHANGE = ((0, 255, 0), (255, 0, 0))\n\n# Particle constants\nPARTICLE_GRAVITY = 0\nPARTICLE_FADE_RATE = 12\nPARTICLE_MIN_SPEED = 2.5\nPARTICLE_SPEED_RANGE = 2.5\nPARTICLE_COUNT = 10\nPARTICLE_RADIUS = 3\nPARTICLE_COLORS = [\n (100, 100, 100),\n (150, 150, 150),\n (160, 160, 160)\n]\nPARTICLE_SPARKLE_CHANCE = 0.02\nSMOKE_START_SCALE = 0.25\nSMOKE_EXPANSION_RATE = 0.03\nSMOKE_FADE_RATE = 12\nSMOKE_RISE_RATE = 0\nSMOKE_CHANCE = 0.05", "repo_name": "ethancharles02/asteroids", "sub_path": "data/constants.py", "file_name": "constants.py", "file_ext": "py", "file_size_in_byte": 2525, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.platform", "line_number": 4, "usage_type": "name"}, {"api_name": "arcade.key.A", "line_number": 30, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 30, "usage_type": "name"}, {"api_name": "arcade.key.D", "line_number": 31, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 31, "usage_type": "name"}, {"api_name": "arcade.key.W", "line_number": 32, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 32, "usage_type": "name"}, {"api_name": "arcade.key.S", "line_number": 33, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 33, "usage_type": "name"}, {"api_name": "arcade.key.SPACE", "line_number": 34, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 34, "usage_type": "name"}, {"api_name": "arcade.key.ESCAPE", "line_number": 37, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 37, 
"usage_type": "name"}]} +{"seq_id": "29933406604", "text": "import threading\nimport tkinter\n\nfrom ttkthemes import ThemedStyle\nfrom tkinter import ttk\nfrom tkinter import *\n\nfrom Recursos import funcoes\nfrom Recursos import colors\nfrom Recursos import janela_opcoes\nfrom datetime import datetime\n\nFuncs = funcoes.Funcs\nColors = colors.Colors\nAppOpt = janela_opcoes.AppOpt\n\n# Função Principal da Aplicação\nclass Application(Funcs):\n def __init__(self):\n ##IMPORTANTE: todas as funções criadas devem ser chamadas em ordem e antes do mainloop()\n super().__init__()\n root = Tk()\n self.colors = Colors()\n self.MontaTabela()\n self.frame_1 = None\n self.frame_2 = None\n self.btn_confirmar = None\n self.lbl4 = None\n self.rd_caixa = None\n self.rd_pix = None\n self.scrollTabela = None\n self.root = root\n self.tela()\n self.frame_da_tela()\n self.criando_widgets_frame1()\n self.tabela_frame2()\n self.atualizarCabeca()\n self.select_tabela()\n root.mainloop()\n\n ## Configuração da Tela\n def tela(self):\n\n style = ThemedStyle(self.root)\n style.set_theme(self.colors.theme)\n \n self.root.title(\"Gerenciamento de Ganhos [CAIXA]\")\n self.root.configure(background=self.colors.background_color)\n #self.root.geometry(\"720x1080\")\n self.root.geometry(\"420x720\")\n self.root.resizable(True, True)\n # self.root.maxsize(width=720, height=1080)\n self.root.minsize(width=420, height=720)\n\n ## Frames da Tela, na minha aplicação foi dividido em 2 Frames\n def frame_da_tela(self):\n self.frame_1 = Frame(self.root, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.bg_color)\n self.frame_1.place(relx=0.02, rely=0.02, relwidth=0.96, relheight=0.36)\n self.frame_2 = Frame(self.root, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.bg_color)\n self.frame_2.place(relx=0.02, rely=0.40, relwidth=0.96, relheight=0.56)\n\n ## Criação dos Widgets do Frame 1\n def criando_widgets_frame1(self):\n ##Botões\n self.btn_confirmar = Button(self.frame_1, text=\"Confirmar\", bd=4, bg=self.colors.hgb_color, font=self.colors.fonte, command=self.confirmar, fg=self.colors.fg_green)\n self.btn_confirmar.place(relx=0.40, rely=0.85, relwidth=0.20, relheight=0.15)\n\n self.btn_outros = Button(self.frame_1, text=\"Outros\", bd=4, bg=self.colors.hgb_color, font=self.colors.fonte, command=AppOpt, fg=self.colors.fg_green)\n self.btn_outros.place(relx=0.80, rely=0.85, relwidth=0.20, relheight=0.15)\n ##Labels\n self.lbl1 = Label(self.frame_1, text=\"PIX: \", font=self.colors.fonte, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.background_color1, fg=self.colors.text_color)\n self.lbl1.place(relx=0.03, rely=0.03, relwidth=0.4)\n\n self.lbl2 = Label(self.frame_1, text=\"CARTEIRA: \", font=self.colors.fonte, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.background_color1, fg=self.colors.text_color)\n self.lbl2.place(relx=0.03, rely=0.15, relwidth=0.4)\n\n self.lbl3 = Label(self.frame_1, text=\"TOTAL: \", font=self.colors.fonte, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.background_color1, fg=self.colors.text_color)\n self.lbl3.place(relx=0.57, rely=0.03, relwidth=0.4)\n\n self.lbl_id = Label(self.frame_1, text=\"ID: \", font=self.colors.fonte, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.background_color1, fg=self.colors.text_color)\n 
self.lbl_id.place(relx=0.57, rely=0.15, relwidth=0.4)\n\n self.lbl_dia = Label(self.frame_1, text=f\"{datetime}\", font=self.colors.fonte_small, bd=4, highlightbackground=self.colors.hgb_color, highlightthickness=3, background=self.colors.background_color1, fg=self.colors.text_color)\n self.lbl_dia.place(relx=0.03, rely=0.85, relwidth=0.3)\n ##atualizar cabeçalho\n self.atualizarCabeca()\n ##Entry e Label da Entry\n self.entry_valor = Entry(self.frame_1, font=self.colors.fonte_entry, justify=\"center\", bd=2, background=self.colors.background_color1, fg=self.colors.fg_green)\n self.entry_valor.place(relx=0.25, rely=0.30, relwidth=0.5, relheight=0.2)\n self.lbl4 = Label(self.frame_1, text=\"R$\", font=self.colors.fonte, background=self.colors.background_color1, fg=self.colors.text_color)\n self.lbl4.place(relx=0.26, rely=0.31)\n ##Radiobuttons Pix e Caixa\n self.rd_opt = tkinter.IntVar(value=1)\n self.rd_caixa = Radiobutton(self.frame_1, text=\"CARTEIRA\", variable=self.rd_opt, value=1, font=self.colors.fonte, background=self.colors.background_color1, fg=self.colors.text_color)\n self.rd_caixa.place(relx=0.25, rely=0.55, relwidth=0.5)\n self.rd_pix = Radiobutton(self.frame_1, text=\"PIX\", variable=self.rd_opt, value=2, font=self.colors.fonte, background=self.colors.background_color1, fg=self.colors.text_color)\n self.rd_pix.place(relx=0.25, rely=0.65, relwidth=0.5)\n\n def tabela_frame2(self):\n\n style = ttk.Style()\n style.configure(\"Custom.Treeview\", font=self.colors.fonte_small, foreground=self.colors.text_color)\n style.configure(\"Treeview.Heading\", font=self.colors.fonte_small, background=\"#3c3f41\", foreground=self.colors.text_color)\n ##Criação da tabela, especificado em qual frame ela é filha, o height, e as colunas\n self.tabela = ttk.Treeview(self.frame_2, height=3, columns=(\"id\", \"data\", \"forma\", \"total\"))\n self.tabela.configure(style=\"Treeview\")\n self.tabela.configure(style=\"Treeview.Heading\")\n self.tabela.configure(style=\"Custom.Treeview\")\n ##Heading, a criação do cabeçalho\n self.tabela.heading(\"#0\", text=\"\")\n self.tabela.heading(\"#1\", text=\"Id\", anchor=\"w\")\n self.tabela.heading(\"#2\", text=\"Data\", anchor=\"w\")\n self.tabela.heading(\"#3\", text=\"Forma\", anchor=\"w\")\n self.tabela.heading(\"#4\", text=\"Total\", anchor=\"w\")\n ##Alguns ajustes, CURIOSIDADE: essa tabela funciona como se \"500\" fosse 100%, ou seja, 200/500 = 0,4 que é 40%\n self.tabela.column(\"#0\", width=1)\n self.tabela.column(\"#1\", width=10)\n self.tabela.column(\"#2\", width=90)\n self.tabela.column(\"#3\", width=50)\n self.tabela.column(\"#4\", width=50)\n self.tabela.place(relx=0.01, rely=0.01, relwidth=0.98, relheight=0.95)\n ##Scroll da tabela\n self.scrollTabela = Scrollbar(self.frame_2, orient=\"vertical\")\n self.tabela.configure(yscrollcommand=self.scrollTabela.set)\n self.scrollTabela.place(relx=0.97, rely=0.06, relwidth=0.02, relheight=0.90)\n ##DoubleClick\n # self.tabela.bind(\"\", self.OnDoubleClick)\n\nif __name__ == \"__main__\":\n Application()", "repo_name": "dotghst/ProjetoDeControleDeGanho", "sub_path": "ProjetosXerox/GerenciadorDeGanhos.pyw", "file_name": "GerenciadorDeGanhos.pyw", "file_ext": "pyw", "file_size_in_byte": 6989, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Recursos.funcoes.Funcs", "line_number": 13, "usage_type": "attribute"}, {"api_name": "Recursos.funcoes", "line_number": 13, "usage_type": "name"}, {"api_name": "Recursos.colors.Colors", 
"line_number": 14, "usage_type": "attribute"}, {"api_name": "Recursos.colors", "line_number": 14, "usage_type": "name"}, {"api_name": "Recursos.janela_opcoes.AppOpt", "line_number": 15, "usage_type": "attribute"}, {"api_name": "Recursos.janela_opcoes", "line_number": 15, "usage_type": "name"}, {"api_name": "ttkthemes.ThemedStyle", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "tkinter.IntVar", "line_number": 93, "usage_type": "call"}, {"api_name": "tkinter.ttk.Style", "line_number": 101, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 101, "usage_type": "name"}, {"api_name": "tkinter.ttk.Treeview", "line_number": 105, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "12162857720", "text": "#! /usr/bin/env python3\n\n\nprint(r\"\"\"\\\n\n`-:-. ,-;\"`-:-. ,-;\"`-:-. ,-;\"`-:-. ,-;\"\n `=`,'=/ `=`,'=/ `=`,'=/ `=`,'=/\n y==/ y==/ y==/ y==/\n ,=,-<=`. ,=,-<=`. ,=,-<=`. ,=,-<=`.\n,-'-' `-=_,-'-' `-=_,-'-' `-=_,-'-' `-=_\n \"\"\")\n\nprint('QuickGene v. 1.0.0')\nprint('Copyrights: Lukasz Sitko')\nprint('Contact: sitkolukasz98@gmail.com')\n\nimport argparse\nfrom Bio.Blast import NCBIWWW\nfrom Bio.Blast import NCBIXML\n\ndef run(args):\n with open(args.input, 'r') as filename:\n x = str(filename.readlines()[1:])\n x = x.upper()\n\n blast_query = NCBIWWW.qblast('blastn', 'nt', x)\n\n with open(\"my_blast.xml\", \"w\") as save_to:\n save_to.write(blast_query.read())\n blast_query.close()\n\n blast_results = open(\"my_blast.xml\", 'r')\n blast_record = NCBIXML.read(blast_results)\n\n for alignment in blast_record.alignments:\n for hsp in alignment.hsps:\n print('****Alignment****')\n print('sequence:', alignment.title)\n print('e value:', hsp.expect)\n print('Score:', hsp.score)\n print('Alignment length:', hsp.align_length)\n print('Identities', hsp.identities)\n print(' ')\n\n\n#Second part\n\n out = [(x[i:i+1]) for i in range(0, len(x), 1)]\n\n#Base pairs count\n\n A=out.count('A')\n T=out.count('T')\n G=out.count('G')\n C=out.count('C')\n\n#Output of sequence nucleotides composition\n\n print('Composition of A: ', A)\n print('Composition of T: ', T)\n print('Composition of G: ', G)\n print('Composition of C: ', C)\n\n#Start and stop codons, number of putative coding sequences\n\n out1 = [(x[i:i+3]) for i in range(0, len(x), 3)]\n\n start_codon=out1.count('ATG')\n stop_codon_1=out1.count('TAA')\n stop_codon_2=out1.count('TAG')\n stop_codon_3=out1.count('TGA')\n\n stop_codons=stop_codon_1+stop_codon_2+stop_codon_3\n\n\n if start_codon>stop_codons:\n start_codon=stop_codons\n\n print('Number of start codons:', start_codon)\n print('Number of stop codons:', stop_codons)\n\ndef main():\n\tparser=argparse.ArgumentParser(description=\"Performs BLASTN search on input .fasta sequence\")\n\tparser.add_argument(\"-in\",help=\".fasta input file\" ,dest=\"input\", type=str, required=True)\n\tparser.set_defaults(func=run)\n\targs=parser.parse_args()\n\targs.func(args)\n\nif __name__==\"__main__\":\n\tmain()\n", "repo_name": "LukaszSitko/Quick_gene", "sub_path": "quick_gene.py", "file_name": "quick_gene.py", "file_ext": "py", "file_size_in_byte": 2401, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "Bio.Blast.NCBIWWW.qblast", "line_number": 26, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIWWW", "line_number": 26, "usage_type": "name"}, {"api_name": 
"Bio.Blast.NCBIXML.read", "line_number": 33, "usage_type": "call"}, {"api_name": "Bio.Blast.NCBIXML", "line_number": 33, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 83, "usage_type": "call"}]} +{"seq_id": "74157393124", "text": "import re\nimport requests\nfrom datetime import date, datetime\nfrom .addr import *\nfrom .util import case_insensitive_lookup, strip_html_tags\n\n\nclass UnknownCollectionType(Exception):\n pass\n\n\nclass UnknownCollectionDate(Exception):\n pass\n\n\nclass CollectionResponseError(Exception):\n pass\n\n\nclass CollectionType:\n GARBAGE = 0\n RECYCLING = 1\n\n _MAPPING = {\n \"garbage\": GARBAGE,\n \"recycling\": RECYCLING\n }\n\n # Raises UnknownCollectionType when the text cannot be mapped\n # to a known type\n @staticmethod\n def from_text(text):\n try:\n return case_insensitive_lookup(CollectionType._MAPPING, text)\n except KeyError as ex:\n raise UnknownCollectionType from ex\n\n\n# Returns datetime.date on success\n# Raises UnknownCollectionType if the collection type cannot be interpreted\n# Raises AddrError if the address cannot be interpreted\n# TODO: Could the result of this be saved in a session to avoid having to\n# query again?\ndef get_collection_date(coll_type_str, addr_str):\n coll_type = CollectionType.from_text(coll_type_str)\n addr_parts = AddrParts.from_text(addr_str)\n\n try:\n response_text = _make_request(addr_parts)\n except requests.exceptions.HTTPError as ex:\n raise CollectionResponseError from ex\n if coll_type == CollectionType.GARBAGE:\n return _read_garbage_date(response_text)\n else:\n return _read_recycling_date(response_text)\n\n\ndef _make_request(addr):\n URL = \"https://itmdapps.milwaukee.gov/DpwServletsPublic/garbage_day\"\n PARAMS = {\"embed\": \"Y\"}\n data = {\n \"laddr\": addr.st_num,\n \"sdir\": addr.st_dir,\n \"sname\": addr.st_name,\n \"stype\": addr.st_suffix,\n \"embed\": \"Y\",\n \"Submit\": \"Submit\"\n }\n resp = requests.post(URL, params=PARAMS, data=data)\n resp.raise_for_status()\n return resp.text\n\n# TODO: Remove the duplication of the next two functions\n\n# Returns a datetime.date on success\n# Raises UnknownCollectionDate if the garbage date could not be determined\n# Raises CollectionResponseError if the response cannot be interpreted\n\n\ndef _read_garbage_date(response_text):\n text = strip_html_tags(response_text)\n garbage_day = _match_garbage_day(text)\n if garbage_day is None:\n if _match_garbage_day_undetermined(text):\n raise UnknownCollectionDate\n else:\n raise CollectionResponseError\n return garbage_day\n\n\n# Returns a datetime.date on success\n# Raises UnknownCollectionDate if the recycling date could not be determined\n# Raises CollectionResponseError if the response cannot be interpreted\ndef _read_recycling_date(response_text):\n text = strip_html_tags(response_text)\n recycling_day = _match_recycling_day(text)\n if recycling_day is None:\n if _match_recycling_day_undetermined(text):\n raise UnknownCollectionDate\n else:\n raise CollectionResponseError\n return recycling_day\n\n\n# Try to find the garbage day by matching known text.\n# Returns a datetime.date if the garbage day is found, otherwise None\ndef _match_garbage_day(text):\n prog = re.compile(r\"The next garbage collection pickup for this location \"\n r\"is: \\w+ (\\w+) (\\d+), (\\d+)\")\n match = prog.search(text)\n if match is None:\n return None\n\n date_str = \"{} {:0>2} {}\".format(*match.groups())\n try:\n # TODO: Might run into a locale bug here. 
Better to set locale\n # explicitly to en_US. Should add a test for this.\n return datetime.strptime(date_str, \"%B %d %Y\").date()\n except ValueError:\n return None\n\n\n# Try to see if the garbage day could not be determined by matching known text.\n# Returns True if garbage day could not be determined, False otherwise\ndef _match_garbage_day_undetermined(text):\n return text.find(\"Your garbage collection schedule could not be determined\") != -1\n\n\n# Try to find the recycling day by matching known text.\n# Returns a datetime.date if the recycling day is found, otherwise None\ndef _match_recycling_day(text):\n # TODO: Remove duplication with garbage day\n prog = re.compile(r\"The next recycling collection pickup for this location \"\n r\"is: \\w+ (\\w+) (\\d+), (\\d+)\")\n match = prog.search(text)\n if match is None:\n return None\n\n date_str = \"{} {:0>2} {}\".format(*match.groups())\n try:\n # TODO: Might run into a locale bug here. Better to set locale\n # explicitly to en_US. Should add a test for this.\n return datetime.strptime(date_str, \"%B %d %Y\").date()\n except ValueError:\n return None\n\n\n# Try to see if the recycling day could not be determined.\n# Currently there is no text returned that describes the inability to\n# determine the recycling date, so we can't determine this directly.\n# We can, however, sense it through the garbage date, which will return\n# undetermined in this case as well. This assumes that we will never be\n# able to find one date without also finding the other.\n# Returns True if recycling day could not be determined, False otherwise\ndef _match_recycling_day_undetermined(text):\n return _match_garbage_day_undetermined(text)\n", "repo_name": "njpaul/mke_sanitation", "sub_path": "sanitation/collection.py", "file_name": "collection.py", "file_ext": "py", "file_size_in_byte": 5187, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "util.case_insensitive_lookup", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 50, "usage_type": "attribute"}, {"api_name": "addr.st_num", "line_number": 62, "usage_type": "attribute"}, {"api_name": "addr.st_dir", "line_number": 63, "usage_type": "attribute"}, {"api_name": "addr.st_name", "line_number": 64, "usage_type": "attribute"}, {"api_name": "addr.st_suffix", "line_number": 65, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 69, "usage_type": "call"}, {"api_name": "util.strip_html_tags", "line_number": 81, "usage_type": "call"}, {"api_name": "util.strip_html_tags", "line_number": 95, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 118, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "69796430884", "text": "# -*- coding=utf-8 -*-\n__author__ = 'ghostclock'\n\n\"\"\"\nhttps://github.com/muchrooms/zheye\n识别知乎倒立验证码,登录\n\"\"\"\n\nimport requests\nimport shutil\nimport time\nimport re\ntry:\n import cookielib\nexcept:\n import http.cookiejar as cookielib\n\nfrom zheye import zheye\nz = zheye()\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; 
rv:55.0) Gecko/20100101 Firefox/55.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\",\n \"Accept-Encoding\": \"gzip, deflate, br\"\n}\n\n\nsession = requests.session()\nsession.cookies = cookielib.LWPCookieJar(filename='cookies.txt')\ntry:\n session.cookies.load(ignore_discard=True)\nexcept:\n print(\"Cookie 未能加载\")\n\ndef get_xsrf():\n \"\"\"\n 获取xsrf\n \"\"\"\n response = session.get(\"https://www.zhihu.com/\", headers=headers)\n match_obj = re.match('.*name=\"_xsrf\" value=\"(.*?)\"', response.text, re.DOTALL)\n if match_obj:\n _xsrf = match_obj.group(1)\n print(\"_xsrf = \", _xsrf)\n return _xsrf\n else:\n return \"\"\n\n\ndef get_captcha():\n \"\"\"\n 获取中文倒立验证码\n \"\"\"\n time_date = str(int(time.time() * 1000))\n captcha_url = \"https://www.zhihu.com/captcha.gif?r={}&type=login&lang=cn\".format(time_date)\n response = session.get(url=captcha_url, headers=headers, stream=True)\n if 200 == response.status_code:\n with open(\"cn_captcha.gif\", \"wb\") as file:\n response.raw.decode_content = True\n shutil.copyfileobj(response.raw, file)\n\n positions = z.Recognize(\"cn_captcha.gif\") # 返回的tuple的第二个值是x坐标,第一个值是y坐标,笛卡尔坐标系 [(y, x), (y, x)]\n print(positions)\n\n pos = positions\n\n poss = []\n for tup in pos:\n temp = []\n x = float(format(tup[1] / 2, \"0.2f\"))\n y = float(format(tup[0] / 2, \"0.2f\"))\n temp.append(x)\n temp.append(y)\n\n poss.append(temp)\n print(\"处理后的坐标 \", poss)\n return poss\n\n\ndef get_captcha_str():\n \"\"\"\n 获取验证码字符串\n \"\"\"\n pos = get_captcha()\n captcha_str = '{\"img_size\": [200, 44], \"input_points\": %s}' % pos\n print(\"captcha_str \", captcha_str)\n return captcha_str\n\n\ndef zhihu_login(account, password):\n \"\"\"\n 发起登录请求\n \"\"\"\n def login_request(login_url, login_key):\n post_data = {\n \"_xsrf\": get_xsrf(),\n login_key: account,\n \"password\": password,\n }\n # 不需要验证码,直接登录\n login_page = session.post(login_url, data=post_data, headers=headers)\n code = login_page.status_code\n if 200 == code:\n login_status = login_page.json()\n if 1 == login_status.get(\"r\"):\n captcha_str = get_captcha_str()\n post_data[\"captcha\"] = captcha_str\n post_data[\"captcha_type\"] = \"cn\"\n\n login_page = session.post(login_url, data=post_data, headers=headers)\n login_status = login_page.json()\n if \"登录成功\" == login_status.get(\"msg\"):\n print(\"登录成功\")\n # 保存cookies到本地\n # 下次可以使用cookie直接登录,不需要输入账号和密码\n session.cookies.save()\n else:\n print(login_status.get(\"msg\"))\n else:\n print(login_page.content)\n\n # 知乎登录方式\n if re.match(\"^1\\d{10}\", account):\n \"\"\"\n 手机号登录\n \"\"\"\n print(\"要登录的手机号为: \", account)\n login_url = \"https://www.zhihu.com/login/phone_num\"\n login_request(login_url, \"phone_num\")\n else:\n if \"@\" in account:\n \"\"\"\n 邮箱登录\n \"\"\"\n print(\"要登录的邮箱为: \", account)\n login_url = \"https://www.zhihu.com/login/email\"\n login_request(login_url, \"email\")\n\ndef is_login():\n \"\"\"\n 通过个人中心页面返回状态码来判断是否为登录状态\n \"\"\"\n inbox_url = \"https://www.zhihu.com/inbox\"\n response = session.get(inbox_url, headers=headers)\n if 200 != response.status_code:\n return False\n else:\n return True\n\nif __name__ == \"__main__\":\n zhihu_login(\"youaccount\", \"youpassword\")\n # if is_login():\n # print(\"你已登录\")\n # else:\n # print(\"请登录\")\n\n", "repo_name": "GhostClock/LoginZhihu", "sub_path": "LoginForCaptch_cn.py", "file_name": "LoginForCaptch_cn.py", "file_ext": "py", "file_size_in_byte": 4551, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "zheye.zheye", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 29, "usage_type": "call"}, {"api_name": "http.cookiejar.LWPCookieJar", "line_number": 30, "usage_type": "call"}, {"api_name": "http.cookiejar", "line_number": 30, "usage_type": "name"}, {"api_name": "re.match", "line_number": 41, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 60, "usage_type": "call"}, {"api_name": "re.match", "line_number": 123, "usage_type": "call"}]} +{"seq_id": "35335486300", "text": "from flask import Blueprint, jsonify, request\nfrom models import returnDBConnection, authenticate_token\n\nimport os\nfrom dotenv import load_dotenv\nimport stripe\nimport json\nfrom datetime import datetime, timedelta\n\nload_dotenv()\n\npayment_bp = Blueprint('payment_bp', __name__)\n\nstripe.api_key = os.getenv(\"stripe_secret_key\")\n\n@payment_bp.route(\"/stripe-webhook\", methods=[\"POST\"])\ndef stripe_webhook():\n\tpayload = request.get_data(as_text=True)\n\tsig_header = request.headers.get(\"Stripe-Signature\")\n\n\ttry:\n\t\tevent = stripe.Webhook.construct_event(\n\t\t\tpayload, sig_header, os.getenv(\"stripe_webhook_secret_key\")\n\t\t)\n\n\texcept ValueError as e:\n\t\t# Invalid payload\n\t\treturn \"Invalid payload\", 400\n\texcept stripe.error.SignatureVerificationError as e:\n\t\t# Invalid signature\n\t\treturn \"Invalid signature\", 400\n\n\t# Handle the checkout.session.completed event\n\tif event[\"type\"] == \"invoice.paid\":\n\t\tpayload = json.loads(payload)\n\n\t\tstripeSubscriptionID = payload[\"data\"][\"object\"][\"lines\"][\"data\"][0][\"subscription\"]\n\n\t\t# getting user ID from metadata in subscription\n\t\tsubscription_data = stripe.Subscription.retrieve(stripeSubscriptionID)\n\t\tuserID = subscription_data[\"metadata\"][\"userID\"]\n\n\t\t# if they have paid their invoice, create a record in the userPayment table\n\t\t# if they have renewed their subscription, a record should already exist so the code will update their subscription date\n\n\t\trenewal_date = (datetime.now()+timedelta(days=34)).strftime(\"%Y-%m-%d\")\n\n\t\tconn, cur = returnDBConnection()\n\n\t\tcur.execute(f\"\"\"\n\t\t\tINSERT INTO userPayment(userID, subscriptionEnds, stripeSubscriptionID) VALUES ('{userID}', '{renewal_date}', '{stripeSubscriptionID}')\n\t\t\tON DUPLICATE KEY\n\t\t\tUPDATE subscriptionEnds = '{renewal_date}', stripeSubscriptionID='{stripeSubscriptionID}'\n\t\t\"\"\")\n\n\t\tcur.execute(f\"\"\"\n\t\t\tUPDATE users\n\t\t\tSET userPaid = 1\n\t\t\tWHERE userID = '{userID}'\n\t\t\"\"\")\n\n\t\tconn.commit()\n\t\tconn.close()\n\n\treturn \"Success\", 200\n\n@payment_bp.route(\"/cancel_subscription\", methods=[\"POST\"])\ndef cancel_subscription():\n\tif not authenticate_token(request.headers):\n\t\treturn \"Invalid API key\", 403\n\n\tconn, cur = returnDBConnection()\n\t\n\tuserID = request.json[\"userID\"]\n\n\tcur.execute(f\"\"\"\n\t\tSELECT stripeSubscriptionID FROM userPayment WHERE userID='{userID}'\n\t\"\"\")\n\tstripeSubscriptionID = cur.fetchone()[0]\n\n\tstripe.Subscription.delete(stripeSubscriptionID)\n\n\treturn \"Success\"", "repo_name": "davidliebs/DrumProject", "sub_path": "api/payment/payment.py", "file_name": "payment.py", "file_ext": "py", "file_size_in_byte": 2365, "program_lang": "python", "lang": 
"en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 12, "usage_type": "call"}, {"api_name": "stripe.api_key", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.get_data", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "stripe.Webhook.construct_event", "line_number": 22, "usage_type": "call"}, {"api_name": "stripe.Webhook", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 23, "usage_type": "call"}, {"api_name": "stripe.error", "line_number": 29, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 35, "usage_type": "call"}, {"api_name": "stripe.Subscription.retrieve", "line_number": 40, "usage_type": "call"}, {"api_name": "stripe.Subscription", "line_number": 40, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 46, "usage_type": "call"}, {"api_name": "models.returnDBConnection", "line_number": 48, "usage_type": "call"}, {"api_name": "models.authenticate_token", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "models.returnDBConnection", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "stripe.Subscription.delete", "line_number": 81, "usage_type": "call"}, {"api_name": "stripe.Subscription", "line_number": 81, "usage_type": "attribute"}]} +{"seq_id": "32515475729", "text": "#!/usr/bin/env python\nimport rospy\nimport time\nimport sys\nfrom stella_nav_recognizer.recognizer import Recognizer\nfrom stella_nav_core.navigation import Navigation\nfrom stella_nav_core.state_machine import StateMachine\nimport importlib\n\n\ndef _get_class(plugin_modules, class_name):\n c = None\n for module in plugin_modules:\n if module in sys.modules:\n if hasattr(sys.modules[module], class_name):\n c = getattr(sys.modules[module], class_name)\n else:\n rospy.error(\"module {} is not found\".format(module))\n if c is None:\n raise ValueError(\"class {} is not found in {}\".format(class_name, plugin_modules))\n return c\n\n\ndef _except_key(key, d):\n return {k: d[k]for k in d if k != key}\n\n\ndef _get_recognizer(key, value, plugin_modules):\n value[\"type\"] = key\n return _get_obj({}, value, plugin_modules)\n\n\ndef _get_obj(args, value, plugin_modules):\n c = _get_class(plugin_modules, value[\"type\"])\n assert value[\"type\"] == c.__name__\n cat_args = {}\n cat_args.update(args)\n cat_args.update(value)\n rospy.logdebug(\"initialize: {}, args: {}\".format(c, cat_args))\n try:\n obj = c(**_except_key(\"type\", cat_args))\n except TypeError as e:\n raise TypeError(\"initializing {}: {}\".format(c, 
e))\n return obj\n\n\nclass RecognizerParser(object):\n def __init__(self, plugin_modules):\n self._plugin_modules = plugin_modules\n\n def _parse_recognizer(self, to_parse):\n recognizers = []\n for key, value in to_parse.iteritems():\n if \"All\" == key:\n recognizer = Recognizer.all(self._parse_recognizer(value))\n elif \"Any\" == key:\n recognizer = Recognizer.any(self._parse_recognizer(value))\n else:\n recognizer = _get_recognizer(key, value, self._plugin_modules)\n recognizers.append(recognizer)\n return recognizers\n\n def parse_recognizer(self, to_parse):\n recognizers = {}\n for key, item in to_parse.iteritems():\n recognizers_top = self._parse_recognizer(item)\n if len(recognizers_top) == 1:\n recognizers[key] = recognizers_top[0]\n else:\n recognizers[key] = Recognizer.any(recognizers_top)\n return recognizers\n\ndef main():\n rospy.init_node(\"stella_nav_node\")\n recognizer_plugin_modules = rospy.get_param(\"~recognizer_plugin_modules\")\n planner_plugin_modules = rospy.get_param(\"~planner_plugin_modules\")\n costmap_plugin_modules = rospy.get_param(\"~costmap_plugin_modules\")\n listener_plugin_modules = rospy.get_param(\"~listener_plugin_modules\")\n observer_plugin_modules = rospy.get_param(\"~observer_plugin_modules\")\n handler_plugin_modules = rospy.get_param(\"~handler_plugin_modules\")\n plugin_modules_list = [\n recognizer_plugin_modules, planner_plugin_modules, costmap_plugin_modules,\n listener_plugin_modules, observer_plugin_modules, handler_plugin_modules]\n for plugin_modules in plugin_modules_list:\n for module in plugin_modules:\n importlib.import_module(module)\n start_nearest_goal = rospy.get_param(\"~start_nearest_goal\", False)\n recognizer_parser = RecognizerParser(recognizer_plugin_modules)\n recognizers = recognizer_parser.parse_recognizer(rospy.get_param(\"~recognizers\"))\n default_achievement_recognizer = recognizers[\"default\"]\n achievement_recognizer_sub = recognizers[\"sub\"]\n recognizers = recognizer_parser.parse_recognizer(rospy.get_param(\"~recognizers\"))\n costmaps = {key: _get_obj({}, value, costmap_plugin_modules) for key, value in rospy.get_param(\"~costmaps\").iteritems()}\n\n planner_args = {\"costmaps\": costmaps}\n local_planners = {key: _get_obj(planner_args, value, planner_plugin_modules) for key, value in rospy.get_param(\"~local_planners\").iteritems()}\n state_to_local_planner = {\n key: local_planners[value]\n for key, value in rospy.get_param(\"~state_to_local_planner\").iteritems()\n }\n global_planners = {key: _get_obj(planner_args, value, planner_plugin_modules) for key, value in rospy.get_param(\n \"~global_planners\").iteritems()}\n state_to_global_planner = {\n key: global_planners[value]\n for key, value in rospy.get_param(\"~state_to_global_planner\").iteritems()\n }\n state_machine = StateMachine(**rospy.get_param(\n \"~state_machine\",\n dict(states=[\"initial\"], transitions=[])))\n\n handlers = {key: _get_obj({}, value, handler_plugin_modules) for key, value in rospy.get_param(\"~handlers\").iteritems()}\n plan_handlers = {\"local_plan\": handlers[\"local_plan\"], \"global_plan\": handlers[\"global_plan\"]}\n gridmap_handlers = {\"robot_costmap\": handlers[\"robot_costmap\"], \"global_costmap\": handlers[\"global_costmap\"]}\n\n controller_frequency = rospy.get_param(\"~controller_frequency\", 10.0)\n navigation = Navigation(\n state_to_local_planner, state_to_global_planner, state_machine,\n handlers[\"goal\"], handlers[\"subgoal\"], recognizers, plan_handlers, gridmap_handlers[\"global_costmap\"],\n 
controller_frequency)\n initial_setup = {\"obstacle\": False, \"pose\": False}\n\n observer_args = {\"handlers\": handlers, \"recognizers\": recognizers}\n observers = {key: _get_obj(observer_args, value, observer_plugin_modules) for key, value in rospy.get_param(\"~observers\").iteritems()}\n\n listener_args = {\"handlers\": handlers, \"observers\": observers, \"initial_setup\": initial_setup, \"costmaps\": costmaps, \"local_planners\": local_planners, \"state_machine\": state_machine, \"navigation\": navigation}\n listeners = {key: _get_obj(listener_args, value, listener_plugin_modules) for key, value in rospy.get_param(\"~listeners\").iteritems()}\n\n for key, value in rospy.get_param(\"~add_listener\").items():\n for l in value:\n observers[key].add_listener(listeners[l])\n workers = [\n handlers[\"common_msg\"],\n navigation]\n workers.extend(observers.values())\n rospy.loginfo(\"waiting for initialize\")\n while not all(initial_setup.values()):\n time.sleep(1)\n if rospy.is_shutdown():\n return\n if \"pose_update\" in listeners:\n initial_pose = listeners[\"pose_update\"].initial_pose\n else:\n initial_pose = None\n if start_nearest_goal and initial_pose is not None:\n nearest_goal_idx = goal_handler.get_nearest_goal_idx((initial_pose.pose.position.x, initial_pose.pose.position.y))\n goal_handler.set_current_goal(nearest_goal_idx)\n goals = goal_handler.get_goals()\n rospy.loginfo(\"nearest goal is {} {}\".format(nearest_goal_idx, goals[nearest_goal_idx]))\n rospy.loginfo(\"send \\\"start\\\" trigger to start\")\n navigation.start()\n rospy.logdebug(\"threads started\")\n rospy.spin()\n for worker in workers:\n worker.join()\n\ndef profile():\n import yappi\n # yappi.set_clock_type(\"WALL\")\n yappi.start()\n main()\n columns = {0:(\"name\",80), 1:(\"ncall\", 5), 2:(\"tsub\", 8), 3:(\"ttot\", 8), 4:(\"tavg\",8)}\n with open(\"yappi_prof.txt\", \"w\") as of:\n yappi.get_func_stats().strip_dirs().sort(\"tsub\").print_all(out=of, columns=columns)\n yappi.get_thread_stats().print_all()\n\nif __name__ == '__main__':\n use_profiler = rospy.get_param(\"use_profiler\", False)\n if use_profiler:\n profile()\n else:\n main()\n", "repo_name": "ymd-stella/stella_nav", "sub_path": "stella_nav_core/src/stella_nav_core/stella_nav_node.py", "file_name": "stella_nav_node.py", "file_ext": "py", "file_size_in_byte": 7338, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.modules", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 16, "usage_type": "attribute"}, {"api_name": "rospy.error", "line_number": 18, "usage_type": "call"}, {"api_name": "rospy.logdebug", "line_number": 39, "usage_type": "call"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer.all", "line_number": 55, "usage_type": "call"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer", "line_number": 55, "usage_type": "name"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer.any", "line_number": 57, "usage_type": "call"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer", "line_number": 57, "usage_type": "name"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer.any", "line_number": 70, "usage_type": "call"}, {"api_name": "stella_nav_recognizer.recognizer.Recognizer", "line_number": 70, "usage_type": "name"}, {"api_name": "rospy.init_node", "line_number": 74, "usage_type": "call"}, {"api_name": 
"rospy.get_param", "line_number": 75, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 76, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 77, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 78, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 79, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 80, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 86, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 87, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 89, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 92, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 93, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 96, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 99, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 101, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 105, "usage_type": "call"}, {"api_name": "stella_nav_core.state_machine.StateMachine", "line_number": 107, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 107, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 111, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 115, "usage_type": "call"}, {"api_name": "stella_nav_core.navigation.Navigation", "line_number": 116, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 123, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 126, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 128, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 135, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 138, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 148, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 149, "usage_type": "call"}, {"api_name": "rospy.logdebug", "line_number": 151, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 152, "usage_type": "call"}, {"api_name": "yappi.start", "line_number": 159, "usage_type": "call"}, {"api_name": "yappi.get_func_stats", "line_number": 163, "usage_type": "call"}, {"api_name": "yappi.get_thread_stats", "line_number": 164, "usage_type": "call"}, {"api_name": "rospy.get_param", "line_number": 167, "usage_type": "call"}]} +{"seq_id": "70498061925", "text": "import config, json\nfrom iex import IEXStock\nfrom datetime import datetime, timedelta\n\nsymbol = st.sidebar.text_input(\"Symbol\", value='MSFT')\nstock = IEXStock(config.IEX_TOKEN, symbol)\n\n\ncompany = stock.get_company_info()\njson.dumps(company)\ncompany['companyName']\ncompany['industry']\ncompany['description']\n\n\nnews = stock.get_company_news()\nclient.set(news_cache_key, json.dumps(news))\n\ndt = datetime.utcfromtimestamp(article['datetime']/1000).isoformat()\nprint(f\"Posted by {article['source']} at {dt}\")\narticle['url']\narticle['summary']\n \nstats = stock.get_stats()\nstats['peRatio']\nstats['forwardPERatio']\nstats['pegRatio']\nstats['priceToSales']\nstats['priceToBook']\n\nstats['revenue']\nstats['totalCash']\nstats['currentDebt']\nstats['day200MovingAvg']\nstats['day50MovingAvg']\n\nfundamentals = stock.get_fundamentals('quarterly')\nfor quarter in fundamentals:\n quarter['filingDate']\n 
quarter['revenue']\n quarter['incomeNet']\n \ndividends = stock.get_dividends()\njson.dumps(dividends)\nfor dividend in dividends:\n dividend['paymentDate']\n dividend['amount']\n\ninstitutional_ownership = stock.get_institutional_ownership()\njson.dumps(institutional_ownership)\nfor institution in institutional_ownership:\n institution['date']\n institution['entityProperName']\n institution['reportedHolding']\n\ninsider_transactions = stock.get_insider_transactions()\njson.dumps(insider_transactions)\nfor transaction in insider_transactions:\n transaction['filingDate']\n transaction['fullName']\n transaction['transactionShares']\n transaction['transactionPrice']\n", "repo_name": "alexanu/Python_Trading_Snippets", "sub_path": "data/IEX/play_with_IEX.py", "file_name": "play_with_IEX.py", "file_ext": "py", "file_size_in_byte": 1577, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 20, "dataset": "github-code", "pt": "52", "api": [{"api_name": "iex.IEXStock", "line_number": 6, "usage_type": "call"}, {"api_name": "config.IEX_TOKEN", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 44, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "35374948416", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[5]:\n\n\nimport numpy as np\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Subset\nfrom torch.utils.data import random_split\nfrom torchvision import datasets\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport os\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nimport sys\nimport torch.linalg\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset\nimport torch.optim as optim\n#from torch.optim import\nfrom torchvision import transforms\nimport datetime\nfrom torch.nn.utils import parameters_to_vector\n\n# In[12]:\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Deep_Emotion(nn.Module):\n def __init__(self):\n '''\n Deep_Emotion class contains the network architecture.\n '''\n super(Deep_Emotion,self).__init__()\n self.use_cuda = torch.cuda.is_available()\n self.conv1 = nn.Conv2d(1,10,3)\n self.conv2 = nn.Conv2d(10,10,3)\n self.pool2 = nn.MaxPool2d(2,2)\n\n self.conv3 = nn.Conv2d(10,10,3)\n self.conv4 = nn.Conv2d(10,10,3)\n self.pool4 = nn.MaxPool2d(2,2)\n\n self.norm = nn.BatchNorm2d(10)\n\n self.fc1 = nn.Linear(810*2,50)\n self.fc2 = nn.Linear(50,7)\n\n self.localization = nn.Sequential(\n nn.Conv2d(1, 8, kernel_size=7),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True),\n nn.Conv2d(8, 10, kernel_size=5),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True)\n )\n\n\n self.localization_2 = nn.Sequential(\n nn.Conv2d(1, 24, kernel_size=5),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True),\n nn.Conv2d(24, 32, kernel_size=3),\n nn.MaxPool2d(2, stride=2),\n nn.ReLU(True)\n )\n\n self.fc_loc = 
nn.Sequential(\n nn.Linear(640, 32),\n nn.ReLU(True),\n nn.Linear(32, 3 * 2)\n )\n self.fc_loc_2 = nn.Sequential(\n nn.Linear(3200, 64),\n nn.ReLU(True),\n nn.Linear(64, 3 * 2)\n )\n\n self.fc_loc[2].weight.data.zero_()\n self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n self.fc_loc_2[2].weight.data.zero_()\n self.fc_loc_2[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))\n\n def stn(self, x):\n xs = self.localization(x)\n xs = xs.view(-1, 640)\n theta = self.fc_loc(xs)\n theta = theta.view(-1, 2, 3)\n\n grid = F.affine_grid(theta, x.size(), align_corners=True)\n x = F.grid_sample(x, grid, align_corners=True)\n return x\n\n\n def stn_2(self, x):\n xs = self.localization_2(x)\n #print(\"xs.shape \", xs.shape)\n xs = xs.view(-1, 3200)\n theta = self.fc_loc_2(xs)\n theta = theta.view(-1, 2, 3)\n\n grid = F.affine_grid(theta, x.size(), align_corners = True)\n x = F.grid_sample(x, grid, align_corners = True)\n return x\n\n def forward(self,input):\n out = self.stn(input)\n out2 = self.stn_2(input)\n\n #print(\"out.shape: \", out.shape)\n #print(\"out2.shape: \", out2.shape)\n\n out = torch.cat((out, out2))\n #print(\"concat shape: \", out.shape)\n out = F.relu(self.conv1(out))\n out = self.conv2(out)\n out = F.relu(self.pool2(out))\n\n out = F.relu(self.conv3(out))\n out = self.norm(self.conv4(out))\n out = F.relu(self.pool4(out))\n\n out = F.dropout(out)\n #print(\"fc layer:\", out.shape )\n #out = out.view(-1, 810)\n out = out.view(-1, 810*2)\n out = F.relu(self.fc1(out))\n out = self.fc2(out)\n\n #print(parameters_to_vector(list(self.fc1.parameters())).shape)\n #print(parameters_to_vector(list(self.fc2.parameters())).shape)\n\n return out\n\n\n# In[13]:\n\n\n\n\nX_train = np.load(r\"C:\\Users\\bches\\Classes\\Spring_2021\\Pattern_Recognition\\Project\\datasets\\FER2013\\train\\X_train.npy\") #np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/X_train.npy\")\ny_train = np.load(r\"C:\\Users\\bches\\Classes\\Spring_2021\\Pattern_Recognition\\Project\\datasets\\FER2013\\train\\y_train.npy\") #np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/y_train.npy\")\nX_val = np.load(r\"C:\\Users\\bches\\Classes\\Spring_2021\\Pattern_Recognition\\Project\\datasets\\FER2013\\train\\X_val.npy\") #np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/X_val.npy\")\ny_val = np.load(r\"C:\\Users\\bches\\Classes\\Spring_2021\\Pattern_Recognition\\Project\\datasets\\FER2013\\train\\y_val.npy\") #np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/y_val.npy\")\n\n\n\n#X_train = np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/X_train.npy\")\n#y_train = np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/y_train.npy\")\n#X_val = np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/X_val.npy\")\n#y_val = np.load(\"/blue/wu/bchesley97/PR/datasets/FER2013/train/y_val.npy\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[3]:\n\n\nX_train.shape, y_train.shape, X_val.shape, y_val.shape, np.unique(y_train, return_counts=True), np.unique(y_val, return_counts=True)\n\n\n# In[4]:\n\n\n\n\n# In[5]:\n\n\n#convert data from numpy to tensors\nX_train_t = torch.tensor(X_train.tolist(), dtype=torch.float32)/255\ny_train_t = torch.tensor(y_train.tolist(), dtype=torch.long)\nX_val_t = torch.tensor(X_val.tolist(), dtype=torch.float32)/255\ny_val_t = torch.tensor(y_val.tolist(), dtype=torch.long)\n\n\nX_train_t.type(torch.float32)\ny_train_t.type(torch.long)\nX_val_t.type(torch.float32)\ny_val_t.type(torch.long)\n\nX_train_t.shape, X_train_t.type, 
y_train_t.shape\n\n\n# In[6]:\n\n\n#pytorch tensors require N X C X H X W\nX_train_t = X_train_t.unsqueeze(1).contiguous()\nX_val_t = X_val_t.unsqueeze(1).contiguous()\n\nX_train_t.shape, X_val_t.shape, y_train_t[0].type\n\n\n# In[7]:\n\n\nX_train_t.min(), X_train_t.max(), y_train_t.min() #double check to make sure min is 0 and max is 1\n\n\n# In[8]:\n\n\nX_train_t.view(1,-1).mean(dim=1), X_train_t.view(1,-1).std(dim=1) #check mean and std deviation values\n\n\n# In[9]:\n\n\ntrain_mean = X_train_t.view(1,-1).mean(dim=1)\ntrain_std = X_train_t.view(1,-1).std(dim=1)\n\n\n# In[10]:\n\n\ntrain_transform = torchvision.transforms.Compose([\n #torchvision.ToPILImage(), #need this to do data augmentation, only accepts PIL images\n #torchvision.transforms.Resize(48), #48 is FER2013 size\n #torchvision.transforms.ToTensor(),\n transforms.Normalize(mean=train_mean, std=train_std)\n])\n\nval_transform = torchvision.transforms.Compose([\n #torchvision.ToPILImage(), #need this to do data augmentation, only accepts PIL images\n #torchvision.transforms.Resize(48), #48 is FER2013 size\n #torchvision.transforms.ToTensor(),\n transforms.Normalize(mean=train_mean, std=train_std)\n])\n\n# In[11]:\n\n\n#data set class definition\n\nclass my_dataset(Dataset):\n def __init__(self, X, y, transform = None):\n self.data = X\n self.target = y\n self.transform = transform\n\n if torch.cuda.is_available():\n print(\"Data placed in GPU memory\")\n self.data = self.data.cuda()\n self.target = self.target.cuda()\n\n def __getitem__(self, index):\n x = self.data[index]\n y = self.target[index]\n if self.transform:\n x = self.transform(x.cpu())\n\n if torch.cuda.is_available():\n return x.cuda(), y.cuda()\n\n return x,y\n\n def __len__(self):\n return len(self.data)\n\n\n\n# In[12]:\n\n\nnum_classes = 7\n\ntrain_errors = []\nval_errors = []\n#val_loader = torch.utils.data.DataLoader(val_dataset, batch_size = 32, shuffle=True)\ndevice = torch.device(\"cuda:0\")\ndef training_loop(n_epochs, optimizer, model, loss_fn, train_loader, val_loader):\n patience = 50 #50 epoch patience for early stopping\n lamb = 0.001\n if model.use_cuda:\n if(torch.cuda.device_count() > 1):\n print(\"Using data parallel for training model\")\n model = nn.DataParallel(model)\n model.to(device)\n for epoch in range(1, n_epochs+1):\n loss_train = 0.0\n val_loss = 0.0\n correct_train = 0\n total_train = 0\n for imgs, labels in train_loader:\n model.train()\n imgs = imgs.to(device)\n outputs = model(imgs)\n loss = loss_fn(outputs, labels)\n #l2 = torch.cat((parameters_to_vector(model.fc1.parameters()), parameters_to_vector(model.fc2.parameters())), 0)\n #l2 = torch.norm(l2, 2)\n #print(\"l2 shape: \", l2)\n #loss += lamb*l2\n\n #print(\"_\", _)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n loss_train += loss.item()\n\n _, pred1 = torch.max(outputs, dim=1)\n total_train += labels.shape[0]\n correct_train += (pred1==labels).sum()\n\n\n #print(\"\\n\")\n #print(\"outputs: \", outputs)\n #print(\"predictions: \", pred1)\n #print(\"labels: \", labels)\n\n #train_acc = 100*correct_train/total_train\n\n train_errors.append(loss_train)\n\n total = 0\n correct = 0\n with torch.no_grad():\n model.eval()\n for imgs1, label in val_loader:\n imgs1 = imgs1.to(device)\n outputs1 = model(imgs1)\n #print(outputs1)\n _, pred = torch.max(outputs1, dim=1)\n total += label.shape[0]\n correct += (pred == label).sum()\n val_loss += loss_fn(outputs1, label)\n l2_val = torch.cat((parameters_to_vector(model.fc1.parameters()), 
parameters_to_vector(model.fc2.parameters())), 0)\n val_loss += lamb * torch.norm(l2_val, 2)\n\n val_errors.append(val_loss)\n #print(\"Validation accuracy: \", 100*correct/total)\n\n if epoch == 1 or epoch % 10 == 0:\n print('{} Epoch {}, Training loss {}, Training accuracy {} Validation accuracy {}'.format(datetime.datetime.now(), epoch, float(loss_train), float(100*float(correct_train)/float(total_train)), float(100*float(correct)/float(total))))\n\n\n# In[44]:\n\n\ntrain_dataset = my_dataset(X_train_t, y_train_t, transform=train_transform)\n\nval_dataset = my_dataset(X_val_t, y_val_t, transform=train_transform)\n\n\n# In[45]:\n\n\n#taken from Haotians Lenet code, cite this if submitting it\ndef init_weights(m):\n if type(m) == nn.Conv2d:\n nn.init.orthogonal_(m.weight)\n if type(m) == nn.Linear:\n nn.init.orthogonal_(m.weight)\n\n\n# In[46]:\n\n\nbatch_size = 1\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\nval_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)\nmodel = Deep_Emotion()\noptimizer = optim.Adam(model.parameters(), lr=1e-5)\nloss_fn = nn.CrossEntropyLoss()\nsum(p.numel() for p in model.parameters())\n\n\n# In[47]:\n\n\n#model.apply(init_weights) #initialize weights to have orthogonal projection\n\n\n# In[48]:\nexperiment_name = str(sys.argv[2])\n\ntraining_loop(\n n_epochs = 1000,\n optimizer = optimizer,\n model = model,\n loss_fn = loss_fn,\n train_loader = train_loader,\n val_loader = val_loader)\n\n\nnp.save(experiment_name + '_train_loss.npy', train_errors)\nnp.save(experiment_name + \"_val_loss.npy\", val_errors)\n\ntorch.save(model.state_dict(), experiment_name+'.pt')\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "BritChesley/Emotion-Recognition", "sub_path": "deep_emotion_exploration.py", "file_name": "deep_emotion_exploration.py", "file_ext": "py", "file_size_in_byte": 11668, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": 
"name"}, {"api_name": "torch.nn.Linear", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 72, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 72, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 81, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 87, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 93, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 96, "usage_type": 
"attribute"}, {"api_name": "torch.nn.functional.affine_grid", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.nn.functional.grid_sample", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.nn.functional.affine_grid", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.functional.grid_sample", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 129, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 131, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 135, "usage_type": "name"}, {"api_name": "torch.nn.functional.dropout", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 141, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 190, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 191, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 192, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 195, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 196, "usage_type": "attribute"}, {"api_name": "torch.float32", "line_number": 197, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 198, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 235, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 235, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 239, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 239, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 242, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 242, "usage_type": "attribute"}, {"api_name": 
"torchvision.transforms.Normalize", "line_number": 246, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 246, "usage_type": "name"}, {"api_name": "torch.utils.data.Dataset", "line_number": 254, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 260, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 260, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 271, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 294, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 294, "usage_type": "attribute"}, {"api_name": "torch.nn.DataParallel", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 296, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 321, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 337, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 343, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.nn.utils.parameters_to_vector", "line_number": 347, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 348, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 354, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 354, "usage_type": "attribute"}, {"api_name": "torch.nn.Conv2d", "line_number": 370, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 370, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 371, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 371, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 372, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 372, "usage_type": "name"}, {"api_name": "torch.nn.init.orthogonal_", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 373, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 373, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 380, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 381, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 381, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 383, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 383, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 384, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 384, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 395, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 406, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 407, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 409, "usage_type": "call"}]} +{"seq_id": "32205768136", "text": "# -*- coding : utf-8 -*-\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.model_selection import KFold\nfrom sklearn.linear_model import LogisticRegression\nfrom collections import Counter\nfrom chap_08 import ch08_72, ch08_77\n\"78. 
5分割交差検定\"\n\n\ndef make_x_y_text():\n # データ作成\n X_text = []\n y_text = []\n with open(\"data/sentiment.txt\", 'r')as f:\n for line in f:\n stemming_text = ch08_72.get_stemming_text(ch08_72.del_stop_words(line[2:]))\n X_text.append(dict(Counter(stemming_text)))\n if line[0] == '+':\n y_text.append(1)\n else:\n y_text.append(0)\n return X_text, y_text\n\n\ndef main():\n X_text, y_text = make_x_y_text()\n # K-hold\n kf = KFold(n_splits=5)\n kf.get_n_splits(X_text, y_text)\n for train_idx, test_idx in kf.split(X_text, y_text):\n lr = LogisticRegression()\n X_train, y_train = [X_text[idx] for idx in train_idx], [y_text[idx] for idx in train_idx]\n X_test, y_test = [X_text[idx] for idx in test_idx], [y_text[idx] for idx in test_idx]\n vec = DictVectorizer(sparse=False)\n X_train_vec = vec.fit_transform(X_train)\n X_test_vec = vec.transform(X_test)\n lr.fit(X_train_vec, y_train)\n ch08_77.print_accuracy_f1score(y_test, lr.predict(X_test_vec))\n print(\"***\")\n\n\nif __name__ == '__main__':\n main()", "repo_name": "shihono/100-knock", "sub_path": "chap_08/ch08_78.py", "file_name": "ch08_78.py", "file_ext": "py", "file_size_in_byte": 1412, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "chap_08.ch08_72.get_stemming_text", "line_number": 16, "usage_type": "call"}, {"api_name": "chap_08.ch08_72", "line_number": 16, "usage_type": "name"}, {"api_name": "chap_08.ch08_72.del_stop_words", "line_number": 16, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 34, "usage_type": "call"}, {"api_name": "chap_08.ch08_77.print_accuracy_f1score", "line_number": 38, "usage_type": "call"}, {"api_name": "chap_08.ch08_77", "line_number": 38, "usage_type": "name"}]} +{"seq_id": "9696367308", "text": "############################################################################### \n# Project name: Color Demo\n# File name: color_demo.py\n# Author: Therese (Teo) Patrosio @imnotartsy\n# Date: 7/21/20\n# Description: Connects spike bluetooth to onshape api for 7/23 demo\n# History: \n# Last modified by Teo 7/24/20\n# (C) Tufts Center for Engineering Education and Outreach (CEEO)\n###############################################################################\n\nimport serial #pip3 install pyserial\nimport utils.transform_utils as transform\nimport utils.onshape_utils as onshape\nimport argparse \nfrom datetime import datetime\n\n### Connect to Serial \nser = serial.Serial('/dev/tty.LEGOHubOwen-SerialPortP') # serial.Serial(port_args.port) # \n\n### Gets Spike starter message\nfor i in range(0,2):\n line = ser.readline()\n print(line.decode(), end=\"\")\n\n### Catch case for if spike goes into data spewing mode (untested) (WIP)\n# Cancels any Data Sending\nser.write('\\x03'.encode())\nser.write('\\x03'.encode())\nser.write('\\x03'.encode())\nser.write('\\x03'.encode())\n\n### Message to send to serial\n## This program gets the gesture of the spike\nmessage = \"\"\"\nimport hub,utime\\r\\n\nfrom spike.control import wait_for_seconds\\r\\n\n\nfor i in range (0, 1000):\\r\\n\\b\\b\n angle = hub.port.A.motor.get()[2]\\r\\n\\b\n print(360 - angle)\\r\\n\\b\\b\\b\n wait_for_seconds(1)\\r\\n\\b\\b\n\n\\r\\n\\r\\n\\r\\n\\r\\n\n\"\"\" 
\n\nprint(message)\nser.write('\\x03'.encode())\nser.write(message.encode())\n\nlast = 0\nassembly = onshape.getAssemblyInfo(False)\n# print(assembly[\"MvFKyhclA9pW5axe3\"][\"fullPath\"])\n\n### Read Data and call API\nfor i in range(0,1000):\n line = ser.readline()\n ## Prints serial line\n print(line.decode(), end=\"\")\n\n try:\n curr = int(line.decode())\n except:\n print(\"position not updated\")\n curr = last\n\n\n ## If state changes, call a transform\n if(abs(curr - last) > 5):\n\n ## Sets transformation\n args = [0, 0, 0, 0, 0, 1, curr]\n\n ## Transforms set up (get matrix and part id from assembly info)\n M = transform.getTranslationMatrix(args, False)\n partsToTransform = [assembly[\"MvFKyhclA9pW5axe3\"][\"fullPath\"]] # selects motor axle\n\n state = onshape.postTransform(M, False, partsToTransform, False)\n print(\"\\tTransformation status:\", state, datetime.now())\n last = curr\n\n\nser.close()", "repo_name": "imnotartsy/Onshape-Connections", "sub_path": "transformations/color_demo.py", "file_name": "color_demo.py", "file_ext": "py", "file_size_in_byte": 2349, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "serial.Serial", "line_number": 19, "usage_type": "call"}, {"api_name": "utils.onshape_utils.getAssemblyInfo", "line_number": 52, "usage_type": "call"}, {"api_name": "utils.onshape_utils", "line_number": 52, "usage_type": "name"}, {"api_name": "utils.transform_utils.getTranslationMatrix", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.transform_utils", "line_number": 75, "usage_type": "name"}, {"api_name": "utils.onshape_utils.postTransform", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.onshape_utils", "line_number": 78, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "72550100644", "text": "import pandas as pd\r\nimport multiprocessing\r\nfrom multiprocessing.dummy import Pool\r\nfrom random import random\r\nfrom eMicaContentAnalysis.eMicaItemDedector import *\r\nimport json\r\nimport csv\r\nimport time\r\n\r\nwith open('eMicaContentAnalysis/jsonFiles/hotelinlinks.json', 'r') as link:\r\n hotelinlinks = json.load(link)\r\n\r\n\r\ndef itemCheck(websites, retDict, item):\r\n i = websites\r\n itemCheck = item(hotelinlinks[i])\r\n if len(itemCheck) > 0:\r\n check = 2\r\n else:\r\n check = 1\r\n score = {hotelinlinks[i]['id']: check}\r\n retDict[websites] = score\r\n\r\n\r\ndef multipro(item, lenght):\r\n man = multiprocessing.Manager()\r\n retDict = man.dict()\r\n p = Pool(processes=4)\r\n\r\n for i in range(lenght):\r\n p.apply_async(func=itemCheck, args=(i, retDict, item))\r\n p.close()\r\n p.join()\r\n datas = retDict.values()\r\n return datas\r\n\r\n\r\ndef writeResults(arr, item):\r\n import pandas as pd\r\n\r\n results = arr\r\n itemC = item\r\n df = pd.read_csv('eMicaContentAnalysis/csvFiles/results.csv')\r\n\r\n df[itemC] = results\r\n print(df)\r\n df.to_csv('eMicaContentAnalysis/csvFiles/results.csv', index=False)\r\n\r\n\r\ndef start():\r\n if __name__ == '__main__':\r\n start = time.time()\r\n x = len(hotelinlinks)\r\n\r\n item = hotelPrivacy\r\n itemCol = 'hotelPrivacy'\r\n lenght = x\r\n scores = multipro(item, lenght)\r\n\r\n array = []\r\n padding = 3\r\n dict = scores[0]\r\n for i in range(1, len(scores)):\r\n dict = dict | scores[i]\r\n for i in range(1, len(dict)+1):\r\n x = str(i).zfill(padding)\r\n 
array.append(dict[x])\r\n\r\n if lenght == len(hotelinlinks):\r\n writeResults(array, itemCol)\r\n print(array)\r\n print(scores)\r\n end = time.time()\r\n print((end-start)/60)\r\n\r\n\r\ndef csvToExcel():\r\n df = pd.read_csv('eMicaContentAnalysis/csvFiles/results.csv')\r\n df.to_excel('eMicaContentAnalysis/excelFiles/results.xlsx',\r\n index=False, header=True)\r\n\r\n\r\ndef csvWriter():\r\n import pandas as pd\r\n\r\n with open('eMicaContentAnalysis/jsonFiles/hotelinlinks.json', 'r') as file:\r\n hotelInlinks = json.load(file)\r\n\r\n urls = []\r\n for i in range(len(hotelInlinks)):\r\n url = hotelInlinks[i]['domain']\r\n urls.append(url)\r\n\r\n rawData = {\r\n 'websites': urls\r\n }\r\n df = pd.DataFrame(rawData, columns=['websites'])\r\n df.to_csv('eMicaContentAnalysis/csvFiles/results.csv', index=False)\r\n\r\n\r\ndef checkData(x, y):\r\n import pandas as pd\r\n\r\n df = pd.DataFrame(x | y, columns=['websites', 'scores'])\r\n df.to_csv(\r\n 'eMicaContentAnalysis/csvFiles/check.csv', index=False)\r\n\r\n\r\ndef csvToExcel2():\r\n df = pd.read_csv('eMicaContentAnalysis/csvFiles/check.csv')\r\n df.to_excel('eMicaContentAnalysis/excelFiles/check.xlsx',\r\n index=False, header=True)\r\n\r\n\r\ndef websiteCheck():\r\n import random\r\n with open('eMicaContentAnalysis/jsonFiles/hotelinlinks.json', 'r') as link:\r\n hotelInlinks = json.load(link)\r\n with open('eMicaContentAnalysis/csvFiles/results.csv') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n data = {d['websites']: d['hotelRoom'] for d in list(reader)}\r\n checkWebsite = []\r\n checkScore = []\r\n ranNum = []\r\n array = []\r\n padding = 3\r\n x = random.sample(range(1, 261), 26)\r\n print(x)\r\n for i in x:\r\n y = str(i).zfill(padding)\r\n ranNum.append(y)\r\n for j in ranNum:\r\n\r\n for i in range(len(hotelInlinks)):\r\n if hotelInlinks[i]['id'] == j:\r\n website = hotelInlinks[i]['domain']\r\n score = data.get(website, 'Not Found')\r\n scores = {website: score}\r\n array.append(scores)\r\n\r\n checkWebsite.append(website)\r\n checkScore.append(score)\r\n x = {\r\n 'websites': checkWebsite\r\n }\r\n y = {\r\n 'scores': checkScore\r\n }\r\n checkData(x, y)\r\n csvToExcel2()\r\n return array\r\n\r\n\r\n# csvToExcel()\r\n\r\n# print(websiteCheck())\r\n\r\nstart()\r\n", "repo_name": "msaitag/eMicaDataAcquiring", "sub_path": "python.py", "file_name": "python.py", "file_ext": "py", "file_size_in_byte": 4177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 11, "usage_type": "call"}, {"api_name": "multiprocessing.Manager", "line_number": 26, "usage_type": "call"}, {"api_name": "multiprocessing.dummy.Pool", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 78, "usage_type": "call"}, {"api_name": "json.load", "line_number": 87, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 104, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 110, "usage_type": "call"}, {"api_name": "json.load", "line_number": 118, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 120, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 127, "usage_type": 
"call"}]} +{"seq_id": "25120886395", "text": "from itertools import product\n\n\ndef get_cell_value(serial, x, y):\n # rackid = x + 10\n # power = third digit of (((rackid + 10) * y) + serial ) * rackid\n # rack_id = x + 10\n # p = rack_id * y\n # p += serial\n # p *= rack_id\n # digit = int(p / 100) % 10\n # p = digit - 5\n res = (int(((((x + 10) * y) + serial) * (x + 10)) / 100) % 10) - 5\n return res\n\n\nrow_cache = {}\nzone_cache = {}\ncells = {}\n\n\ndef get_row_score(x, y, size):\n # Check if we have a cache entry for this row\n cache_key = (x, y)\n if cache_key in row_cache[size]:\n return row_cache[size][cache_key]\n score = None\n\n # Check if we have a cache entry for the same row, smaller\n if score is None and (size - 1) in row_cache:\n key = (x, y)\n if key in row_cache[size - 1]:\n score = row_cache[size - 1][key]\n score += cells[(x + size - 1, y)]\n # Check if we have a cache entry for the same row, previous col\n if x > 1:\n key = (x - 1, y)\n if key in row_cache[size]:\n score = row_cache[size][key]\n score -= cells[(x - 1, y)]\n score += cells[(x + size - 1, y)]\n # Too bad, build entry\n if score is None:\n score = 0\n for i in range(x, x + size):\n score += cells[(i, y)]\n row_cache[size][cache_key] = score\n\n return score\n\n\ndef get_best_level(serial, minsize, maxsize):\n global cells\n global row_cache\n zone_cache = {}\n cells = {}\n row_cache = {}\n\n for cell_id in product(range(1, 301), range(1, 301)):\n cells[cell_id] = get_cell_value(serial, cell_id[0], cell_id[1])\n\n best_score = 0\n best = None\n best_size = 0\n for size in range(minsize, maxsize + 1):\n row_cache[size] = {}\n zone_cache[size] = {}\n for y in range(1, 302 - size):\n for x in range(1, 302 - size):\n score = None\n # Check if we have a zone cache entry for zone, one row up\n if y > 1:\n key = (x, y - 1)\n if key in zone_cache[size]:\n score = zone_cache[size][key]\n score -= get_row_score(x, y - 1, size)\n score += get_row_score(x, y + size - 1, size)\n if score is None:\n score = 0\n for row_idx in range(y, y+size):\n score += get_row_score(x, row_idx, size)\n zone_cache[size][(x,y)] = score\n if score > best_score:\n best_score = score\n best = (x, y)\n best_size = size\n print(\"size=%s\" % size)\n print(\"%s,%s,%s = %s\" % (best[0], best[1], best_size, best_score))\n\n\nprint(get_cell_value(8, 3, 5))\nprint(get_cell_value(57, 122, 79))\nprint(get_cell_value(39, 217, 196))\nprint(get_cell_value(71, 101, 153))\nget_best_level(18, 3, 3)\nget_best_level(42, 3, 3)\nget_best_level(7400, 3, 3)\nget_best_level(7400, 3, 300)\n", "repo_name": "jeremylongo/aoc", "sub_path": "day11_2.py", "file_name": "day11_2.py", "file_ext": "py", "file_size_in_byte": 2994, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "itertools.product", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "6248801265", "text": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.test import TestCase\n\nfrom django_stachoutils.forms import NestedModelForm\nfrom .models import Car, Person\n\n\nclass PersonForm(forms.ModelForm):\n class Meta:\n model = Person\n fields = ('name', 'lastname', 'gender')\n\n\nclass CarForm(NestedModelForm):\n owner = forms.ModelChoiceField(queryset=Person.objects.all(),\n initial=None, required=False,\n widget=forms.HiddenInput())\n class Meta:\n model = Car\n fields = ('name', 'brand', 'owner')\n\n class Nested:\n form = PersonForm\n fk = 'owner'\n\n\nclass NestedModelFormTest(TestCase):\n def 
test_render_nested_model_form(self):\n form = CarForm()\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n )\n self.assertHTMLEqual(\n form.nested_form.as_table(),\n \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n )\n\n def test_bound_nested_model_form(self):\n form = CarForm({\n 'name': '9.3 2.0t',\n 'brand': 'Saab',\n 'owner': '',\n })\n self.assertTrue(form.is_valid())\n form.save()\n my_car = Car.objects.latest('pk')\n self.assertEqual(my_car.brand, 'Saab')\n self.assertEqual(my_car.name, '9.3 2.0t')\n\n # Data hasn't changed.\n form = CarForm({\n 'name': '9.3 2.0t',\n 'brand': 'Saab',\n 'owner': '',\n }, instance=my_car)\n self.assertFalse(form.has_changed())\n\n # Data has changed.\n form = CarForm({\n 'name': '9.3 2.0T Biopower',\n 'brand': 'Saab',\n 'owner': '',\n }, instance=my_car)\n self.assertTrue(form.has_changed())\n\n form.save()\n my_car = Car.objects.get(pk=my_car.pk)\n self.assertEqual(my_car.brand, 'Saab')\n self.assertEqual(my_car.name, '9.3 2.0T Biopower')\n\n # With nested data.\n form = CarForm({\n 'name': '900 Turbo 16',\n 'brand': 'Saab',\n 'owner': '',\n 'None_OWNER-name': 'Stan',\n 'None_OWNER-lastname': 'Guerra',\n 'None_OWNER-gender': 'male',\n })\n self.assertTrue(form.is_valid())\n form.save()\n my_car = Car.objects.latest('pk')\n self.assertEqual(my_car.brand, 'Saab')\n self.assertEqual(my_car.name, '900 Turbo 16')\n self.assertEqual(my_car.owner.name, 'Stan')\n\n def test_bound_nested_model_form_with_invalid_data(self):\n form = CarForm({\n 'name': 'Nine Three Two-liters Turbocharged',\n 'brand': 'Saab',\n 'owner': '',\n })\n self.assertFalse(form.is_valid())\n self.assertEqual(form.non_nested_errors, {'name':['Ensure this value has at most 20 characters (it has 34).']})\n\n def test_prefixed_bound_nested_model_form(self):\n form = CarForm({\n 'car-name': '9.3 2.0t',\n 'car-brand': 'Saab',\n 'car-owner': '',\n }, prefix='car')\n self.assertTrue(form.is_valid())\n form.save()\n my_car = Car.objects.latest('pk')\n self.assertEqual(my_car.brand, 'Saab')\n self.assertEqual(my_car.name, '9.3 2.0t')\n\n form = CarForm({\n 'car-name': '900 Turbo 16',\n 'car-brand': 'Saab',\n 'car-owner': '',\n 'car_OWNER-name': 'Stan',\n 'car_OWNER-lastname': 'Guerra',\n 'car_OWNER-gender': 'male',\n }, prefix='car')\n self.assertTrue(form.is_valid())\n form.save()\n my_car = Car.objects.latest('pk')\n self.assertEqual(my_car.brand, 'Saab')\n self.assertEqual(my_car.name, '900 Turbo 16')\n self.assertEqual(my_car.owner.name, 'Stan')\n", "repo_name": "Starou/django-stachoutils", "sub_path": "tests/forms/test_nested.py", "file_name": "test_nested.py", "file_ext": "py", "file_size_in_byte": 5168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Person", "line_number": 12, "usage_type": "name"}, {"api_name": "django_stachoutils.forms.NestedModelForm", "line_number": 16, "usage_type": "name"}, {"api_name": "django.forms.ModelChoiceField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "models.Person.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Person.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Person", "line_number": 17, "usage_type": "name"}, 
{"api_name": "django.forms.HiddenInput", "line_number": 19, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 19, "usage_type": "name"}, {"api_name": "models.Car", "line_number": 21, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 29, "usage_type": "name"}, {"api_name": "models.Car.objects.latest", "line_number": 79, "usage_type": "call"}, {"api_name": "models.Car.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "models.Car", "line_number": 79, "usage_type": "name"}, {"api_name": "models.Car.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Car.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "models.Car", "line_number": 100, "usage_type": "name"}, {"api_name": "models.Car.objects.latest", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Car.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "models.Car", "line_number": 115, "usage_type": "name"}, {"api_name": "models.Car.objects.latest", "line_number": 137, "usage_type": "call"}, {"api_name": "models.Car.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "models.Car", "line_number": 137, "usage_type": "name"}, {"api_name": "models.Car.objects.latest", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Car.objects", "line_number": 151, "usage_type": "attribute"}, {"api_name": "models.Car", "line_number": 151, "usage_type": "name"}]} +{"seq_id": "37829562835", "text": "from optuna.integration import LightGBMPruningCallback\n\n\ndef objective(trial, X, y):\n param_grid = {\n # \"device_type\": trial.suggest_categorical(\"device_type\", ['gpu']),\n \"n_estimators\": trial.suggest_categorical(\"n_estimators\", [10000]),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.01, 0.3),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 20, 3000, step=20),\n \"max_depth\": trial.suggest_int(\"max_depth\", 3, 12),\n \"min_data_in_leaf\": trial.suggest_int(\"min_data_in_leaf\", 200, 10000, step=100),\n \"lambda_l1\": trial.suggest_int(\"lambda_l1\", 0, 100, step=5),\n \"lambda_l2\": trial.suggest_int(\"lambda_l2\", 0, 100, step=5),\n \"min_gain_to_split\": trial.suggest_float(\"min_gain_to_split\", 0, 15),\n \"bagging_fraction\": trial.suggest_float(\n \"bagging_fraction\", 0.2, 0.95, step=0.1\n ),\n \"bagging_freq\": trial.suggest_categorical(\"bagging_freq\", [1]),\n \"feature_fraction\": trial.suggest_float(\n \"feature_fraction\", 0.2, 0.95, step=0.1\n ),\n }\n\n cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1121218)\n\n cv_scores = np.empty(5)\n for idx, (train_idx, test_idx) in enumerate(cv.split(X, y)):\n X_train, X_test = X.iloc[train_idx], X.iloc[test_idx]\n y_train, y_test = y[train_idx], y[test_idx]\n\n model = lgbm.LGBMClassifier(objective=\"binary\", **param_grid)\n model.fit(\n X_train,\n y_train,\n eval_set=[(X_test, y_test)],\n eval_metric=\"binary_logloss\",\n early_stopping_rounds=100,\n callbacks=[\n LightGBMPruningCallback(trial, \"binary_logloss\")\n ], # Add a pruning callback\n )\n preds = model.predict_proba(X_test)\n cv_scores[idx] = log_loss(y_test, preds)\n\n return np.mean(cv_scores)", "repo_name": "Ankitkalauni/ML-Code-Cheatsheet", "sub_path": "Optuna-search/Classification/LGBM_Optuna.py", "file_name": "LGBM_Optuna.py", "file_ext": "py", "file_size_in_byte": 1870, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": 
"optuna.integration.LightGBMPruningCallback", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "42388483380", "text": "from typing import List\n\nclass Chart:\n CORNERS = [-1, -1], [1, 1], [1, -1], [-1, 1]\n NESW = [0, -1], [1, 0], [0, 1], [-1, 0]\n DIRECTIONS = CORNERS + NESW\n\n def __init__(self, graph):\n self.graph = graph\n self.visited = [[False] * len(graph[0]) for i in range(len(graph))]\n self.islands = []\n\n def get_islands(self):\n return self.islands\n\n def define_island(self, x, y):\n self.set_visited(x, y)\n self.islands.append([[x, y]])\n self.traverse_island(x, y)\n\n def set_visited(self, x, y):\n self.visited[y][x] = True\n\n def traverse_island(self, x, y):\n for d in Chart.DIRECTIONS:\n dx = x + d[0]\n dy = y + d[1]\n\n # If index is out of bounds, or has already been visited, skip it\n if dx < 0 or dy < 0 or dy >= len(self.graph) or dx >= len(self.graph[0]) or self.visited[dy][dx]:\n continue\n\n self.set_visited(dx, dy)\n if self.graph[dy][dx]:\n self.islands[-1].append([dx, dy])\n self.traverse_island(dx, dy)\n\n def find_islands(self):\n for y, row in enumerate(self.graph):\n for x, value in enumerate(row):\n if value and not self.visited[y][x]:\n self.define_island(x, y)\n\n def get_island_graph(self):\n island_graph = [[0] * len(self.graph[0]) for i in range(len(self.graph))]\n for i, island in enumerate(self.islands):\n for [x, y] in island:\n island_graph[y][x] = i + 1\n\n return island_graph\n\n def get_largest_island(self):\n largest = []\n for island in self.islands:\n if (len(island) > len(largest)):\n largest = island\n\n return largest\n\ndef get_largest_island(grid: List[List[int]]) -> int:\n chart = Chart(grid)\n chart.find_islands()\n largest = chart.get_largest_island()\n return len(largest)\n", "repo_name": "soccerKevin/UW", "sub_path": "AADS/island_finder.py", "file_name": "island_finder.py", "file_ext": "py", "file_size_in_byte": 1739, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "16861453995", "text": "import os\nimport sys\n\nfrom logging import getLogger\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..')))\nfrom pypasta import *\n\nlog = getLogger(__name__[-15:])\n\n\ndef ripup(config, argv):\n parser = argparse.ArgumentParser(prog='ripup',\n description='Rip up equivalence class and '\n 'reanalyse')\n\n parser.add_argument('reps', metavar='representative', type=str, nargs='+',\n help='Representatives of equivalence class. 
Allows to '\n 'specify multiple classes.')\n\n parser.add_argument('-cpu', dest='cpu_factor', metavar='cpu', type=float,\n default=1.0, help='CPU factor for parallelisation '\n '(default: %(default)s)')\n\n # Thresholds\n parser.add_argument('-ta', dest='thres_accept', metavar='threshold',\n type=float, default=config.thresholds.autoaccept,\n help='Autoaccept threshold (default: %(default)s)')\n parser.add_argument('-ti', dest='thres_interactive', metavar='threshold',\n type=float, default=config.thresholds.interactive,\n help='Interactive threshold (default: %(default)s)')\n parser.add_argument('-dlr', dest='thres_diff_lines', metavar='threshold',\n type=float, default=config.thresholds.diff_lines_ratio,\n help='Diff lines ratio threshold (default: %(default)s)')\n parser.add_argument('-weight', dest='weight', metavar='weight', type=float,\n default=config.thresholds.message_diff_weight,\n help='Heuristic factor for message to diff rating. '\n '(default: %(default)s)')\n parser.add_argument('-th', dest='thres_heading', metavar='threshold',\n default=config.thresholds.heading, type=float,\n help='Minimum diff hunk section heading similarity '\n '(default: %(default)s)')\n parser.add_argument('-tf', dest='thres_filename', metavar='threshold',\n default=config.thresholds.filename, type=float,\n help='Minimum filename similarity '\n '(default: %(default)s)')\n parser.add_argument('-adi', dest='thres_adi', metavar='days', type=int,\n default=config.thresholds.author_date_interval,\n help='Author date interval (default: %(default)s)')\n\n args = parser.parse_args(argv)\n representatives = args.reps\n repo = config.repo\n mbox = config.mode == Config.Mode.MBOX\n\n config.thresholds = Thresholds(args.thres_accept,\n args.thres_interactive,\n args.thres_diff_lines,\n args.thres_heading,\n args.thres_filename,\n args.weight,\n args.thres_adi)\n\n f_cluster, cluster = config.load_cluster()\n\n for representative in representatives:\n if representative not in cluster:\n log.error('Not found in any patch group: %s' % representative)\n continue\n\n elems = cluster.ripup_cluster(representative)\n\n evaluation_result = evaluate_commit_list(repo, config.thresholds,\n mbox,\n EvaluationType.PatchStack,\n elems, elems,\n parallelise=False,\n verbose=True,\n cpu_factor=args.cpu_factor)\n\n evaluation_result.load_fp(config.d_false_positives, False)\n evaluation_result.interactive_rating(repo, cluster,\n config.thresholds, False, True)\n evaluation_result.fp.to_file(config.d_false_positives)\n cluster.to_file(f_cluster)\n", "repo_name": "lfd/PaStA", "sub_path": "bin/pasta_ripup.py", "file_name": "pasta_ripup.py", "file_ext": "py", "file_size_in_byte": 4237, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 32, "dataset": "github-code", "pt": "52", "api": [{"api_name": "sys.path.insert", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "42831989059", "text": "#!/bin/python\n\nfrom bs4 import BeautifulSoup\nimport requests\n\npage = 'https://en.wikipedia.org/wiki/Most_common_words_in_English'\nr = requests.get(page)\nsoup = BeautifulSoup(r.content, 
'html.parser')\n\nbody = soup.find('body')\ncontent1 = body.find('div', {'id': 'bodyContent'})\ncontent2 = content1.find('div', {'id': 'mw-content-text'})\ncontent3 = content2.find('div', {'class': 'mw-parser-output'})\ntbody = content3.find('table').find('tbody')\nentries = tbody.find_all('tr')\nwords = [e.find('td').find('a').text for e in entries[1:]]\n\nwith open('100commonwords.txt', 'w+') as f:\n for word in words:\n if len(word) > 1:\n f.write(f'{word.lower()}\\n')\n", "repo_name": "benharmonics/caesar-decoder", "sub_path": "common_words_scraper.py", "file_name": "common_words_scraper.py", "file_ext": "py", "file_size_in_byte": 669, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "75200207524", "text": "import json\n\n# JSON - JavaScript Object Notation\n# data = '{\"type\": \"forecast\",\"duration\": 3,\"city\": \"Karaganda\",\"country_code\": \"kz\"}'\n#\n# request = json.loads(data)\n# request['city'] = 'Алматы'\n#\n# data = json.dumps(request, ensure_ascii=False)\n# print(data)\n\nwith open('example.txt', encoding='utf-8') as my_file:\n request = json.load(my_file)\n\nrequest['city'] = 'Hallo'\n\nprint(my_file)\n\nwith open('example.txt', 'w', encoding='utf-8') as my_file:\n json.dump(request, my_file, ensure_ascii=False)\n", "repo_name": "yngkzk/python_classwork", "sub_path": "July-14/example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 512, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "17921265397", "text": "import cv2\n\nfrom config.settings import file_structure, ml_constants\nfrom services.file import save_to_sub_folder\n\ndef background_blur_fun(file_name: str, system_file_path: str) -> str:\n foreground = cv2.imread(system_file_path, cv2.IMREAD_COLOR)\n bg_path = file_structure.USER_DATA + system_file_path.split(\"/\")[3] + file_structure.USER_BLURRED_BACKGROUND_PATH + system_file_path.split(\"/\")[-1]\n original_img_path = system_file_path\n background = cv2.imread(original_img_path)\n background = cv2.resize(background, (foreground.shape[1], foreground.shape[0]))\n blurred_background = cv2.GaussianBlur(background, (ml_constants.BLUR_FACTOR, ml_constants.BLUR_FACTOR), 0)\n save_to_sub_folder(bg_path, blurred_background)\n return bg_path", "repo_name": "BahauddinKalyani/Illustrix", "sub_path": "backend/services/ml_services/background_blur.py", "file_name": "background_blur.py", "file_ext": "py", "file_size_in_byte": 755, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "cv2.imread", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 7, "usage_type": "attribute"}, {"api_name": "config.settings.file_structure.USER_DATA", "line_number": 8, "usage_type": "attribute"}, {"api_name": "config.settings.file_structure", "line_number": 8, "usage_type": "name"}, {"api_name": "config.settings.file_structure.USER_BLURRED_BACKGROUND_PATH", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 11, 
"usage_type": "call"}, {"api_name": "cv2.GaussianBlur", "line_number": 12, "usage_type": "call"}, {"api_name": "config.settings.ml_constants.BLUR_FACTOR", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.settings.ml_constants", "line_number": 12, "usage_type": "name"}, {"api_name": "services.file.save_to_sub_folder", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "40517913692", "text": "from fractions import gcd\n\n'''\ndef gcd(i,j):\n while j!= 0:\n (i,j) = (j, i%j)\n return i\n'''\n\nsmlmul=1\nfor x in range(1, 21):\n\tsmlmul= smlmul* x// gcd(x, smlmul)\nprint(smlmul)\n", "repo_name": "vabhishek-me/project-euler", "sub_path": "Problem 5 - Smallest multiple/naive_smallest_mul.py", "file_name": "naive_smallest_mul.py", "file_ext": "py", "file_size_in_byte": 187, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "fractions.gcd", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "71862961126", "text": "import asyncio\nimport aiohttp\n\nAPI_KEY = 'AIzaSyBBGS9atYkz0hk61GHmlVewlq5ziXoWpSo'\n\nasync def fetchPlaces(lat='+1', lon='-1', radius=1):\n \"\"\"\n make a single request to google places server and return the dict response\n\n Keyword Arguments:\n lat {str} -- latitude of search center (default: {'+1'})\n lon {str} -- longitude of search center (default: {'-1'})\n radius {int} -- radius of search (default: {1})\n \"\"\"\n url = f'https://maps.googleapis.com/maps/api/place/nearbysearch/json?key={API_KEY}&location={lat},{lon}&radius={radius}'\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(\n ssl=False,\n ),\n ) as session:\n async with session.get(url) as resp:\n response = await resp.json()\n return response\n", "repo_name": "JohnnyXiangyu/CS131-S2020-UCLA", "sub_path": "Project/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "aiohttp.ClientSession", "line_number": 16, "usage_type": "call"}, {"api_name": "aiohttp.TCPConnector", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "35453768194", "text": "import pandas as pd\nfrom matplotlib import pyplot as plt\n\ndata = pd.read_csv('tweetAveragesPlot.csv', sep=',', encoding='utf-8')\n\nX = data[\"TweetDate\"]\nY = data[\"AverageDailySentimentScore\"]\n\nplt.plot(X,Y,)\n\nplt.xlabel = 'TweetDates'\nplt.ylabel = 'SentimentScore'\nplt.show()\n", "repo_name": "marcdlc56/INFO3700FinalProject", "sub_path": "plotTweets.py", "file_name": "plotTweets.py", "file_ext": "py", "file_size_in_byte": 275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 12, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}]} +{"seq_id": "37979323210", "text": "import 
pandas as pd\nfrom tqdm import tqdm\n\nif __name__ == \"__main__\":\n df = pd.read_csv(\"../data/total_dataset.csv\")\n\ndef parkinson_filter(data):\n k_star = df['Kstar_M']\n B0 = df['B0_M']\n \n for i in tqdm(range(0,len(k_star)-1)):\n \n if k_star[i] < 792 or k_star[i] > 992:\n df.drop(i, axis = 0, inplace = True)\n \n if B0[i] < 4850 or B0[i] > 5780:\n df.drop(i, axis = 0, inplace = True)\n \n df.reset_index(inplace = True)\n", "repo_name": "TBPS-Team10/TBPS-Project", "sub_path": "src/filtering/parkinson_filtering.py", "file_name": "parkinson_filtering.py", "file_ext": "py", "file_size_in_byte": 500, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "27770816162", "text": "import re\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import Element\nfrom typing import List\n\n\n# dictionary with namespaces\nNAMESPACES = {\n \"rh\": \"http://rdf.rhea-db.org/\",\n \"rdfs\": \"http://www.w3.org/2000/01/rdf-schema#\",\n \"rdf\": \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\"\n}\n\n# Collection of commonly used attribute keys to `xml.etree.ElementTree.Element.attrib`\nATTRIB_KEYS = {\n # expands to \"{http://www.w3.org/1999/02/22-rdf-syntax-ns#}resource\"\n \"rdf:resource\": f\"{{{NAMESPACES['rdf']}}}resource\",\n # expands to \"{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about\"\n \"rdf:about\": f\"{{{NAMESPACES['rdf']}}}about\"\n}\n\n# Collection of commonly used base IRIs\nBASE_IRIS = {\n \"rh\": NAMESPACES[\"rh\"],\n \"pubmed\": \"http://rdf.ncbi.nlm.nih.gov/pubmed/\",\n \"obo\": \"http://purl.obolibrary.org/obo/\",\n \"ec\": \"http://purl.uniprot.org/enzyme/\"\n}\n\n\nclass ReactivePartDataFactory:\n \"\"\"\n A macromolecule (of type \"rh:GenericCompound\") has reactive parts, specified by description like:\n\n \n \n N(6)-methyl-L-lysine residue\n ...\n C7H15N2O\n 1\n \n \n \n\n The relative IRI \"Compound_9846_rp1\" indicates this is the first reactive part of compound \"9846\".\n\n This data factory produce all such associations in tuples (comp_num, rp_entry).\n \"\"\"\n relative_iri_pattern = re.compile(r\"Compound_\\d+_rp\\d\")\n\n @classmethod\n def is_valid_relative_iri(cls, relative_iri: str):\n match = cls.relative_iri_pattern.fullmatch(relative_iri)\n return match is not None\n\n # Adds name key and associated value to compounds with reactive parts\n @classmethod\n def _add_rp_name(cls, rp_entry: dict, description: Element):\n node = description.find(\"rh:name\", NAMESPACES)\n if node is not None:\n rp_name = node.text\n rp_entry[\"name\"] = rp_name\n\n # Adds formula key and associated value to compounds with reactive parts\n @classmethod\n def _add_rp_formula(cls, rp_entry: dict, description: Element):\n node = description.find(\"rh:formula\", NAMESPACES)\n if node is not None:\n rp_formula = node.text\n rp_entry[\"formula\"] = rp_formula\n\n # Adds charge key and associated value to compounds with reactive parts\n @classmethod\n def _add_rp_charge(cls, rp_entry: dict, description: Element):\n node = description.find(\"rh:charge\", NAMESPACES)\n if node is not None:\n rp_charge = int(node.text)\n rp_entry[\"charge\"] = rp_charge\n\n # Adds chebi key and associated id to compounds with reactive parts\n @classmethod\n def _add_rp_chebi_id(cls, rp_entry: dict, description: Element):\n node = description.find(\"rh:chebi\", NAMESPACES)\n if node is 
not None:\n rp_chebi = node.attrib[ATTRIB_KEYS[\"rdf:resource\"]].lstrip(BASE_IRIS[\"obo\"]).replace(\"_\", \":\")\n rp_entry[\"chebi_id\"] = rp_chebi\n\n # Adds reactive part information to the list associated with the key \"reactive_parts\" to compounds with this annoted\n @classmethod\n def produce(cls, relative_iri: str, description: Element):\n # we can assume here that relative IRI has a pattern of \"Compound_\\d+_rp\\d\", e.g. \"Compound_10594_rp2\"\n # comp_num = re.sub(pattern=r\"_rp\\d\", repl=\"\", string=relative_iri).lstrip(\"Compound_\")\n comp_num = relative_iri.split(\"_\")[1]\n\n rp_entry = {}\n cls._add_rp_name(rp_entry, description)\n cls._add_rp_formula(rp_entry, description)\n cls._add_rp_charge(rp_entry, description)\n cls._add_rp_chebi_id(rp_entry, description)\n\n yield comp_num, rp_entry\n\n\nclass SideDataFactory:\n \"\"\"\n Each reaction specifies its two side (\"L\" for left, \"R\" for right). E.g.\n\n \n # some other tags ignored\n \n \n\n \n \n \n\n Each side description specifies a \"contains\" relationship for each participating compound. E.g.\n\n \n \n \n \n\n Note that \"rh:contains\" only specifies containing without stoichiometry. \"rh:contains1\" indicates a containing\n relationship along with stoichiometry 1.\n\n Therefore from this side description we derive the following associations:\n\n - The right side of reaction \"35975\" contains compound \"3512\", and\n - the compound's stoichiometry is 1 in the reaction.\n\n We can also get the location (\"in\" or \"out\") of a compound, if its reaction is a transport reaction and the\n compound is found in both sides of the reaction. E.g.\n\n \n \n \n \n\n The location of compound \"3249\" is \"out\".\n\n This data factory produces association quadruples of (rhea_id, side_key, comp_num, participation_entry), which\n indicates:\n\n - The reaction with `rhea_id`, on its left/right side specified by `side_key`, contains a compound indicated by\n `comp_num`, and\n - the compound's stoichiometry and location (if any) are wrapped in the `participation_entry`.\n \"\"\"\n\n # expands to \"{http://rdf.rhea-db.org/}contains\"\n contains_prefix = f\"{{{NAMESPACES['rh']}}}contains\"\n\n side_char_to_key = {\n \"L\": \"side_l\",\n \"R\": \"side_r\"\n }\n\n @classmethod\n def is_valid_relative_iri(cls, relative_iri: str):\n rmc = relative_iri[-1] # right-most character\n return rmc in cls.side_char_to_key\n\n @classmethod\n def is_stoichiometric_tag(cls, tag: str):\n \"\"\"\n Check if a tag has the form of \"rh:contains[x]\" from which the stoichiometry of [x] can be inferred\n \"\"\"\n return (tag != cls.contains_prefix) and tag.startswith(cls.contains_prefix)\n\n @classmethod\n def get_stoichiometry(cls, tag: str):\n \"\"\"\n Trim the prefix \"rh:contains\" from a stoichimetric tag to get the stoichiometry.\n\n Note that there exist special stoichimetric tags like\n\n - \"containsN\",\n - \"contains2n\" (no idea why it's not \"2N\"),\n - \"containsNplus1\", and\n - \"containsNminus1\"\n\n Therefore stoichiometry's datatype must be string.\n \"\"\"\n return tag.lstrip(cls.contains_prefix)\n\n @classmethod\n def produce(cls, relative_iri: str, description: Element):\n # we assume relative_iri is valid\n side_char = relative_iri[-1]\n side_key = cls.side_char_to_key[side_char]\n\n child_tags = [child.tag for child in description if cls.is_stoichiometric_tag(child.tag)]\n for tag in child_tags:\n # namespaces not needed here for description.find() because `tag` has the expanded namespace already\n contained_absolute_iri = 
description.find(tag).attrib[ATTRIB_KEYS[\"rdf:resource\"]]\n contained_relative_iri = contained_absolute_iri.lstrip(BASE_IRIS['rh'])\n\n # E.g. contained_relative_iri = \"Participant_69560_compound_3249_out\"\n contained_relative_iri_parts = contained_relative_iri.split(\"_\")\n rhea_num = contained_relative_iri_parts[1]\n comp_num = contained_relative_iri_parts[3]\n location = contained_relative_iri_parts[4] if len(contained_relative_iri_parts) == 5 else None\n\n \"\"\"\n Here we assume that rhea id can be inferred from the contained IRI (because accession id is not\n available).\n E.g. an IRI like \"Participant_35975_compound_3512\" indicates that compound_lib[\"3512\"] is associated\n with reaction \"RHEA:35975\"\n It's also possible to infer from the upper-level IRI (the `relative_iri` argument). E.g. \"35975_R\" in\n the above example\n \"\"\"\n rhea_id = \"RHEA:\" + rhea_num\n stoich = cls.get_stoichiometry(tag)\n\n participation_entry = {\"stoich\": stoich}\n\n # Adds positional information for compounds to rhea entries that specify a transport reaction\n if location:\n participation_entry[\"location\"] = location\n\n yield rhea_id, side_key, comp_num, participation_entry\n\n\nclass CompoundDataFactory:\n \"\"\"\n A compound's description is like\n\n \n 1454\n CHEBI:58413\n (R)-6-hydroxynicotine\n (<i>R</i>)-6-hydroxynicotine\n C10H15N2O\n 1\n \n \n \n \n\n Note that only such descriptions with a valid accession ID will be parsed by this data factory. Currently there\n are 3 types of valid accession IDs for compounds, i.e. \"CHEBI\", \"GENERIC\", and \"POLYMER\".\n\n This data factory produces basic information (without reactive parts, stoichiometry, nor location) for each\n compound.\n \"\"\"\n compound_prefixes = set([\"CHEBI:\", \"GENERIC:\", \"POLYMER:\"])\n\n @classmethod\n def is_valid_accession_id(cls, accession_id: str):\n for prefix in cls.compound_prefixes:\n if accession_id.startswith(prefix):\n return True\n return False\n\n # Adds ID key and value to compound entries\n @classmethod\n def _add_comp_id(cls, comp_entry: dict, accession_id: str):\n if \"CHEBI:\" in accession_id:\n comp_entry[\"chebi_id\"] = accession_id\n elif \"GENERIC:\" in accession_id:\n comp_entry[\"generic_id\"] = accession_id.lstrip(\"GENERIC:\")\n elif \"POLYMER:\" in accession_id:\n comp_entry[\"poly_id\"] = accession_id.lstrip(\"POLYMER:\")\n else:\n raise ValueError(f\"Cannot recognize accession type. 
Got accession id {accession_id}\")\n\n # Adds name key and id to compound entries\n @classmethod\n def _add_comp_name(cls, comp_entry: dict, description: Element):\n node = description.find(\"rh:name\", NAMESPACES)\n if node is not None:\n comp_name = node.text\n comp_entry[\"name\"] = comp_name\n\n # Adds formula key and value to compound entries\n @classmethod\n def _add_comp_formula(cls, comp_entry: dict, description: Element):\n node = description.find(\"rh:formula\", NAMESPACES)\n if node is not None:\n formula = node.text\n if formula is not None:\n formula = formula.rstrip(\"n\")\n comp_entry[\"formula\"] = formula\n\n # Adds charge key and value to compound entries\n @classmethod\n def _add_comp_charge(cls, comp_entry: dict, description: Element):\n node = description.find(\"rh:charge\", NAMESPACES)\n if node is not None:\n comp_charge = node.text.rstrip(\"n\")\n # comp_charge can be a string like '(-4)(-1)' so its datatype cannot be integer\n comp_entry[\"charge\"] = comp_charge\n\n @classmethod\n def produce(cls, relative_iri: str, accession_id: str, description: Element):\n comp_entry = {}\n\n # we can assume here that relative IRI has a pattern of \"Compound_\\d\", e.g. \"Compound_10594\"\n comp_num = relative_iri.split(\"_\")[1]\n comp_entry[\"comp_num\"] = comp_num\n\n cls._add_comp_id(comp_entry, accession_id)\n cls._add_comp_name(comp_entry, description)\n cls._add_comp_formula(comp_entry, description)\n cls._add_comp_charge(comp_entry, description)\n\n yield comp_entry\n\n @classmethod\n def pack(cls, comp_entries: List[dict]):\n \"\"\"\n Pack a list of compound entries into a dictionary of {comp_num : comp_entry}\n \"\"\"\n return dict((comp_entry[\"comp_num\"], comp_entry) for comp_entry in comp_entries)\n\n\nclass ReactionDataFactory:\n \"\"\"\n A reaction's description is like\n\n \n \n 10000\n RHEA:10000\n H2O + pentanamide = NH4(+) + pentanoate\n H2O + pentanamide = NH4(+) + pentanoate\n ...\n \n \n \n \n \n true\n \n false\n ...\n \n \n \n \n \n\n Note that only such descriptions with a valid accession ID will be selected to this data factory. Currently there\n are 1 type of valid accession IDs for reactions, i.e. those starting with \"RHEA\".\n\n Also note that for each reaction, there will be 4 variants, i.e.\n\n - the master reaction (direction undefined, e.g. \"RHEA:10000\")\n - 2 directional reactions (left-to-right, e.g. \"RHEA:10001\", and right-to-left, e.g. \"RHEA:10002\")\n - the bidirectional reaction (e.g. RHEA:10003)\n\n In this data factory, only the master reactions will be parsed to individual entries, the directional and \n bidirectional reactions will be attached to their master reactions as \"children_rheas\".\n \"\"\"\n @classmethod\n def is_valid_accession_id(cls, accession_id):\n return accession_id.startswith(\"RHEA:\")\n\n @classmethod\n def is_master_reaction(cls, description):\n \"\"\"\n There are multiple ways to tell if a reaction is a master reaction.\n\n Method 1: tell by \"rdfs:subClassOf\", whose values are \"Reaction\", \"DirectionalReaction\",\n \"BidirectionalReaction\".\n Method 2: tell by the existence of \"rh:substrates\" and/or \"rh:products\" (only in \"DirectionalReaction\"),\n plus \"rh:substratesOrProducts\" (only in \"BidirectionalReaction\")\n\n Here we use method 2.\n\n Note that a master reaction's RHEA ID is not necessarily a multiple of 4. E.g. \"RHEA:26018\" for some reason is\n not used, and the next master reaction is \"RHEA:26019\". 
Therefore there is no modulo relationship between a\n reaction's type and its RHEA ID.\n \"\"\"\n return (description.find(\"rh:substrates\", NAMESPACES) is None) and \\\n (description.find(\"rh:substratesOrProducts\", NAMESPACES) is None)\n\n # Adds equation key and associated value to reaction entry\n @classmethod\n def _add_rhea_equation(cls, reaction_entry: dict, description: Element):\n node = description.find(\"rh:equation\", NAMESPACES)\n if node is not None:\n reaction_entry[\"equation\"] = node.text\n\n # Adds is_transport key and associated boolean to reaction entry\n @classmethod\n def _add_rhea_transport(cls, reaction_entry: dict, description: Element):\n node = description.find(\"rh:isTransport\", NAMESPACES)\n if node is not None:\n \"\"\"\n \"rh:isTransport\" has specified rdf:datatype=\"http://www.w3.org/2001/XMLSchema#boolean\",\n therefore only 2 unique values are possible, \"true\" and \"false\"\n \"\"\"\n is_transport = node.text # string type\n is_transport = (is_transport == \"true\") # boolean type\n reaction_entry[\"is_transport\"] = is_transport\n\n # Adds ec_link and ec_id keys and associated values to reaction entry\n # ENZYME is an enzyme nomenclature database, which assigns an EC (Enzyme Commission) number for each enzyme\n @classmethod\n def _add_rhea_ec(cls, reaction_entry: dict, description: Element):\n node = description.find(\"rh:ec\", NAMESPACES)\n if node is not None:\n ec_link = node.attrib[ATTRIB_KEYS[\"rdf:resource\"]]\n ec_id = ec_link.lstrip(BASE_IRIS[\"ec\"])\n\n reaction_entry[\"ec_link\"] = ec_link\n reaction_entry[\"ec_id\"] = ec_id\n\n # Adds status key and associated value to reaction entry. 3 possible values: Approved, Preliminary, Obsolete\n @classmethod\n def _add_rhea_status(cls, reaction_entry: dict, description: Element):\n node = description.find(\"rh:status\", NAMESPACES)\n if node is not None:\n status = node.attrib[ATTRIB_KEYS[\"rdf:resource\"]].lstrip(BASE_IRIS[\"rh\"])\n reaction_entry[\"status\"] = status\n\n # Adds citations key and associated values in a list to reaction entry.\n # Some entries will have no citations and thus no citations key\n @classmethod\n def _add_rhea_citations(cls, reaction_entry: dict, description: Element):\n nodes = description.findall(\"rh:citation\", NAMESPACES)\n if nodes:\n for node in nodes:\n citation = node.attrib[ATTRIB_KEYS[\"rdf:resource\"]].lstrip(BASE_IRIS[\"pubmed\"])\n citation = \"PMID:\" + citation\n reaction_entry.setdefault(\"citations\", []).append(citation)\n\n # Adds the children_rheas key and list of associated rhea ids (should be 3) to reaction entry\n @classmethod\n def _add_rhea_children(cls, reaction_entry: dict, description: Element):\n directional_reactions = description.findall(\"rh:directionalReaction\", NAMESPACES)\n if directional_reactions:\n for reaction in directional_reactions:\n child_absoulte_iri = reaction.attrib[ATTRIB_KEYS[\"rdf:resource\"]]\n child_relative_iri = child_absoulte_iri.lstrip(BASE_IRIS[\"rh\"])\n child_rhea_id = \"RHEA:\" + child_relative_iri\n\n reaction_entry.setdefault(\"children_rheas\", []).append(child_rhea_id)\n\n bidirectional_reaction = description.find(\"rh:bidirectionalReaction\", NAMESPACES)\n if bidirectional_reaction is not None:\n child_absoulte_iri = bidirectional_reaction.attrib[ATTRIB_KEYS[\"rdf:resource\"]]\n child_relative_iri = child_absoulte_iri.lstrip(BASE_IRIS[\"rh\"])\n child_rhea_id = \"RHEA:\" + child_relative_iri\n\n reaction_entry.setdefault(\"children_rheas\", []).append(child_rhea_id)\n\n # Fills rhea entries with 
associated information\n @classmethod\n def produce(cls, accession_id: str, description: Element):\n reaction_entry = {}\n\n reaction_entry[\"rhea_id\"] = accession_id\n\n # reaction_entry[\"side_l\"] = []\n # reaction_entry[\"side_r\"] = []\n\n cls._add_rhea_equation(reaction_entry, description)\n cls._add_rhea_transport(reaction_entry, description)\n cls._add_rhea_ec(reaction_entry, description)\n cls._add_rhea_status(reaction_entry, description)\n cls._add_rhea_citations(reaction_entry, description)\n cls._add_rhea_children(reaction_entry, description)\n\n yield reaction_entry\n\n @classmethod\n def pack(cls, reaction_entries: List[dict]):\n return dict((reaction_entry[\"rhea_id\"], reaction_entry) for reaction_entry in reaction_entries)\n\n\ndef load_annotations(data_folder):\n \"\"\"\n Using ElementTree, rhea.rdf is collapsed into a hierarchy of tags with associated information accessed with .find()\n and .findall() functions.\n\n The main for-loop catches reactive part associations, reaction side associations, compound entries, and reaction\n entries into 4 lists.\n\n Reactive parts are augmented to their associated compounds in the 2nd for-loop.\n\n The 3rd for-loop creates side components from compounds and participation fields, and then attaches side components\n to the associated reactions.\n\n The 4th for-loop yields all reaction docments.\n \"\"\"\n rhea_rdf = open(data_folder + \"/rhea.rdf\", \"r\")\n tree = ET.parse(rhea_rdf)\n root = tree.getroot()\n\n reactive_part_associations = []\n side_associations = []\n compound_entries = []\n reaction_entries = []\n\n for description in root.findall(\"rdf:Description\", NAMESPACES):\n absolute_iri = description.attrib[ATTRIB_KEYS[\"rdf:about\"]]\n relative_iri = absolute_iri.lstrip(BASE_IRIS[\"rh\"])\n\n accession = description.find(\"rh:accession\", NAMESPACES)\n\n if accession is None:\n if ReactivePartDataFactory.is_valid_relative_iri(relative_iri=relative_iri):\n for rp_assoc in ReactivePartDataFactory.produce(relative_iri=relative_iri, description=description):\n reactive_part_associations.append(rp_assoc)\n elif SideDataFactory.is_valid_relative_iri(relative_iri=relative_iri):\n for side_assoc in SideDataFactory.produce(relative_iri=relative_iri, description=description):\n side_associations.append(side_assoc)\n else:\n accession_id = accession.text\n if CompoundDataFactory.is_valid_accession_id(accession_id=accession_id):\n for comp_entry in CompoundDataFactory.produce(relative_iri=relative_iri,\n accession_id=accession_id,\n description=description):\n compound_entries.append(comp_entry)\n elif ReactionDataFactory.is_valid_accession_id(accession_id=accession_id) and \\\n ReactionDataFactory.is_master_reaction(description=description):\n for rhea_entry in ReactionDataFactory.produce(accession_id=accession_id, description=description):\n reaction_entries.append(rhea_entry)\n\n compound_lib = CompoundDataFactory.pack(comp_entries=compound_entries)\n reaction_lib = ReactionDataFactory.pack(reaction_entries=reaction_entries)\n\n for comp_num, rp_entry in reactive_part_associations:\n compound_lib[comp_num].setdefault(\"reactive_parts\", []).append(rp_entry)\n\n for rhea_id, side_key, comp_num, participation_entry in side_associations:\n side_component = {\n **compound_lib[comp_num], # the \"participant\"\n **participation_entry # describes how this compound partipates in the reaction\n }\n del side_component[\"comp_num\"]\n\n reaction_lib[rhea_id].setdefault(side_key, []).append(side_component)\n\n for reaction_entry in 
reaction_lib.values():\n reaction_entry[\"_id\"] = reaction_entry[\"rhea_id\"]\n del reaction_entry[\"rhea_id\"]\n yield reaction_entry\n", "repo_name": "biothings/Rhea", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 24321, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "re.compile", "line_number": 49, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 58, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 66, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 74, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 82, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 90, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 187, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 269, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 277, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 287, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 295, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 310, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 380, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 387, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 401, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 412, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 421, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 431, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 451, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 469, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.parse", "line_number": 489, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree", "line_number": 489, "usage_type": "name"}]} +{"seq_id": "39173949515", "text": "\"\"\"Module providingFunction to use webdriver,select the options,time,pytest.\"\"\"\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.select import Select\nimport pandas as pd\nimport pytest\n\n\nclass Test:\n \"\"\"Class representing a Test\"\"\"\n url = \"https://www.zenclass.in/class\"\n driver = webdriver.Firefox()\n driver.get(url)\n driver.implicitly_wait(5)\n driver.maximize_window()\n\n @pytest.mark.first\n def test_login(self):\n \"\"\"Function that login into application and verify the title\"\"\"\n # locator for username textbox\n xpath1 = '//*[@id=\"root\"]/div/div/div[1]/div[2]/div/div[1]/form/div[1]/div/input'\n self.driver.find_element(by=By.XPATH, value=xpath1).send_keys(\"rajapriya371@gmail.com\")\n # locator for password textbox and entering password\n xpath2 = '//*[@id=\"root\"]/div/div/div[1]/div[2]/div/div[1]/form/div[2]/div/input'\n self.driver.find_element(by=By.XPATH, value=xpath2).send_keys(\"India@123\")\n # locator for Login and clicking on login button\n xpath3 = '//*[@id=\"root\"]/div/div/div[1]/div[2]/div/div[1]/form/button'\n self.driver.find_element(by=By.XPATH, value=xpath3).click()\n verify = self.driver.title\n 
assert verify == \"Zen Class\"\n self.driver.implicitly_wait(5)\n\n @pytest.mark.second\n def test_data(self):\n \"\"\"Function clicking on Queries panel and move control to search text box\"\"\"\n # locators for queries\n xpath4 = '/html/body/div/div[1]/nav/ul/div[6]/li/span'\n queries = self.driver.find_element(by=By.XPATH, value=xpath4)\n time.sleep(10)\n # clicking on queries\n queries.click()\n time.sleep(10)\n # locator for search text box\n xpath5 = '/html/body/div/div[2]/div/div[1]/div[2]/input'\n search_click = self.driver.find_element(by=By.XPATH, value=xpath5)\n # moving control to search textbox\n search_click.click()\n time.sleep(5)\n\n @pytest.mark.third\n def test_excel(self):\n \"\"\"Function extract data from left panel and printing the extracted result\"\"\"\n # locator for Extracting all the information from left-hand side ribbon from the portal\n taskname =self.driver.find_elements(by=By.XPATH, value=\"//div[contains(@class,'ml-4')]\")\n mytask = []\n # printing all the information from left-hand side ribbon from the guvi portal\n for task in taskname:\n print(task.text)\n mytask.append(task.text)\n # Exporting all the data to excel\n df_data = pd.DataFrame(mytask)\n df_data.to_excel(\"Workbook.xlsx\", index=False)\n\n @pytest.mark.fourth\n def test_query(self):\n \"\"\"Function to raise queries 5 times in the Zen portal\"\"\"\n for _ in range(0, 2):\n self.driver.implicitly_wait(15)\n # self.driver.maximize_window()\n # Clicking on create query section\n xpath13 = '//*[@id=\"root\"]/div[2]/div/div[1]/div[1]/button'\n self.driver.find_element(by=By.XPATH, value=xpath13).click()\n self.driver.implicitly_wait(15)\n # locator for cancel button\n xpath6 = '/html/body/div/div[2]/div/div[2]/div[6]/div[2]/div/div/section[3]/div[2]/button[1]'\n cancel_query = self.driver.find_element(by=By.XPATH, value=xpath6)\n # Clicking on create query section\n cancel_query.click()\n self.driver.implicitly_wait(5)\n # Locators for category dropdown\n xpath7 = '/html/body/div/div[2]/div/div[2]/div/div/form/div[2]/div[1]/select'\n dropdown1 = self.driver.find_element(by=By.XPATH, value=xpath7)\n # Click on category dropdown\n dropdown1.click()\n dd1 = Select(dropdown1)\n time.sleep(3)\n dd1.select_by_index(1)\n time.sleep(5)\n xpath8 = '//*[@id=\"root\"]/div[2]/div/div[2]/div/div/form/div[2]/div[2]/select'\n # Locators for subcategory dropdown\n dropdown2 = self.driver.find_element(by=By.XPATH, value=xpath8)\n # Clicking on Subcategory dropdown\n dropdown2.click()\n dd2 = Select(dropdown2)\n time.sleep(3)\n dd2.select_by_index(1)\n time.sleep(5)\n # Locators for Preferred Voice Communication Language dropdown\n xpath9 = '/html/body/div/div[2]/div/div[2]/div/div/form/div[2]/div[4]/select'\n dropdown5 = self.driver.find_element(by=By.XPATH, value=xpath9)\n dropdown5.click()\n # Clicking on Preferred Voice Communication Language\n dd5 = Select(dropdown5)\n time.sleep(3)\n dd5.select_by_index(2)\n time.sleep(5)\n # Locators for query title\n xpath10 = '//*[@id=\"root\"]/div[2]/div/div[2]/div/div/form/div[5]/div/input'\n dropdown3 = self.driver.find_element(by=By.XPATH, value=xpath10)\n # Entering data into Query Title\n query_title = \"Guvi Python AT – 1 &2 Automation Project\"\n dropdown3.send_keys(query_title)\n time.sleep(5)\n # Locators for Query description\n xpath11 = '//*[@id=\"root\"]/div[2]/div/div[2]/div/div/form/div[5]/textarea'\n dropdown4 = self.driver.find_element(by=By.XPATH, value=xpath11)\n # Entering data into Query Description\n query_description = \"This is a Project Test Code 
Running for the Python Automation – 1&2 Project Given by mentor Mr. Suman Gangopadhyay.\"\n dropdown4.send_keys(query_description)\n time.sleep(5)\n # locators for submit button\n xpath12 = '/html/body/div/div[2]/div/div[2]/div/div/form/div[13]/div/button'\n button = self.driver.find_element(by=By.XPATH, value=xpath12)\n # Clicking on submit button\n button.click()\n time.sleep(5)\nt = Test()\n", "repo_name": "rajapriya371/GUVI_Python_Automation_course", "sub_path": "test_sample.py", "file_name": "test_sample.py", "file_ext": "py", "file_size_in_byte": 5980, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "52", "api": [{"api_name": "selenium.webdriver.Firefox", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 23, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 23, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 26, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 29, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 18, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 46, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 46, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 55, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 55, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 51, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 73, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 73, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 83, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 83, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 92, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 92, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.support.select.Select", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 98, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 101, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 101, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.select.Select", "line_number": 104, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 105, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 107, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 110, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 110, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 117, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 117, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 121, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 124, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 124, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 127, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "875749706", "text": "#!/usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n#\r\n# filename: xlsx_parse.py\r\n# Debug on Python 3.5.2\r\n# 2017.04.17 by Oicebot \r\n#\r\n# 运行方法,用 Python IDLE 打开,按 F5 运行\r\n#\r\n#\r\n\r\nfrom openpyxl import Workbook, load_workbook\r\nimport parse_functions # parse_sonography, parse_pathology\r\n\r\ntable_title=['编号','姓名','年龄','住院号',\r\n '描述','位置','大小','形态','腺管','生长方向',\r\n '边缘','分布','内部回声','钙化','后方回声','CDFI'\r\n ]\r\n \r\ntable2_title = ['编号','姓名','年龄','住院号',\r\n '位置','定性','分级','单发/多灶',\r\n '伴随病变的情况','累及周围组织情况',\r\n '淋巴结转移情况'\r\n ]\r\n\r\ndef create_rows(indata_sheet, table_title, col_number=8, method='sonography', testrange=0):\r\n '''\r\n 此函数用于读取具体工作表中所需的单元格数据,以 dict 对象的形式返回。\r\n \r\n indata_sheet : 要处理的工作表对象\r\n col_number : 要提取数据的列号\r\n method : 提取方法: sonography, pathology ... 
etc\r\n testrange : 提取多少层,默认 0 为不限\r\n\r\n '''\r\n \r\n row_objs = [] \r\n \r\n index = 0\r\n \r\n #if testrange > 0:\r\n # print(\"Parsing: {} Col: {} method: {} range: {} \".format(indata_sheet.title, col_number, method, testrange))\r\n \r\n for row in indata_sheet.rows:\r\n if testrange > 0:\r\n if index > testrange:\r\n break\r\n \r\n #函数化调用方法\r\n methodtocall = getattr(parse_functions,'parse_' + method)\r\n \r\n #if testrange > 0:\r\n # print(\"Row {} : Col: {}, Value: {} \".format(index, col_number, row[col_number].value) )\r\n \r\n parsed_data = methodtocall(row[col_number].value)\r\n\r\n for data_item in parsed_data:\r\n\r\n info_data = dict.fromkeys(list(table_title),'')\r\n info_data['编号'] = row[0].value\r\n info_data['姓名'] = row[1].value\r\n info_data['年龄'] = row[3].value\r\n info_data['住院号'] = row[4].value\r\n \r\n for key,item in data_item.items():\r\n info_data[key] = item\r\n\r\n row_objs.append(info_data)\r\n index += 1\r\n\r\n return row_objs\r\n \r\n \r\ndef add_rows_to_table(display_info, table_title, indata_sheet, col=8, mtd='sonography', tstrange=0):\r\n \r\n outtable= [ table_title, ]\r\n\r\n row_objs = create_rows(indata_sheet, table_title, col_number=col, method=mtd, testrange=tstrange )\r\n \r\n index = 0\r\n print('添加{}:'.format(display_info))\r\n for i in row_objs:\r\n if index % 10 == 0:\r\n print(str(index),end='')\r\n else:\r\n print('.',end='')\r\n \r\n #print('----- {} -----'.format(index))\r\n index += 1\r\n next_row =[]\r\n for key in table_title:\r\n #print('{} : {}'.format(key,i[key]))\r\n next_row.append(i[key])\r\n\r\n outtable.append(next_row)\r\n print(' 共导出 {} 条数据。'.format(index))\r\n return outtable\r\n \r\n\r\nif __name__ == '__main__':\r\n\r\n indata = load_workbook('testdata.xlsx')\r\n #indata = load_workbook('info.xlsx')\r\n outdata_filename = \"outinfo.xlsx\"\r\n\r\n print('该文件中含有如下工作表:')\r\n sheets = []\r\n for sheet in indata:\r\n sheets.append(sheet)\r\n\r\n print('===========')\r\n SheetID = 'A'\r\n indata_sheet = None\r\n while not SheetID.isdigit():\r\n \r\n i = 0\r\n for sheet in sheets:\r\n print (\"{} : {}\".format(i,sheet.title))\r\n i += 1\r\n print('请输入你想要解析的工作表的编号:',end='')\r\n SheetID = input()\r\n\r\n try:\r\n indata_sheet = sheets[int(SheetID)]\r\n except Exception as errinfo :\r\n print('打开失败……', errinfo)\r\n SheetID = 'A'\r\n else:\r\n print('已打开表:“{}”,解析中…'.format(indata_sheet.title))\r\n \r\n '''\r\n 备注: indata_sheet的表头结构:\r\n 0 1 2 3 4 5 6 7 8 9\r\n 编号 姓名 性别 年龄 住院号 检查时间 病史 化疗 M-超声描述 M-超声诊断 ...\r\n 14 15 21 22 23 24 25 26 27 28\r\n M-穿刺病理 M-穿刺免疫组化 钼靶描述 钼靶诊断 MRI描述 MRI诊断 手术名称 手术过程 术中冰冻 石蜡病理\r\n '''\r\n \r\n data1 = add_rows_to_table('描述数据', table_title, indata_sheet, col=8, mtd='sonography',)\r\n data2 = add_rows_to_table('病理诊断', table2_title, indata_sheet, col=28, mtd='pathology', tstrange=10, )\r\n\r\n wb = Workbook()\r\n ws = wb.active\r\n \r\n line_index = 0\r\n lastrow=[]\r\n current_side = -1 #0-left, 1-right 2-both\r\n current_ID = -1\r\n for row in data1:\r\n outrow = list(row)\r\n if line_index < len(data2):\r\n #第一行标题续后,之后只有编号相同的续上\r\n if line_index == 0 or data2[line_index][0] <= outrow[0]:\r\n\r\n while data2[line_index][0] < outrow[0]: #如果切出来的东西比较多,就要塞空行进去\r\n addrow = list(lastrow[:4]) #前4列还是塞进去\r\n addrow.extend(list(\" \" * 11)) #塞入11列空格\r\n addrow.extend(data2[line_index][4:]) #跳过 '编号','姓名','年龄','住院号',\r\n ws.append(addrow)\r\n line_index += 1\r\n\r\n if line_index > 0: #Exclude 1st/title row\r\n if outrow[0] != current_ID: #Only change cache when meet new ID\r\n current_ID = data2[line_index][0] #cache 
current ID '编号'\r\n #cache current side\r\n if all(i in data2[line_index][4] for i in ['左','右']):\r\n current_side = 2\r\n elif '左' in data2[line_index][4]:\r\n current_side = 0\r\n elif '右' in data2[line_index][4]:\r\n current_side = 1\r\n else:\r\n current_side = -1\r\n\r\n compare_info = data2[line_index][4]\r\n #if both sides exist, or left and right compared, then do nothing\r\n if current_side == 2 or ('左' in outrow[4] and current_side == 0) or ('右' in outrow[4] and current_side == 1):\r\n pass\r\n else:\r\n compare_info = compare_info + '【卧槽】'\r\n\r\n outrow.append(compare_info)\r\n\r\n else:\r\n outrow.append('位置')\r\n\r\n outrow.extend(data2[line_index][5:]) #跳过 '编号','姓名','年龄','住院号','位置'\r\n\r\n line_index += 1\r\n\r\n #//TODO: 石蜡病理的数量少, insert 【卧槽】 info\r\n else:\r\n compare_info = \"\" #data2[line_index][4]\r\n #if both sides exist, or left and right compared, then do nothing\r\n if current_side == 2 or ('左' in outrow[4] and current_side == 0) or ('右' in outrow[4] and current_side == 1):\r\n pass\r\n else:\r\n compare_info = compare_info + '【卧槽】'\r\n\r\n outrow.append(compare_info)\r\n \r\n ws.append(outrow)\r\n lastrow=list(outrow) #保存之前处理过的行\r\n \r\n print('解析完毕!')\r\n\r\n wb.save(outdata_filename)\r\n\r\n", "repo_name": "oicebot/xlsx_parse", "sub_path": "xlsx_parse.py", "file_name": "xlsx_parse.py", "file_ext": "py", "file_size_in_byte": 7776, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "52", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 102, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "71849363364", "text": "from tkinter import *\nfrom collections import deque as queue\nimport time\nfrom tkinter.ttk import Combobox\nfrom venv import create\nfrom mazes import maze1, maze2\nimport sys\n\nsys.setrecursionlimit(5000)\n\nroot = Tk()\nroot.config(bg='white')\nroot.minsize(1100, 600)\nroot.title('Shortes Path')\ncanvas = Canvas(root, width=700, height=700, bg='white')\ncanvas.pack(side=LEFT)\ninfo_label = Label(root, text='', bg='white', fg='red', font=('arial', 15))\ninfo_label.place(x=710, y=65)\n\na = maze1\n\nn = 50\nm = 50\nresise_img = 700\n\nwhichChar = StringVar()\nwhichAlg = StringVar()\nchoseMaze = StringVar()\n\nfor j in range(n):\n for i in range(n):\n a[j] = list(a[j])\n\nfinal_row = -1\nfinal_col = -1\n\n# colors\n# wall = X\n# wall edited from gui = x\n# path = A\n# cell = .\n# start = S\n# end = E\n# cell which must go through = M\n\nwall_colo = 'red'\npath_colo = 'black'\nablep_colo = 'white'\nstart_colo = 'blue'\nend_colo = '#CCCC00'\nbfs_colo = '#C0C0C0'\nbfs_colo2 = '#FF9933'\n\n# cordenades inici\nrow = -1\ncol = -1\nfor i in range(n):\n for j in range(m):\n if a[i][j] == 'S':\n row = i\n col = j\n\ninc_x = [1, -1, 0, 0]\ninc_y = [0, 0, 1, -1]\ndist = [[-1 for i in range(m)] for i in range(n)]\n\ndef reset_arrayA():\n for i in range(n):\n for j in range(m):\n if a[i][j] == 'A':\n a[i][j] = '.'\n\n return\n\ndef reset_variables():\n global dist, row, col, final_col, final_row, a\n\n reset_arrayA()\n\n for j in range(n):\n for i in range(n):\n a[j] = list(a[j])\n\n dist = [[-1 for i in range(m)] for i in range(n)]\n\n row = -1\n col = -1\n for i in range(n):\n for j in range(m):\n if a[i][j] == 'S':\n row = i\n col = j\n\n final_row = -1\n final_col = -1\n\ndef drawdata(x, y, col):\n global canvas\n \n canvas.create_rectangle(x*14, y*14, (x*14)+14, (y*14)+14, fill=col)\n #time.sleep(0.0005)\n 
#root.update_idletasks()\n\ndef print_matrix(matrix):\n for i in range(n):\n for j in range(m):\n if matrix[i][j] == 'X' or matrix[i][j] == 'x':\n drawdata(i, j, wall_colo)\n elif matrix[i][j] == '.':\n drawdata(i, j, ablep_colo)\n\n elif matrix[i][j] == 'S':\n drawdata(i, j, start_colo)\n\n elif matrix[i][j] == 'E':\n drawdata(i, j, end_colo)\n\n elif matrix[i][j] == 'A':\n drawdata(i, j, path_colo)\n\nprint_matrix(a)\n\ndef chose_maze_func():\n global chose_maze, a\n\n if choseMaze.get() == 'default':\n a = maze1\n \n elif choseMaze.get() == 'no walls':\n a = maze2\n\n else:\n pass\n\n for i in range(n):\n for j in range(m):\n if a[i][j] == 'x':\n a[i][j] = '.'\n\n reset_variables()\n print_matrix(a)\n\nchose_maze_func()\n\n\ndef bfs(f, c, end):\n global final_row, final_col, canvas\n info_label.config(text='')\n q = queue()\n\n dist[f][c] = 0\n q.append((f, c))\n\n while (len(q) > 0):\n\n p = q.popleft()\n x = p[0]\n y = p[1]\n\n drawdata(x, y, bfs_colo)\n root.update_idletasks()\n\n for d in range(4):\n x2 = x + inc_x[d]\n y2 = y + inc_y[d]\n\n if (x2 >= 0 and x2 < n and y2 >= 0 and y2 < m and dist[x2][y2] == -1):\n dist[x2][y2] = dist[x][y] + 1\n if a[x2][y2] == end:\n #print('Minium distance', dist[x2][y2])\n info_label.config(text=f'Minium distance {dist[x2][y2]}')\n final_row = x2\n final_col = y2\n time.sleep(0.5)\n return True\n\n if a[x2][y2] == '.':\n q.append((x2, y2))\n drawdata(x2, y2, bfs_colo2)\n root.update_idletasks()\n\n info_label.config(text=\"It's not reachable\")\n return False\n\ndef create_path():\n var = dist[final_row][final_col]\n x = final_row\n y = final_col\n\n while True:\n if x == row and y == col:\n a[row][col] = 'S'\n break\n\n for i in range(4):\n x2 = x + inc_x[i]\n y2 = y + inc_y[i]\n\n if (x2 >= 0 and x2 < n and y2 >= 0 and y2 < m and dist[x2][y2] == var-1 and a[x2][y2] != 'X' and a[x2][y2] != 'x'):\n a[x2][y2] = 'A'\n var -= 1\n x = x2\n y = y2\n break\n return\n\nflag = True\ndef dfs(x, y):\n global flag\n\n dist[x][y] = 0\n\n drawdata(x, y, bfs_colo)\n root.update_idletasks()\n \n for k in range(4):\n x2 = x + inc_x[k]\n y2 = y + inc_y[k]\n if (flag and x2 >= 0 and x2 < n and y2 >= 0 and y2 < m and a[x2][y2] != 'x' and a[x2][y2] != 'X' and dist[x2][y2] == -1):\n if (a[x2][y2] == 'E'):\n flag = False\n info_label.config(text='There\\'s a path')\n return True\n else:\n dfs(x2, y2)\n \n if (flag):\n info_label.config(text='There\\'s no path')\n return False\n\ndef execute_func():\n global must_cell_var, mustcord_x, mustcord_y, flag\n\n if which_alg.get() == 'BFS':\n x = bfs(row, col, 'E')\n if x:\n create_path()\n print_matrix(a)\n elif which_alg.get() == 'DFS':\n flag = True\n if dfs(row, col):\n print_matrix(a)\n\ndef reset_func():\n reset_variables()\n print_matrix(a)\n\n\ndef change_cells():\n global entry_x, entry_y, a, which_char, must_cell_var, mustcord_x, mustcord_y\n\n try:\n x_cord = int(entry_x.get())\n y_cord = int(entry_y.get())\n\n except:\n x_cord = 1\n y_cord = 1\n\n if x_cord <= 48 and x_cord > 0 and y_cord <= 48 and y_cord >0 and (a[x_cord][y_cord] != 'S' and a[x_cord][y_cord] != 'F'):\n if which_char.get() == 'Start (blue)':\n a[x_cord][y_cord] = 'S'\n a[row][col] = '.'\n drawdata(x_cord, y_cord, start_colo) \n drawdata(row, col, ablep_colo)\n reset_variables()\n return\n\n elif which_char.get() == 'End (yellow)':\n for i in range(n):\n for j in range(m):\n if a[i][j] == 'E':\n a[i][j] = '.'\n a[x_cord][y_cord] = 'E'\n drawdata(i, j, ablep_colo)\n drawdata(x_cord, y_cord, end_colo)\n reset_variables()\n return\n\n elif 
which_char.get() == 'Wall/path (red/white)':\n if a[x_cord][y_cord] == '.':\n a[x_cord][y_cord] = 'x'\n drawdata(x_cord, y_cord, wall_colo)\n reset_variables()\n return\n \n elif a[x_cord][y_cord] == 'X' or a[x_cord][y_cord] == 'x':\n a[x_cord][y_cord] = '.'\n drawdata(x_cord, y_cord, ablep_colo)\n reset_variables()\n return\n \n reset_variables()\n\ndef bind_func(event):\n global mouse_x, mouse_y, entry_x, entry_y, which_char\n\n mouse_x = event.x\n mouse_y = event.y\n\n b = True\n\n for w in widget_list:\n if w is event.widget:\n b = False\n\n if mouse_x >= 14 and mouse_y >= 14 and mouse_x <= 686 and mouse_y <= 686 and which_char.get() != 'No' and b:\n cordx = mouse_x // 14\n cordy = mouse_y // 14\n\n entry_x.delete(0, END)\n entry_y.delete(0, END)\n\n entry_y.insert(0, cordy)\n entry_x.insert(0, cordx)\n\n change_cells()\n mouse_x = 0\n mouse_y = 0\n\ndef bind_auto(event):\n global mouse_x, mouse_y, entry_x, entry_y, which_char \n mouse_x = event.x\n mouse_y = event.y\n\n b = True\n\n for w in widget_list:\n if w is event.widget:\n b = False\n\n if mouse_x >= 14 and mouse_y >= 14 and mouse_x <= 686 and mouse_y <= 686 and which_char.get() == 'Drag walls' and b:\n cordx = mouse_x // 14\n cordy = mouse_y // 14\n\n entry_x.delete(0, END)\n entry_y.delete(0, END)\n\n entry_y.insert(0, cordy)\n entry_x.insert(0, cordx)\n\n if a[cordx][cordy] == '.':\n drawdata(cordx, cordy, wall_colo)\n a[cordx][cordy] = 'x'\n reset_variables()\n\n mouse_x = 0\n mouse_y = 0\n\n# graphics\nexecute = Button(root, text='FIND', bg='#45E180', width=6, command=execute_func)\nexecute.place(x=resise_img+10, y=10)\n\nreset = Button(root, text='RESET', bg='white', width=6, command=reset_func)\nreset.place(x=788, y=10)\n\nchose_maze = Combobox(root, textvariable=choseMaze, width=10, values=['default', 'no walls'])\nchose_maze.place(x=875, y=15)\nchose_maze.current(['0'])\n\nexe_chose_maze = Button(root, text='RESET\\n MAZE', bg='white', width=6, command=chose_maze_func)\nexe_chose_maze.place(x=985, y=10)\n\nentry_x = Entry(root, width=5)\nentry_y = Entry(root, width=5)\n\ncontr = Label(root, text=\"CONTROLS: \", bg='white', font=('arial', 12))\ncontr.place(x=resise_img+10, y=110)\n\nwhich_char = Combobox(root, textvariable=whichChar, values=[\n 'Start (blue)', 'End (yellow)', 'Wall/path (red/white)', 'Drag walls'])\nwhich_char.place(x=820, y=110)\nwhich_char.current([3])\n\nalg_label = Label(root, text=\"ALGORITHM: \", bg='white', font=('arial', 12))\nalg_label.place(x=resise_img+10, y=150)\n\nwhich_alg = Combobox(root, textvariable=whichAlg, values=['BFS', 'DFS'])\nwhich_alg.place(x=820, y=150)\nwhich_alg.current([1])\n\nwidget_list = [contr, alg_label, execute, reset, entry_x, entry_y, which_char, info_label, which_alg, exe_chose_maze, chose_maze]\n\nroot.bind('